I'm doing semantic segmentation on microscope image stacks. My code works fine, but it only uses one core of my CPU, so I have to wait a long time to get the segmented images.
I recently learned that there are Python libraries for multicore processing, but I don't know how to implement them.
Could someone help me edit my code to use one of the multiprocessing libraries? My code is below.
import numpy as np
from patchify import patchify, unpatchify
import os
import cv2
from tqdm import tqdm
from tensorflow import keras
from tensorflow.keras.utils import normalize
import natsort
model = keras.models.load_model("C:/mymodel.h5", compile=False)

# create the directory for the reconstructed images
recon_image_directory = "C:/Users/recon"
if not os.path.exists(recon_image_directory):
    os.makedirs(recon_image_directory)

large_image_path = "C:/original_images/"
check_images = natsort.natsorted(os.listdir(large_image_path))
for num, large_image_name in tqdm(enumerate(check_images), total=len(check_images)):
    if large_image_name.endswith(".tif"):
        img = cv2.imread(os.path.join(large_image_path, large_image_name), 0)  # read as grayscale
        patches = patchify(img, (256, 256), step=256)  # non-overlapping 256x256 tiles
        predicted_patches = []
        for i in range(patches.shape[0]):
            for j in range(patches.shape[1]):
                single_patch = patches[i, j, :, :]  # (256, 256)
                single_patch_norm = normalize(np.array(single_patch), axis=1)
                single_patch_input = np.stack((single_patch_norm,) * 3, axis=-1)  # (256, 256, 3)
                single_patch_input = np.expand_dims(single_patch_input, 0)  # (1, 256, 256, 3)
                single_patch_prediction = (model.predict(single_patch_input)[0, :, :, 0] > 0.5).astype(np.uint8)
                predicted_patches.append(single_patch_prediction)
        predicted_patches = np.array(predicted_patches)
        predicted_patches_reshaped = np.reshape(predicted_patches, (patches.shape[0], patches.shape[1], 256, 256))
        reconstructed_image = unpatchify(predicted_patches_reshaped, img.shape)
        cv2.imwrite(recon_image_directory + "/recon_" + str(num) + ".tif", reconstructed_image)
Answer:
Does this snippet work? It should run each prediction in a separate process.
import ray

ray.init()  # start Ray; by default it uses every CPU core on the machine

@ray.remote
def predict(num: int, large_image_name: str) -> None:
    # `model`, the paths, and the helpers are captured from your script above.
    # If the Keras model fails to serialize to the worker processes, load it
    # here inside the task instead.
    img = cv2.imread(os.path.join(large_image_path, large_image_name), 0)
    patches = patchify(img, (256, 256), step=256)
    predicted_patches = []
    for i in range(patches.shape[0]):
        for j in range(patches.shape[1]):
            single_patch = patches[i, j, :, :]  # (256, 256)
            single_patch_norm = normalize(np.array(single_patch), axis=1)
            single_patch_input = np.stack((single_patch_norm,) * 3, axis=-1)  # (256, 256, 3)
            single_patch_input = np.expand_dims(single_patch_input, 0)  # (1, 256, 256, 3)
            single_patch_prediction = (model.predict(single_patch_input)[0, :, :, 0] > 0.5).astype(np.uint8)
            predicted_patches.append(single_patch_prediction)
    predicted_patches = np.array(predicted_patches)
    predicted_patches_reshaped = np.reshape(predicted_patches, (patches.shape[0], patches.shape[1], 256, 256))
    reconstructed_image = unpatchify(predicted_patches_reshaped, img.shape)
    cv2.imwrite(recon_image_directory + "/recon_" + str(num) + ".tif", reconstructed_image)

futures = []
for num, large_image_name in enumerate(check_images):
    if large_image_name.endswith(".tif"):
        # pass `num` in explicitly so each task knows its output filename
        futures.append(predict.remote(num, large_image_name))
ray.get(futures)  # block until every image has been segmented and written
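Each @ray.remote task runs in its own worker process, and by default Ray schedules one task per CPU core, so with N cores roughly N images are segmented at once. One caveat worth checking on your setup: every worker holds its own copy of the TensorFlow model in memory, so keep an eye on RAM with large models.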
We also have a high-level abstraction for doing this sort of thing. If you're interested, you should check out the Ray AI Runtime (AIR).
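AIR's API has moved around between Ray releases, so rather than quote it from memory, here is a minimal sketch of the same idea using plain Ray actors (not the AIR API itself): each actor is a long-lived worker process that loads the model once and then segments many images, which avoids shipping the model to the workers for every task. The Segmenter name, the pool size of 4, and the reuse of the paths and helpers from your script are all illustrative assumptions.

import ray
from ray.util import ActorPool

ray.init()

@ray.remote
class Segmenter:
    """Long-lived worker process: loads the model once, segments many images."""
    def __init__(self, model_path: str):
        from tensorflow import keras  # import inside the worker process
        self.model = keras.models.load_model(model_path, compile=False)

    def predict(self, num: int, large_image_name: str) -> None:
        # same per-patch pipeline as in the question's script
        img = cv2.imread(os.path.join(large_image_path, large_image_name), 0)
        patches = patchify(img, (256, 256), step=256)
        preds = []
        for patch in patches.reshape(-1, 256, 256):
            p = normalize(patch, axis=1)
            p = np.expand_dims(np.stack((p,) * 3, axis=-1), 0)  # (1, 256, 256, 3)
            preds.append((self.model.predict(p)[0, :, :, 0] > 0.5).astype(np.uint8))
        preds = np.array(preds).reshape(patches.shape[0], patches.shape[1], 256, 256)
        cv2.imwrite(recon_image_directory + "/recon_" + str(num) + ".tif",
                    unpatchify(preds, img.shape))

# a pool of 4 workers is an illustrative choice; size it to your CPU and RAM
pool = ActorPool([Segmenter.remote("C:/mymodel.h5") for _ in range(4)])
tasks = [(n, name) for n, name in enumerate(check_images) if name.endswith(".tif")]
list(pool.map(lambda actor, args: actor.predict.remote(*args), tasks))

pool.map streams work to whichever actor is free, so all four model copies stay busy until the image list is exhausted.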