Training a TensorFlow 2 U-Net on Your Own Images
Following the TensorFlow image segmentation tutorial (https://www.tensorflow.org/tutorials/images/segmentation) step by step, you can easily put together a working U-Net example. The script automatically downloads the pretrained model and the sample data; start training, get a model, pull some pet pictures off the web, and the recognition results are quite good.
Looking at the code, though, the samples are wrapped by tensorflow_datasets. How do we load our own annotated images for training and recognition? One idea is to extract the relevant code from tensorflow_datasets, modify it, and reuse it, but after tracing through it, the internals turn out to be wrapped in layer upon layer of abstraction and are not easy to pull out. The most direct approach is to load the images from disk ourselves, organize them into a Dataset, and feed that to the model for training.
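The heart of that last step is small. Here is a minimal sketch (the array shapes and contents are placeholders, not the tutorial's data) of pairing in-memory images with masks and turning them into a tf.data.Dataset:

```python
import numpy as np
import tensorflow as tf

# Placeholder data: 10 RGB images and their single-channel masks
images = np.zeros((10, 128, 128, 3), dtype=np.float32)
masks = np.zeros((10, 128, 128, 1), dtype=np.float32)

# Pair every image with its mask, then batch for training
dataset = tf.data.Dataset.from_tensor_slices((images, masks)).batch(4)
for x, y in dataset.take(1):
    print(x.shape, y.shape)  # (4, 128, 128, 3) (4, 128, 128, 1)
```

Everything that follows is essentially this pattern, plus the real disk-loading and preprocessing code.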
First, while the official example downloads its data, it prints the path where the samples are saved. Open that directory.
Under the extracted directory you will find the original images and the annotation PNG images. Copy both sets of images into your own project:
ann is the directory of annotation (mask) images, and images is the directory of original images.
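For reference, the loading code below assumes a layout along these lines, with ./pet_images as the root directory later passed to load:

```
pet_images/
├── images/   # original photos (JPEG)
└── ann/      # annotation masks (PNG)
```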
With the data ready, the code below loads the images, organizes the training and validation Datasets, and runs training:
```python
import os

import numpy as np
import tensorflow as tf
from tensorflow_examples.models.pix2pix import pix2pix
from PIL import Image
from IPython.display import clear_output
import matplotlib.pyplot as plt


# Load an original image from disk
def load_tensor_from_file(img_file):
    img = Image.open(img_file)
    sample_image = np.array(img)
    # The sample images are not uniform (grayscale, RGBA, ...);
    # convert them to RGB, otherwise an exception is thrown
    if len(sample_image.shape) != 3 or sample_image.shape[2] == 4:
        img = img.convert("RGB")
        sample_image = np.array(img)
    sample_image = tf.image.resize(sample_image, [128, 128])
    return sample_image


# Load an annotation (mask) image from disk
def load_ann_from_file(img_file):
    sample_image = tf.image.decode_image(tf.io.read_file(img_file))
    # Masks are not uniform either; convert multi-channel masks to grayscale,
    # otherwise an exception is thrown
    if sample_image.shape[2] != 1:
        img = Image.open(img_file)
        img = img.convert("L")  # convert to grayscale
        sample_image = np.array(img)[..., np.newaxis]  # resize needs a channel axis
    sample_image = tf.image.resize(sample_image, [128, 128])
    return sample_image


# Load the images, build the training and validation Datasets,
# and return the sample count of each
def load(img_path):
    trainImageList = []
    valImageList = []
    path = img_path + "/images"
    files = sorted(os.listdir(path))  # sort so images and masks pair up
    cnt = 0
    for imgFile in files:
        if os.path.isdir(os.path.join(path, imgFile)):
            continue
        file = path + "/" + imgFile
        print("load image ", file)
        cnt += 1
        img = load_tensor_from_file(file)
        img = tf.squeeze(img)
        # Every 8th image goes to the validation set
        if cnt % 8 == 0:
            valImageList.append(img)
        else:
            trainImageList.append(img)
        # Load at most ~1000 samples; on a modest machine,
        # more samples cause an OOM error
        if cnt > 1000:
            break

    trainAnnList = []
    valAnnList = []
    path = img_path + "/ann"
    files = sorted(os.listdir(path))
    cnt = 0
    for imgFile in files:
        if os.path.isdir(os.path.join(path, imgFile)):
            continue
        file = path + "/" + imgFile
        print("load image ", file)
        img = load_ann_from_file(file)
        cnt += 1
        if cnt % 8 == 0:
            valAnnList.append(img)
        else:
            trainAnnList.append(img)
        if cnt > 1000:
            break

    train_num = len(trainImageList)
    val_num = len(valImageList)

    x = tf.convert_to_tensor(trainImageList, dtype=tf.float32)
    y = tf.convert_to_tensor(trainAnnList, dtype=tf.float32)
    dataset_train = tf.data.Dataset.from_tensor_slices((x, y))

    x_val = tf.convert_to_tensor(valImageList, dtype=tf.float32)
    y_val = tf.convert_to_tensor(valAnnList, dtype=tf.float32)
    dataset_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))

    return dataset_train, train_num, dataset_val, val_num


train_dataset, train_num, val_dataset, val_num = load("./pet_images")


def normalize(input_image, input_mask):
    input_image = tf.cast(input_image, tf.float32) / 128.0 - 1
    # Mask pixels must be class indices. For masks labeled with labelme,
    # convert the colors to class label indices first, otherwise loss=nan
    input_mask -= 1
    return input_image, input_mask


@tf.function
def load_image_train(x, y):
    input_image = tf.image.resize(x, (128, 128))
    input_mask = tf.image.resize(y, (128, 128))
    # Random horizontal flip as data augmentation
    if tf.random.uniform(()) > 0.5:
        input_image = tf.image.flip_left_right(input_image)
        input_mask = tf.image.flip_left_right(input_mask)
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask


def load_image_test(x, y):
    input_image = tf.image.resize(x, (128, 128))
    input_mask = tf.image.resize(y, (128, 128))
    input_image, input_mask = normalize(input_image, input_mask)
    return input_image, input_mask


TRAIN_LENGTH = train_num
BATCH_SIZE = 16  # 64; adjust to your GPU
BUFFER_SIZE = 1000
STEPS_PER_EPOCH = TRAIN_LENGTH

train = train_dataset.map(load_image_train, num_parallel_calls=tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.map(load_image_test)

train_dataset = train.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE).repeat()
train_dataset = train_dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
val_dataset = val_dataset.batch(BATCH_SIZE)


def display(display_list):
    plt.figure(figsize=(15, 15))
    title = ['Input Image', 'True Mask', 'Predicted Mask']
    for i in range(len(display_list)):
        plt.subplot(1, len(display_list), i + 1)
        plt.title(title[i])
        plt.imshow(tf.keras.preprocessing.image.array_to_img(display_list[i]))
        plt.axis('off')
    plt.show()


np.set_printoptions(threshold=128 * 128)
for image, mask in train.take(1):
    sample_image, sample_mask = image, mask
    print(tf.reduce_min(mask), tf.reduce_max(mask), tf.reduce_mean(mask))
display([sample_image, sample_mask])

OUTPUT_CHANNELS = 3

base_model = tf.keras.applications.MobileNetV2(input_shape=[128, 128, 3], include_top=False)
layer_names = [
    'block_1_expand_relu',   # 64x64
    'block_3_expand_relu',   # 32x32
    'block_6_expand_relu',   # 16x16
    'block_13_expand_relu',  # 8x8
    'block_16_project',      # 4x4
]
layers = [base_model.get_layer(name).output for name in layer_names]

# Build the feature-extraction (encoder) model
down_stack = tf.keras.Model(inputs=base_model.input, outputs=layers)
down_stack.trainable = False

up_stack = [
    pix2pix.upsample(512, 3),  # 4x4 -> 8x8
    pix2pix.upsample(256, 3),  # 8x8 -> 16x16
    pix2pix.upsample(128, 3),  # 16x16 -> 32x32
    pix2pix.upsample(64, 3),   # 32x32 -> 64x64
]


def unet_model(output_channels):
    last = tf.keras.layers.Conv2DTranspose(
        output_channels, 3, strides=2, padding='same', activation='softmax')

    inputs = tf.keras.layers.Input(shape=[128, 128, 3])
    x = inputs

    # Downsampling path
    skips = down_stack(x)
    x = skips[-1]  # take the last output
    skips = reversed(skips[:-1])

    # Upsampling path with skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        concat = tf.keras.layers.Concatenate()
        x = concat([x, skip])

    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)


model = unet_model(OUTPUT_CHANNELS)
adam = tf.keras.optimizers.Adam(learning_rate=1e-3)
model.compile(adam, loss='sparse_categorical_crossentropy', metrics=['accuracy'])


def create_mask(pred_mask):
    pred_mask = tf.argmax(pred_mask, axis=-1)
    pred_mask = pred_mask[..., tf.newaxis]
    return pred_mask[0]


def show_predictions(dataset=None, num=1):
    if dataset:
        for image, mask in dataset.take(num):
            pred_mask = model.predict(image)
            display([image[0], mask[0], create_mask(pred_mask)])
    else:
        display([sample_image, sample_mask,
                 create_mask(model.predict(sample_image[tf.newaxis, ...]))])


show_predictions()


class DisplayCallback(tf.keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        clear_output(wait=True)
        show_predictions()
        print('\nSample Prediction after epoch {}\n'.format(epoch + 1))


EPOCHS = 10
VAL_SUBSPLITS = 5
VALIDATION_STEPS = val_num // BATCH_SIZE // VAL_SUBSPLITS

model_history = model.fit(train_dataset, epochs=EPOCHS,
                          steps_per_epoch=STEPS_PER_EPOCH,
                          validation_steps=VALIDATION_STEPS,
                          validation_data=val_dataset,
                          callbacks=[DisplayCallback()])

model.save("poker.h5")

loss = model_history.history['loss']
val_loss = model_history.history['val_loss']
epochs = range(EPOCHS)

plt.figure()
plt.plot(epochs, loss, 'r', label='Training loss')
plt.plot(epochs, val_loss, 'bo', label='Validation loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss Value')
plt.ylim([0, 1])
plt.legend()
plt.show()
```

The steps are commented in detail in the code, so I won't repeat them here. Because of limited PC resources, neither the sample count nor the number of epochs was tuned to the optimum. The prediction results are shown below:

[figure: sample prediction results]
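One point from the normalize comment above is worth spelling out: sparse_categorical_crossentropy expects every mask pixel to be a class index (0, 1, 2, ...). The Oxford pet masks already use the values 1/2/3, which is why input_mask -= 1 alone is enough there. If your own tool (labelme, for example) exports RGB-colored masks instead, they need an explicit color-to-index conversion first, or the loss becomes nan. A sketch of that conversion, with a hypothetical palette you would replace with your own mask colors:

```python
import numpy as np
from PIL import Image

# Hypothetical palette - substitute the actual colors in your masks
COLOR_TO_CLASS = {
    (0, 0, 0): 0,      # background
    (128, 0, 0): 1,    # object body
    (0, 128, 0): 2,    # object outline
}


def rgb_mask_to_class_indices(mask_file):
    """Convert an RGB annotation PNG to an (H, W, 1) array of class indices."""
    rgb = np.array(Image.open(mask_file).convert("RGB"))
    indices = np.zeros(rgb.shape[:2], dtype=np.uint8)
    for color, class_id in COLOR_TO_CLASS.items():
        indices[np.all(rgb == np.array(color), axis=-1)] = class_id
    return indices[..., np.newaxis]
```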
The load method can be improved with train_test_split from sklearn.model_selection:
```python
from sklearn.model_selection import train_test_split


# Load the images, split them into training and validation sets,
# and return the sample count of each
def load(img_path):
    trainImageList = []
    path = img_path + "/images"
    files = sorted(os.listdir(path))  # sort so images and masks pair up
    cnt = 0
    for imgFile in files:
        if os.path.isdir(os.path.join(path, imgFile)):
            continue
        file = path + "/" + imgFile
        print("load image ", file)
        cnt += 1
        img = load_tensor_from_file(file)
        img = tf.squeeze(img)
        trainImageList.append(img)
        # Load at most ~1000 samples; on a modest machine,
        # more samples cause an OOM error
        if cnt > 1000:
            break

    trainAnnList = []
    path = img_path + "/ann"
    files = sorted(os.listdir(path))
    cnt = 0
    for imgFile in files:
        if os.path.isdir(os.path.join(path, imgFile)):
            continue
        file = path + "/" + imgFile
        print("load image ", file)
        img = load_ann_from_file(file)
        cnt += 1
        trainAnnList.append(img)
        if cnt > 1000:
            break

    # Shuffle and split 80/20 into training and validation sets
    train_x, val_x, train_y, val_y = train_test_split(
        trainImageList, trainAnnList, test_size=0.2, random_state=0)
    train_num = len(train_x)
    val_num = len(val_x)

    x = tf.convert_to_tensor(train_x, dtype=tf.float32)
    y = tf.convert_to_tensor(train_y, dtype=tf.float32)
    dataset_train = tf.data.Dataset.from_tensor_slices((x, y))

    x_val = tf.convert_to_tensor(val_x, dtype=tf.float32)
    y_val = tf.convert_to_tensor(val_y, dtype=tf.float32)
    dataset_val = tf.data.Dataset.from_tensor_slices((x_val, y_val))

    return dataset_train, train_num, dataset_val, val_num
```
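Both versions of load stop at roughly 1000 samples because every decoded image is held in memory before from_tensor_slices is called. An alternative worth sketching (not from the original post) is to hand tf.data only the file paths and let it decode lazily, so memory use stays flat however large the dataset is. This assumes each image in images/ has a same-named .png mask in ann/:

```python
import os
import tensorflow as tf


def make_streaming_dataset(img_dir, ann_dir):
    """Build a Dataset that decodes image/mask pairs lazily, batch by batch."""
    names = sorted(f for f in os.listdir(img_dir)
                   if not os.path.isdir(os.path.join(img_dir, f)))
    img_files = [os.path.join(img_dir, n) for n in names]
    # Assumes each mask shares its image's base name, with a .png extension
    ann_files = [os.path.join(ann_dir, os.path.splitext(n)[0] + ".png")
                 for n in names]

    def decode_pair(img_path, ann_path):
        # The pet photos are JPEG; swap in decode_png for other formats
        img = tf.image.decode_jpeg(tf.io.read_file(img_path), channels=3)
        ann = tf.image.decode_png(tf.io.read_file(ann_path), channels=1)
        img = tf.image.resize(img, [128, 128])
        # Nearest-neighbor keeps mask values as valid class labels
        ann = tf.image.resize(ann, [128, 128], method="nearest")
        return img, ann

    ds = tf.data.Dataset.from_tensor_slices((img_files, ann_files))
    return ds.map(decode_pair, num_parallel_calls=tf.data.experimental.AUTOTUNE)
```

The map/shuffle/batch pipeline shown earlier then applies to this dataset unchanged.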