深度学习和目标检测系列教程 10-300:通过torch训练第一个Faster-RCNN模型
@Author:Runsen
上次介紹了Faster-RCNN模型,那么今天就開始訓練第一個Faster-RCNN模型。
本文將展示如何在水果圖像數據集上使用Faster-RCNN模型。
代碼的靈感來自此處的 Pytorch 文檔教程和Kaggle
- https://pytorch.org/tutorials/intermediate/torchvision_tutorial.html
- https://www.kaggle.com/yerramvarun/fine-tuning-faster-rcnn-using-pytorch/
這是我目前見到RCNN最好的教程
數據集來源:https://www.kaggle.com/mbkinaci/fruit-images-for-object-detection
由于很多對象檢測代碼是相同的,并且必須編寫,torch為我們提供了相關的代碼,直接克隆復制到工作目錄中。
git clone https://github.com/pytorch/vision.git
cd vision && git checkout v0.3.0 && cd ..
cp vision/references/detection/utils.py ./
cp vision/references/detection/transforms.py ./
cp vision/references/detection/coco_eval.py ./
cp vision/references/detection/engine.py ./
cp vision/references/detection/coco_utils.py ./
下載的數據集,在train和test文件夾中存在對應的xml和jpg文件。
import os import numpy as np import cv2 import torch import matplotlib.patches as patches import albumentations as A from albumentations.pytorch.transforms import ToTensorV2 from matplotlib import pyplot as plt from torch.utils.data import Dataset from xml.etree import ElementTree as et from torchvision import transforms as torchtransclass FruitImagesDataset(torch.utils.data.Dataset):def __init__(self, files_dir, width, height, transforms=None):self.transforms = transformsself.files_dir = files_dirself.height = heightself.width = widthself.imgs = [image for image in sorted(os.listdir(files_dir))if image[-4:] == '.jpg']self.classes = [_,'apple', 'banana', 'orange']def __getitem__(self, idx):img_name = self.imgs[idx]image_path = os.path.join(self.files_dir, img_name)# reading the images and converting them to correct size and colorimg = cv2.imread(image_path)img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32)img_res = cv2.resize(img_rgb, (self.width, self.height), cv2.INTER_AREA)# diving by 255img_res /= 255.0# annotation fileannot_filename = img_name[:-4] + '.xml'annot_file_path = os.path.join(self.files_dir, annot_filename)boxes = []labels = []tree = et.parse(annot_file_path)root = tree.getroot()# cv2 image gives size as height x widthwt = img.shape[1]ht = img.shape[0]# box coordinates for xml files are extracted and corrected for image size givenfor member in root.findall('object'):labels.append(self.classes.index(member.find('name').text))# bounding boxxmin = int(member.find('bndbox').find('xmin').text)xmax = int(member.find('bndbox').find('xmax').text)ymin = int(member.find('bndbox').find('ymin').text)ymax = int(member.find('bndbox').find('ymax').text)xmin_corr = (xmin / wt) * self.widthxmax_corr = (xmax / wt) * self.widthymin_corr = (ymin / ht) * self.heightymax_corr = (ymax / ht) * self.heightboxes.append([xmin_corr, ymin_corr, xmax_corr, ymax_corr])# convert boxes into a torch.Tensorboxes = torch.as_tensor(boxes, dtype=torch.float32)# getting 
the areas of the boxesarea = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])# suppose all instances are not crowdiscrowd = torch.zeros((boxes.shape[0],), dtype=torch.int64)labels = torch.as_tensor(labels, dtype=torch.int64)target = {}target["boxes"] = boxestarget["labels"] = labelstarget["area"] = areatarget["iscrowd"] = iscrowd# image_idimage_id = torch.tensor([idx])target["image_id"] = image_idif self.transforms:sample = self.transforms(image=img_res,bboxes=target['boxes'],labels=labels)img_res = sample['image']target['boxes'] = torch.Tensor(sample['bboxes'])return img_res, targetdef __len__(self):return len(self.imgs)def torch_to_pil(img):return torchtrans.ToPILImage()(img).convert('RGB')def plot_img_bbox(img, target):fig, a = plt.subplots(1, 1)fig.set_size_inches(5, 5)a.imshow(img)for box in (target['boxes']):x, y, width, height = box[0], box[1], box[2] - box[0], box[3] - box[1]rect = patches.Rectangle((x, y),width, height,linewidth=2,edgecolor='r',facecolor='none')a.add_patch(rect)plt.show()def get_transform(train):if train:return A.Compose([A.HorizontalFlip(0.5),ToTensorV2(p=1.0)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})else:return A.Compose([ToTensorV2(p=1.0)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})files_dir = '../input/fruit-images-for-object-detection/train_zip/train' test_dir = '../input/fruit-images-for-object-detection/test_zip/test'dataset = FruitImagesDataset(train_dir, 480, 480)img, target = dataset[78] print(img.shape, '\n', target) plot_img_bbox(torch_to_pil(img), target)輸出如下:
在torch中Faster-RCNN模型導入from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
import torchvision from torchvision.models.detection.faster_rcnn import FastRCNNPredictordef get_object_detection_model(num_classes):# 加載在COCO上預先訓練過的模型(會下載對應的權重)model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)# 獲取分類器的輸入特征數in_features = model.roi_heads.box_predictor.cls_score.in_features# 用新的頭替換預先訓練好的頭model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)return model對象檢測的增強與正常增強不同,因為在這里需要確保 bbox 在轉換后仍然正確與對象對齊。
在這里,添加了隨機翻轉轉換,隨機圖片處理
def get_transform(train):if train:return A.Compose([A.HorizontalFlip(0.5),ToTensorV2(p=1.0) ], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})else:return A.Compose([ToTensorV2(p=1.0)], bbox_params={'format': 'pascal_voc', 'label_fields': ['labels']})現在讓我們準備數據集和數據加載器進行訓練和測試。
# Train/test datasets read the same directory; only training is augmented.
dataset = FruitImagesDataset(files_dir, 480, 480,
                             transforms=get_transform(train=True))
dataset_test = FruitImagesDataset(files_dir, 480, 480,
                                  transforms=get_transform(train=False))

# Deterministic 80/20 train/test split over one shuffled permutation.
torch.manual_seed(1)
indices = torch.randperm(len(dataset)).tolist()

test_split = 0.2
tsize = int(len(dataset) * test_split)
dataset = torch.utils.data.Subset(dataset, indices[:-tsize])
dataset_test = torch.utils.data.Subset(dataset_test, indices[-tsize:])

# BUG FIX: `utils` (copied from vision/references/detection earlier in the
# post) was never imported, so `utils.collate_fn` would raise a NameError.
import utils

# Detection targets are per-image dicts of varying size, so the default
# collate cannot batch them; utils.collate_fn zips samples into tuples.
data_loader = torch.utils.data.DataLoader(
    dataset, batch_size=10, shuffle=True, num_workers=4,
    collate_fn=utils.collate_fn)
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=10, shuffle=False, num_workers=4,
    collate_fn=utils.collate_fn)
準備模型
# BUG FIX: train_one_epoch / evaluate live in engine.py (copied from
# vision/references/detection earlier in the post) but were never imported.
from engine import train_one_epoch, evaluate

# Train on GPU when one is available.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# 3 fruit classes + background.
num_classes = 4

# Build the model via the helper defined above and move it to the device.
model = get_object_detection_model(num_classes)
model.to(device)

# Optimise only the trainable parameters.
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=0.005,
                            momentum=0.9, weight_decay=0.0005)

# Decrease the learning rate 10x every 3 epochs.
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                               step_size=3,
                                               gamma=0.1)

num_epochs = 10
for epoch in range(num_epochs):
    # One pass over the training data, logging every 10 batches.
    train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)
    lr_scheduler.step()
    # COCO-style evaluation on the held-out split.
    evaluate(model, data_loader_test, device=device)
Torchvision 為我們提供了一個將 nms 應用于我們的預測的實用程序,讓我們apply_nms使用它構建一個函數。
讓我們從我們的測試數據集中取一張圖像,看看我們的模型是如何工作的。
我們將首先看到,與實際相比,我們的模型預測了多少個邊界框
# Take a single image from the held-out test split.
img, target = dataset_test[5]

# Switch to inference mode and run the detector; the model takes a list
# of images and returns one prediction dict per image.
model.eval()
with torch.no_grad():
    prediction = model([img.to(device)])[0]

print('predicted #boxes: ', len(prediction['labels']))
print('real #boxes: ', len(target['labels']))
預測#boxes:14
真實#boxes:1
真實數據
print('EXPECTED OUTPUT') plot_img_bbox(torch_to_pil(img), target) print('MODEL OUTPUT') plot_img_bbox(torch_to_pil(img), prediction)你可以看到我們的模型為每個蘋果預測了很多邊界框。讓我們對其應用 nms 并查看最終輸出
nms_prediction = apply_nms(prediction, iou_thresh=0.2) print('NMS APPLIED MODEL OUTPUT') plot_img_bbox(torch_to_pil(img), nms_prediction)算法和代碼邏輯是我目前見到,最好的Faster-RCNN教程:
https://www.kaggle.com/yerramvarun/fine-tuning-faster-rcnn-using-pytorch/
這個RCNN對系統資源的要求非常高,即使在公司的GPU上也會出現內存不足的情況。
主要是DataLoader中的num_workers=4在做多線程。
如何微調RCNN模型,并對resnet 50進行微調。如何更改訓練配置,比如圖像大小、優化器和學習率。如何更好使用Albumentations ,值得去探索。
最后附上整個RCNN的網絡結構
FasterRCNN((transform): GeneralizedRCNNTransform(Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])Resize(min_size=(800,), max_size=1333, mode='bilinear'))(backbone): BackboneWithFPN((body): IntermediateLayerGetter((conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)(bn1): FrozenBatchNorm2d(64, eps=0.0)(relu): ReLU(inplace=True)(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)(layer1): Sequential((0): Bottleneck((conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(64, eps=0.0)(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(64, eps=0.0)(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(256, eps=0.0)(relu): ReLU(inplace=True)(downsample): Sequential((0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(1): FrozenBatchNorm2d(256, eps=0.0)))(1): Bottleneck((conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(64, eps=0.0)(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(64, eps=0.0)(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(256, eps=0.0)(relu): ReLU(inplace=True))(2): Bottleneck((conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(64, eps=0.0)(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(64, eps=0.0)(conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(256, eps=0.0)(relu): ReLU(inplace=True)))(layer2): Sequential((0): Bottleneck((conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(128, eps=0.0)(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), 
bias=False)(bn2): FrozenBatchNorm2d(128, eps=0.0)(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(512, eps=0.0)(relu): ReLU(inplace=True)(downsample): Sequential((0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)(1): FrozenBatchNorm2d(512, eps=0.0)))(1): Bottleneck((conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(128, eps=0.0)(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(128, eps=0.0)(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(512, eps=0.0)(relu): ReLU(inplace=True))(2): Bottleneck((conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(128, eps=0.0)(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(128, eps=0.0)(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(512, eps=0.0)(relu): ReLU(inplace=True))(3): Bottleneck((conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(128, eps=0.0)(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(128, eps=0.0)(conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(512, eps=0.0)(relu): ReLU(inplace=True)))(layer3): Sequential((0): Bottleneck((conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): ReLU(inplace=True)(downsample): Sequential((0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)(1): 
FrozenBatchNorm2d(1024, eps=0.0)))(1): Bottleneck((conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): ReLU(inplace=True))(2): Bottleneck((conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): ReLU(inplace=True))(3): Bottleneck((conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): ReLU(inplace=True))(4): Bottleneck((conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): ReLU(inplace=True))(5): Bottleneck((conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(256, eps=0.0)(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(256, eps=0.0)(conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(1024, eps=0.0)(relu): 
ReLU(inplace=True)))(layer4): Sequential((0): Bottleneck((conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(512, eps=0.0)(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(512, eps=0.0)(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(2048, eps=0.0)(relu): ReLU(inplace=True)(downsample): Sequential((0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)(1): FrozenBatchNorm2d(2048, eps=0.0)))(1): Bottleneck((conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(512, eps=0.0)(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(512, eps=0.0)(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(2048, eps=0.0)(relu): ReLU(inplace=True))(2): Bottleneck((conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn1): FrozenBatchNorm2d(512, eps=0.0)(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)(bn2): FrozenBatchNorm2d(512, eps=0.0)(conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)(bn3): FrozenBatchNorm2d(2048, eps=0.0)(relu): ReLU(inplace=True))))(fpn): FeaturePyramidNetwork((inner_blocks): ModuleList((0): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))(1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))(2): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))(3): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1)))(layer_blocks): ModuleList((0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))(extra_blocks): LastLevelMaxPool()))(rpn): 
RegionProposalNetwork((anchor_generator): AnchorGenerator()(head): RPNHead((conv): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))(cls_logits): Conv2d(256, 3, kernel_size=(1, 1), stride=(1, 1))(bbox_pred): Conv2d(256, 12, kernel_size=(1, 1), stride=(1, 1))))(roi_heads): RoIHeads((box_roi_pool): MultiScaleRoIAlign(featmap_names=['0', '1', '2', '3'], output_size=(7, 7), sampling_ratio=2)(box_head): TwoMLPHead((fc6): Linear(in_features=12544, out_features=1024, bias=True)(fc7): Linear(in_features=1024, out_features=1024, bias=True))(box_predictor): FastRCNNPredictor((cls_score): Linear(in_features=1024, out_features=4, bias=True)(bbox_pred): Linear(in_features=1024, out_features=16, bias=True))) )總結
以上是生活随笔為你收集整理的深度学习和目标检测系列教程 10-300:通过torch训练第一个Faster-RCNN模型的全部內容,希望文章能夠幫你解決所遇到的問題。
- 上一篇: 大众途观消音器哒哒声音
- 下一篇: 斯柯达明锐方向机几个螺丝