import torch
import numpy as np# list the path of the two kind of weight file below
# Paths of the two weight files: PyTorch state-dict in, Darknet binary out.
src_file = '/disk2/pretrained_model/official_yolov3_weights_pytorch.pth'
dst_file = '/disk2/pretrained_model/yolov3.weight'

# ----------------------------- structure of yolov3 -----------------------------
# Backbone (darknet-53): a stem conv+bn, then five stages. Each stage entry
# appears twice: 'layerN.' addresses its downsample conv+bn, and
# 'layerN.residual' addresses its run of residual blocks.
backbone = [
    'module.backbone.',
    'module.backbone.layer1.', 'module.backbone.layer1.residual',  # 1 residual block
    'module.backbone.layer2.', 'module.backbone.layer2.residual',  # 2
    'module.backbone.layer3.', 'module.backbone.layer3.residual',  # 8
    'module.backbone.layer4.', 'module.backbone.layer4.residual',  # 8
    'module.backbone.layer5.', 'module.backbone.layer5.residual',  # 4
]

# Number of residual blocks per darknet-53 stage (keys match 'layerN' above).
num_of_residual = {'layer1': 1, 'layer2': 2, 'layer3': 8, 'layer4': 8, 'layer5': 4}

# Darknet serializes one conv+bn unit as: bn bias, bn weight, bn running_mean,
# bn running_var, conv weight. These lists encode that exact order as state-dict
# key suffixes for the downsample conv and the two convs of a residual block.
ds_convbn = ['ds_bn.bias', 'ds_bn.weight', 'ds_bn.running_mean', 'ds_bn.running_var', 'ds_conv.weight']
convbn1 = ['bn1.bias', 'bn1.weight', 'bn1.running_mean', 'bn1.running_var', 'conv1.weight']
convbn2 = ['bn2.bias', 'bn2.weight', 'bn2.running_mean', 'bn2.running_var', 'conv2.weight']

# Head part: three detection embeddings plus the two connecting cbl blocks.
embeddings = ['module.embedding0.', 'module.embedding1_cbl.', 'module.embedding1.', 'module.embedding2_cbl.', 'module.embedding2.']
convbn = ['bn.bias', 'bn.weight', 'bn.running_mean', 'bn.running_var', 'conv.weight']
# The final detection conv has a bias and no batch-norm; bias precedes weight.
conv = ['conv_out.bias', 'conv_out.weight']
# ------------------------------ load the .pth file ------------------------------
# Load the PyTorch state dict on CPU so the conversion needs no GPU.
# NOTE: in the mangled original this statement was fused onto a comment line
# and therefore never executed.
net = torch.load(src_file, map_location=torch.device('cpu'))

# ---------------------------- write the .weight file ----------------------------
# Open the destination file and write the Darknet header: five int32 values
# [major, minor, revision, seen_lo, seen_hi]. With major=0/minor=2 the "images
# seen" counter is stored as a 64-bit value split across the last two int32s.
fp = open(dst_file, "wb")
header_info = np.array([0, 2, 0, 32013312, 0], dtype=np.int32)
header_info.tofile(fp)
# ------------------------------ write the backbone ------------------------------
# Walk the backbone entries in Darknet file order. The original code repeated
# an identical branch for each of layer1..layer5; the per-stage residual count
# already lives in num_of_residual, so a single parametrized branch suffices.
for layer in backbone:
    stage = layer.split('.')[-2]          # 'backbone' or 'layerN'
    if stage == 'backbone':
        # Stem conv+bn of darknet-53.
        for key in convbn1:
            net[layer + key].data.cpu().numpy().tofile(fp)
    elif stage in num_of_residual:
        if layer.split('.')[-1] == '':
            # 'layerN.' entry: the stage's downsample conv+bn.
            for key in ds_convbn:
                net[layer + key].data.cpu().numpy().tofile(fp)
        else:
            # 'layerN.residual' entry: dump each residual block's two conv+bn
            # units, addressed as '<prefix>_<j>.' in the state dict.
            for j in range(num_of_residual[stage]):
                block_prefix = layer + '_' + str(j) + '.'
                for key in convbn1:
                    net[block_prefix + key].data.cpu().numpy().tofile(fp)
                for key in convbn2:
                    net[block_prefix + key].data.cpu().numpy().tofile(fp)
# -------------------------------- write the head --------------------------------
for embedding in embeddings:
    if embedding.split('_')[-1] == 'cbl.':
        # embedding1_cbl / embedding2_cbl: a single conv+bn unit.
        for key in convbn:
            net[embedding + key].data.cpu().numpy().tofile(fp)
    else:
        # embedding0/1/2: six conv+bn units (sub-modules '0.'..'5.') followed
        # by the final detection conv, which has a bias and no batch-norm.
        for j in range(6):
            unit_prefix = embedding + str(j) + '.'
            for key in convbn:
                net[unit_prefix + key].data.cpu().numpy().tofile(fp)
        for key in conv:
            net[embedding + key].data.cpu().numpy().tofile(fp)
fp.close()
# finish !