RuntimeError: Legacy autograd function with non-static forward method is deprecated.
Environment: RTX 3080 GPU, CUDA 11.1, cuDNN 8.0.5, Python 3.6.4.
Training the DenseFusion model under PyTorch 1.0 failed, so I switched to PyTorch 1.7, which then raised the following error:
Traceback (most recent call last):
  File "./tools/train.py", line 240, in <module>
    main()
  File "./tools/train.py", line 143, in main
    loss, dis, new_points, new_target = criterion(pred_r, pred_t, pred_c, target, model_points, idx, points, opt.w, opt.refine_start)
  File "/home/xsy/anaconda3/envs/python36/lib/python3.6/site-packages/torch/nn/modules/module.py", line 889, in _call_impl
    result = self.forward(*input, **kwargs)
  File "/home/xsy/DenseFusion-Pytorch-1.0/lib/loss.py", line 83, in forward
    return loss_calculation(pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine, self.num_pt_mesh, self.sym_list)
  File "/home/xsy/DenseFusion-Pytorch-1.0/lib/loss.py", line 44, in loss_calculation
    inds = knn(target.unsqueeze(0), pred.unsqueeze(0))
  File "/home/xsy/anaconda3/envs/python36/lib/python3.6/site-packages/torch/autograd/function.py", line 160, in __call__
    "Legacy autograd function with non-static forward method is deprecated. "
RuntimeError: Legacy autograd function with non-static forward method is deprecated. Please use new-style autograd function with static forward method. (Example: https://pytorch.org/docs/stable/autograd.html#torch.autograd.Function)
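The traceback points at the root cause: KNearestNeighbor (from DenseFusion's lib/knn) is an old-style torch.autograd.Function that is instantiated and then called like an object, with forward as an instance method. PyTorch deprecated this pattern and, from around version 1.5, rejects it with the RuntimeError above; PyTorch 1.0 still accepted it, which is why the code only breaks after the upgrade to 1.7. In the new-style API that the error message links to, forward and backward are static methods and the function is invoked through .apply(). A minimal sketch of that API (illustration only, not DenseFusion code):

import torch
from torch.autograd import Function

class Double(Function):
    # New style: forward/backward are static methods; ctx replaces self
    # as the place to stash anything backward will need.
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return x * 2

    @staticmethod
    def backward(ctx, grad_output):
        # Return one gradient per input of forward.
        return grad_output * 2

x = torch.randn(4, requires_grad=True)
y = Double.apply(x)   # invoked via .apply(), never Double()(x)
y.sum().backward()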
Below is my loss function (lib/loss.py):

from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import torch
import time
import numpy as np
import torch.nn as nn
import random
import torch.backends.cudnn as cudnn
from lib.knn.__init__ import KNearestNeighbor


def loss_calculation(pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine, num_point_mesh, sym_list):
    knn = KNearestNeighbor(1)  # legacy-style autograd Function: instantiated here, called below
    bs, num_p, _ = pred_c.size()

    # normalize the predicted quaternions
    pred_r = pred_r / (torch.norm(pred_r, dim=2).view(bs, num_p, 1))

    # per-point rotation matrices built from the quaternions
    base = torch.cat(((1.0 - 2.0*(pred_r[:, :, 2]**2 + pred_r[:, :, 3]**2)).view(bs, num_p, 1),
                      (2.0*pred_r[:, :, 1]*pred_r[:, :, 2] - 2.0*pred_r[:, :, 0]*pred_r[:, :, 3]).view(bs, num_p, 1),
                      (2.0*pred_r[:, :, 0]*pred_r[:, :, 2] + 2.0*pred_r[:, :, 1]*pred_r[:, :, 3]).view(bs, num_p, 1),
                      (2.0*pred_r[:, :, 1]*pred_r[:, :, 2] + 2.0*pred_r[:, :, 3]*pred_r[:, :, 0]).view(bs, num_p, 1),
                      (1.0 - 2.0*(pred_r[:, :, 1]**2 + pred_r[:, :, 3]**2)).view(bs, num_p, 1),
                      (-2.0*pred_r[:, :, 0]*pred_r[:, :, 1] + 2.0*pred_r[:, :, 2]*pred_r[:, :, 3]).view(bs, num_p, 1),
                      (-2.0*pred_r[:, :, 0]*pred_r[:, :, 2] + 2.0*pred_r[:, :, 1]*pred_r[:, :, 3]).view(bs, num_p, 1),
                      (2.0*pred_r[:, :, 0]*pred_r[:, :, 1] + 2.0*pred_r[:, :, 2]*pred_r[:, :, 3]).view(bs, num_p, 1),
                      (1.0 - 2.0*(pred_r[:, :, 1]**2 + pred_r[:, :, 2]**2)).view(bs, num_p, 1)),
                     dim=2).contiguous().view(bs * num_p, 3, 3)

    ori_base = base
    base = base.contiguous().transpose(2, 1).contiguous()
    model_points = model_points.view(bs, 1, num_point_mesh, 3).repeat(1, num_p, 1, 1).view(bs * num_p, num_point_mesh, 3)
    target = target.view(bs, 1, num_point_mesh, 3).repeat(1, num_p, 1, 1).view(bs * num_p, num_point_mesh, 3)
    ori_target = target
    pred_t = pred_t.contiguous().view(bs * num_p, 1, 3)
    ori_t = pred_t
    points = points.contiguous().view(bs * num_p, 1, 3)
    pred_c = pred_c.contiguous().view(bs * num_p)

    pred = torch.add(torch.bmm(model_points, base), points + pred_t)

    if not refine:
        if idx[0].item() in sym_list:
            target = target[0].transpose(1, 0).contiguous().view(3, -1)
            pred = pred.permute(2, 0, 1).contiguous().view(3, -1)
            inds = knn(target.unsqueeze(0), pred.unsqueeze(0))  # loss.py line 44: calling the legacy Function raises the RuntimeError
            target = torch.index_select(target, 1, inds.view(-1).detach() - 1)
            target = target.view(3, bs * num_p, num_point_mesh).permute(1, 2, 0).contiguous()
            pred = pred.view(3, bs * num_p, num_point_mesh).permute(1, 2, 0).contiguous()

    dis = torch.mean(torch.norm((pred - target), dim=2), dim=1)
    loss = torch.mean((dis * pred_c - w * torch.log(pred_c)), dim=0)

    pred_c = pred_c.view(bs, num_p)
    how_max, which_max = torch.max(pred_c, 1)
    dis = dis.view(bs, num_p)

    t = ori_t[which_max[0]] + points[which_max[0]]
    points = points.view(1, bs * num_p, 3)

    ori_base = ori_base[which_max[0]].view(1, 3, 3).contiguous()
    ori_t = t.repeat(bs * num_p, 1).contiguous().view(1, bs * num_p, 3)
    new_points = torch.bmm((points - ori_t), ori_base).contiguous()

    new_target = ori_target[0].view(1, num_point_mesh, 3).contiguous()
    ori_t = t.repeat(num_point_mesh, 1).contiguous().view(1, num_point_mesh, 3)
    new_target = torch.bmm((new_target - ori_t), ori_base).contiguous()

    # print('------------> ', dis[0][which_max[0]].item(), pred_c[0][which_max[0]].item(), idx[0].item())

    del knn
    return loss, dis[0][which_max[0]], new_points.detach(), new_target.detach()


class Loss(_Loss):

    def __init__(self, num_points_mesh, sym_list):
        super(Loss, self).__init__(True)
        self.num_pt_mesh = num_points_mesh
        self.sym_list = sym_list

    def forward(self, pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine):
        return loss_calculation(pred_r, pred_t, pred_c, target, model_points, idx, points, w, refine, self.num_pt_mesh, self.sym_list)

Could anyone advise how to fix this?
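One possible workaround, sketched under the assumption that only the nearest-neighbor indices are needed (the code already calls .detach() on inds, so no gradient has to flow through the lookup): replace the custom CUDA KNearestNeighbor with plain tensor ops, which need no autograd Function at all. knn_one is a hypothetical helper name, and this is an untested sketch, not the DenseFusion authors' fix:

import torch

def knn_one(ref, query):
    # ref: (1, 3, N) reference points; query: (1, 3, M) query points,
    # matching the shapes at the failing call
    #   inds = knn(target.unsqueeze(0), pred.unsqueeze(0))
    dist = torch.cdist(query.transpose(1, 2), ref.transpose(1, 2))  # (1, M, N) pairwise distances
    inds = torch.argmin(dist, dim=2)                                # (1, M), 0-based index of nearest ref point
    return inds + 1  # 1-based like the original CUDA knn; the caller subtracts 1 again

With this, loss_calculation can drop knn = KNearestNeighbor(1) and del knn, and the failing line becomes inds = knn_one(target.unsqueeze(0), pred.unsqueeze(0)). Alternatively, lib/knn itself can be ported to the new-style API: move the body of its forward into a @staticmethod def forward(ctx, ref, query) and call it as KNearestNeighbor.apply(...); whether that is a one-line change depends on how the compiled kernel is invoked inside that file.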