Error fix: uniform_sampling.compute replaced by uniform_sampling.filter

In PCL 1.8.1, uniform_sampling.compute has been replaced by uniform_sampling.filter; code that still calls compute fails to build. UniformSampling now behaves like a regular filter: filter() writes the downsampled cloud directly into the output, so the intermediate index cloud and the pcl::copyPointCloud step from the old code are no longer needed.

//uniform_sampling.compute (sampled_indices);                              // old pre-1.8 API
//pcl::copyPointCloud(*scene, sampled_indices.points, *scene_keypoints);   // no longer needed
uniform_sampling.filter(*scene_keypoints); // keypoints obtained by downsampling
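
For reference, here is a minimal compilable sketch of the new call sequence, assuming PCL >= 1.8; the file name scene.pcd and the 0.03 search radius are illustrative placeholders, not values from the original post:

#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/filters/uniform_sampling.h> // moved out of pcl/keypoints in PCL 1.8

int main ()
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr scene (new pcl::PointCloud<pcl::PointXYZ>);
  pcl::PointCloud<pcl::PointXYZ>::Ptr scene_keypoints (new pcl::PointCloud<pcl::PointXYZ>);

  // "scene.pcd" is a placeholder path for illustration
  if (pcl::io::loadPCDFile ("scene.pcd", *scene) < 0)
    return (-1);

  pcl::UniformSampling<pcl::PointXYZ> uniform_sampling;
  uniform_sampling.setInputCloud (scene);
  uniform_sampling.setRadiusSearch (0.03f); // example radius; tune to your data
  // replaces the old compute (sampled_indices) + copyPointCloud pair
  uniform_sampling.filter (*scene_keypoints);

  return (0);
}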
