no.5_Getting 4 Liters of Water

This post shows how to measure exactly 4 liters of water using two jugs with capacities of 5 liters and 3 liters, reasoning both backwards and forwards. The steps: 1. pour 3 liters into the 5-liter jug; 2. fill the 3-liter jug again and top up the 5-liter jug until it is full; 3. exactly 1 liter is left in the 3-liter jug; 4. empty the 5-liter jug and pour in the reserved 1 liter; 5. fill the 3-liter jug once more and pour it into the 5-liter jug, giving 4 liters.


There are two jugs with capacities of 5 liters and 3 liters, and an unlimited supply of water. How can you measure out exactly 4 liters using only these two jugs?

First, one way to look at it:

The problem can be read as: combine some number of 5s and 3s by addition and subtraction to reach 4.
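For instance, 3 + 3 − 5 = 1 and then 1 + 3 = 4, i.e. 4 = 3·3 − 5; equally, 4 = 2·5 − 2·3. Since gcd(5, 3) = 1, integer combinations of 5 and 3 can reach any whole number, which is why the puzzle is solvable at all.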

Backward reasoning:
We cannot reach 4 by subtracting from 3, so the final operation must subtract from 5: 5 − 1 = 4, which reduces the problem to obtaining 1 liter of water.
That 1 liter can be obtained by subtracting from 3: 3 − 2 = 1, which reduces the problem to obtaining 2 liters;
and finally: 5 − 3 = 2.
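Chaining these together gives 4 = 5 − (3 − (5 − 3)): fill the 5-liter jug, use it to fill the 3-liter jug (leaving 2 liters), empty the 3-liter jug and pour the 2 liters in, then top the 3-liter jug up from a freshly filled 5-liter jug, leaving 4 liters. The forward procedure below instead realizes the complementary identity 4 = (3 + 3 − 5) + 3, starting from the 3-liter jug.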
 

Not entirely clear yet? Don't panic:

Forward reasoning:

1. Fill the 3-liter jug and pour it into the 5-liter jug;
2. Fill the 3-liter jug again and pour it into the 5-liter jug until the 5-liter jug is full;
3. The 3-liter jug now holds exactly 1 liter;
4. Empty the 5-liter jug, then pour the 1 liter into it;
5. Fill the 3-liter jug a third time and pour it into the 5-liter jug, giving 1 + 3 = 4 liters.
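The steps are easy to check mechanically. Below is a minimal Python sketch (not part of the original post; the `pour` helper and jug variable names are mine) that replays the five steps and verifies that the 5-liter jug ends up holding 4 liters:

```python
def pour(src, dst, dst_capacity):
    """Pour from src into dst until src is empty or dst is full.
    Returns the new (src, dst) amounts."""
    moved = min(src, dst_capacity - dst)
    return src - moved, dst + moved

CAP3, CAP5 = 3, 5
jug3, jug5 = 0, 0

jug3 = CAP3                          # 1. fill the 3 L jug ...
jug3, jug5 = pour(jug3, jug5, CAP5)  #    ... and empty it into the 5 L jug
jug3 = CAP3                          # 2. fill the 3 L jug again ...
jug3, jug5 = pour(jug3, jug5, CAP5)  #    ... top up the 5 L jug until full
assert jug3 == 1                     # 3. exactly 1 L stays in the 3 L jug
jug5 = 0                             # 4. dump the 5 L jug ...
jug3, jug5 = pour(jug3, jug5, CAP5)  #    ... and move the leftover 1 L into it
jug3 = CAP3                          # 5. fill the 3 L jug a third time ...
jug3, jug5 = pour(jug3, jug5, CAP5)  #    ... and add it: 1 + 3 = 4
assert jug5 == 4
print(jug5)  # 4
```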

keep moving!!! lmdhk!!!
