为什么你要在Django项目中开始使用pathlib而不是os.path处理拼接文件路径

本文对比了Python的pathlib模块和os.path模块,阐述了Django从3.1版开始转向使用pathlib的原因。通过示例展示了pathlib在路径拼接、获取目录信息和创建文件夹等方面的简洁性和灵活性,建议开发者考虑在Python 3.6及Django 3.1之后的项目中采用pathlib。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

Python 3.4版本及以后的标准库添加了新的模块pathlib,它使用面向对象的编程方式来表示和处理文件系统路径。Django 3.1版本以后也将首选使用pathlib来拼接文件路径,来替代传统的os.path模块。小编我今天就对比下pathlib模块和os.path模块的基本用法,看完后你就会知道为什么pathlib会胜出的原因了。尽管未来的Django版本依然支持os.path模块,但使用pathlib会让你显得更加与时俱进。

简单对比

Django 3.0及以前版本默认配置文件settings.py中项目根目录BASE_DIR是使用os.path生成的,意思是项目根目录为settings.py所在文件夹的上级文件夹。

import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

Django 3.1及以后使用pathlib的代码更简洁,且使用了链式查找,意思是项目根目录为settings.py文件所在路径的上两级目录:

from pathlib import Path

BASE_DIR = Path(__file__).resolve().parent.parent

# NOTE(review): the original paste contained this module twice, verbatim,
# followed by the runtime error it produced:
#   处理错误: 视频文件不存在: C:\Users\16660\Desktop\网页搭建\Behaviewer\temp\1.mp4
# The duplicate copy is removed here. That error means the client asked for a
# video that was never uploaded into <BASE_DIR>/temp — the path handling below
# is fixed, but the upload step must also be confirmed on the caller's side.
import os
import cv2
import base64
import json
import numpy as np
from channels.generic.websocket import AsyncWebsocketConsumer
from django.conf import settings
from ultralytics import YOLO
import time
import asyncio
import logging
import uuid

from index.page_4_1_auto_set import image_detect

logger = logging.getLogger(__name__)


class VideoDetectionConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer that runs YOLO object detection over an uploaded
    video, writes an annotated copy under MEDIA_ROOT/videos/ and streams each
    annotated frame back to the client as a base64-encoded JPEG."""

    async def connect(self):
        logger.info(f"WebSocket 连接尝试: {self.scope}")
        await self.accept()
        logger.info("WebSocket 连接已建立")
        # Per-connection timing/FPS accounting.
        self.last_time = time.time()
        self.start_time = time.time()
        self.frame_count = 0
        self.total_processing_time = 0

    async def disconnect(self, close_code):
        pass

    async def receive(self, text_data=None, bytes_data=None):
        """Handle a 'start_detection' request: validate paths, then launch the
        detection loop as a background task so the socket stays responsive."""
        if not text_data:
            return
        text_data_json = json.loads(text_data)
        action = text_data_json.get('action')
        video_name = text_data_json.get('video')
        if action != 'start_detection':
            return
        # Ensure the shared temp directory exists (exist_ok avoids the
        # check-then-create race of the original exists()/makedirs() pair).
        temp_dir = os.path.join(settings.BASE_DIR, 'temp')
        os.makedirs(temp_dir, exist_ok=True)
        video_path = os.path.join(temp_dir, video_name)
        if not os.path.exists(video_path):
            await self.send(text_data=json.dumps({
                'type': 'error',
                'message': f'视频文件不存在: {video_path}'
            }))
            return
        # BUG FIX: the original passed an absolute Windows path
        # ("C:/Users/16660/.../models/best.pt") as the SECOND argument of
        # os.path.join(), which makes join() discard BASE_DIR entirely and
        # hard-codes one developer's machine. Resolve the model relative to
        # the project root instead.
        model_path = os.path.join(settings.BASE_DIR, 'models', 'best.pt')
        output_video_path = os.path.join(settings.MEDIA_ROOT, 'videos', video_name)
        os.makedirs(os.path.dirname(output_video_path), exist_ok=True)
        # Run detection in the background; receive() returns immediately.
        asyncio.create_task(
            self.detect_objects_in_video(model_path, video_path, output_video_path))

    async def detect_objects_in_video(self, model_path, video_path, output_path):
        """Detect objects frame-by-frame with YOLO, write the annotated video
        to output_path, and stream each frame plus progress/FPS to the client.

        Sends 'frame' messages per frame, one 'end' message on completion, or
        an 'error' message on failure.
        """
        cap = None
        out = None
        try:
            model = YOLO(model_path)
            cap = cv2.VideoCapture(video_path)
            if not cap.isOpened():
                await self.send(text_data=json.dumps({
                    'type': 'error',
                    'message': f'无法打开视频文件: {video_path}'
                }))
                return
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = cap.get(cv2.CAP_PROP_FPS)
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
            frame_index = 0
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break
                frame_index += 1
                start_time = time.time()
                results = model(frame)
                annotated_frame = results[0].plot()
                processing_time = time.time() - start_time
                self.total_processing_time += processing_time
                self.frame_count += 1
                current_fps = 1.0 / processing_time if processing_time > 0 else 0
                # Overlay the instantaneous FPS on the annotated frame.
                cv2.putText(annotated_frame, f"FPS: {current_fps:.2f}", (10, 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
                out.write(annotated_frame)
                _, buffer = cv2.imencode('.jpg', annotated_frame)
                frame_base64 = base64.b64encode(buffer).decode('utf-8')
                await self.send(text_data=json.dumps({
                    'type': 'frame',
                    'frame': frame_base64,
                    'objects': len(results[0].boxes),
                    'fps': current_fps,
                    'progress': frame_index / total_frames,
                }))
                # Small delay to throttle the outgoing frame rate.
                await asyncio.sleep(0.01)
            avg_fps = (self.frame_count / self.total_processing_time
                       if self.total_processing_time > 0 else 0)
            output_video_url = f'{settings.MEDIA_URL}videos/{os.path.basename(output_path)}'
            await self.send(text_data=json.dumps({
                'type': 'end',
                'output_video_url': output_video_url,
                'total_frames': total_frames,
                'avg_fps': avg_fps,
                'fps': fps,
            }))
        except Exception as e:
            await self.send(text_data=json.dumps({
                'type': 'error',
                'message': f'处理错误: {str(e)}'
            }))
            # Replaces the original's inline `import traceback;
            # traceback.print_exc()` with structured logging.
            logger.exception("detect_objects_in_video failed")
        finally:
            # BUG FIX: the original released cap/out only on the success path,
            # leaking the capture and the half-written output file on any
            # exception.
            if cap is not None:
                cap.release()
            if out is not None:
                out.release()


class VideoProcessingConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer that runs the classic-CV pipeline (image_detect)
    over an uploaded video, streaming processed frames and progress updates."""

    async def connect(self):
        logger.info("WebSocket 连接已建立")
        await self.accept()
        self.processing = False      # loop guard; cleared to cancel a job
        self.video_path = None
        self.config = None           # dict of image_detect parameters
        self.temp_video_path = None  # file under <BASE_DIR>/temp
        self.cap = None              # live cv2.VideoCapture, if any
        self.last_update_time = time.time()

    async def disconnect(self, close_code):
        logger.info("WebSocket 连接已关闭")
        self.processing = False
        # Release the capture if a job was mid-flight.
        if self.cap:
            self.cap.release()
            logger.info("视频捕获资源已释放")
        # Best-effort cleanup of the temp upload.
        if self.temp_video_path and os.path.exists(self.temp_video_path):
            try:
                os.remove(self.temp_video_path)
                logger.info(f"已删除临时文件: {self.temp_video_path}")
            except Exception as e:
                logger.error(f"删除临时文件失败: {str(e)}")

    async def receive(self, text_data):
        """Dispatch 'start_processing' / 'stop_processing' commands."""
        try:
            data = json.loads(text_data)
            command = data.get('command')
            if command == 'start_processing':
                # Stop any processing already in progress.
                self.processing = False
                video_filename = data.get('video_path')
                self.config = data.get('config')
                if not video_filename or not self.config:
                    await self.send_error('缺少必要参数: video_path 或 config')
                    return
                # Unified temp directory shared with the upload view.
                temp_dir = os.path.join(settings.BASE_DIR, 'temp')
                os.makedirs(temp_dir, exist_ok=True)
                self.temp_video_path = os.path.join(temp_dir, video_filename)
                logger.info(f"视频文件路径: {self.temp_video_path}")
                if not os.path.exists(self.temp_video_path):
                    await self.send_error(f'视频文件不存在: {self.temp_video_path}')
                    return
                if not self.is_valid_video_file(self.temp_video_path):
                    await self.send_error(f'无效的视频格式: {self.temp_video_path}')
                    return
                self.processing = True
                self.last_update_time = time.time()
                asyncio.create_task(self.process_video())
            elif command == 'stop_processing':
                self.processing = False
                await self.send_status('处理已停止')
        except Exception as e:
            logger.error(f"接收消息错误: {str(e)}")
            await self.send_error(f'处理错误: {str(e)}')

    def is_valid_video_file(self, file_path):
        """Return True if OpenCV can open the file and reports sane metadata
        (positive frame count and dimensions)."""
        try:
            cap = cv2.VideoCapture(file_path)
            if not cap.isOpened():
                return False
            frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            cap.release()
            return frame_count > 0 and width > 0 and height > 0
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            return False

    async def send_error(self, message):
        """Send a typed error message to the client."""
        await self.send(text_data=json.dumps({
            'type': 'error',
            'message': message
        }))

    async def send_status(self, message):
        """Send a typed status message to the client."""
        await self.send(text_data=json.dumps({
            'type': 'status',
            'message': message
        }))

    async def send_progress(self, frame_count, total_frames, fps):
        """Send a progress update (integer percent, frames done, source FPS)."""
        progress = int((frame_count / total_frames) * 100)
        await self.send(text_data=json.dumps({
            'type': 'progress',
            'progress': progress,
            'processed_frames': frame_count,
            'fps': fps
        }))

    async def process_video(self):
        """Process frames with image_detect() and stream results until the
        video ends or self.processing is cleared."""
        try:
            self.cap = cv2.VideoCapture(self.temp_video_path)
            if not self.cap.isOpened():
                await self.send_error(f"无法打开视频文件: {self.temp_video_path}")
                return
            total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
            fps = self.cap.get(cv2.CAP_PROP_FPS)
            width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            logger.info(f"视频信息: {total_frames}帧, {fps} FPS, {width}x{height}")
            await self.send(text_data=json.dumps({
                'type': 'info',
                'total_frames': total_frames,
                'fps': fps
            }))
            frame_count = 0
            # BUG FIX: processed_frame was referenced after the loop without a
            # prior assignment; if the first read failed it raised NameError.
            processed_frame = None
            while self.processing and self.cap.isOpened():
                ret, frame = self.cap.read()
                if not ret:
                    break
                processed_frame, _ = image_detect(
                    frame,
                    self.config['roi'],
                    currentback=self.config.get('currentback', 0),
                    kernal_erode=self.config.get('kernal_erode', 1),
                    kernal_dilate=self.config.get('kernal_dilate', 1),
                    kernal_erode_2=self.config.get('kernal_erode_2', 1),
                    min_area=self.config.get('min_area', 1),
                    max_area=self.config.get('max_area', 10000),
                    adjust_threshold=self.config.get('adjust_threshold', 150),
                    feature=self.config.get('feature', 1)
                )
                _, buffer = cv2.imencode('.jpg', processed_frame)
                frame_base64 = base64.b64encode(buffer).decode('utf-8')
                await self.send(text_data=json.dumps({
                    'type': 'frame',
                    'frame': frame_base64,
                    'frame_count': frame_count
                }))
                frame_count += 1
                # Rate-limit progress messages to at most one per second.
                current_time = time.time()
                if current_time - self.last_update_time > 1.0:
                    await self.send_progress(frame_count, total_frames, fps)
                    self.last_update_time = current_time
            if self.processing:
                await self.send_progress(frame_count, total_frames, fps)
                if processed_frame is not None:
                    _, final_buffer = cv2.imencode('.jpg', processed_frame)
                    final_image = base64.b64encode(final_buffer).decode('utf-8')
                else:
                    final_image = None
                await self.send(text_data=json.dumps({
                    'type': 'end',
                    'result_image': final_image,
                    'processed_frames': frame_count,
                    'total_frames': total_frames
                }))
        except Exception as e:
            logger.error(f"视频处理错误: {str(e)}")
            await self.send_error(f'视频处理错误: {str(e)}')
        finally:
            self.processing = False
            if self.cap:
                self.cap.release()
                self.cap = None
最新发布
08-07
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值