“tmpxft_000061d4_00000000-7_cutlassB_f32_aligned_k65536_dropout.cudafe1.cpp [4835/7459] Building CXX object caffe2\CMakeFiles\torch_cuda.dir\__\torch\csrc\jit\tensorexpr\cuda_codegen.cpp.obj E:\PyTorch_Build\pytorch\torch/csrc/jit/tensorexpr/ir.h(395): warning C4805: “==”: 在操作中将类型“c10::impl::ScalarTypeToCPPType<c10::ScalarType::Bool>::type”与类型“T”混合不安全 with [ T=int ] E:\PyTorch_Build\pytorch\torch/csrc/jit/tensorexpr/ir.h(395): note: 模板实例化上下文(最早的实例化上下文)为 E:\PyTorch_Build\pytorch\torch\csrc\jit\tensorexpr\cuda_codegen.cpp(147): note: 查看对正在编译的函数 模板 实例化“bool torch::jit::tensorexpr::immediateEquals<int>(const torch::jit::tensorexpr::ExprPtr &,T)”的引用 with [ T=int ] [4844/7459] Building CUDA object caffe2\CMakeFiles\torch_c...aten\src\ATen\native\quantized\cuda\FakeQuantizeCore.cu.ob FakeQuantizeCore.cu tmpxft_000011c8_00000000-7_FakeQuantizeCore.cudafe1.cpp [4850/7459] Building CUDA object caffe2\CMakeFiles\torch_c...aten\src\ATen\native\sparse\cuda\SparseCsrTensorMath.cu.ob SparseCsrTensorMath.cu tmpxft_00000e1c_00000000-7_SparseCsrTensorMath.cudafe1.cpp [4854/7459] Building CXX object caffe2\torch\CMakeFiles\torch_python.dir\csrc\StorageMethods.cpp.obj FAILED: [code=2] caffe2/torch/CMakeFiles/torch_python.dir/csrc/StorageMethods.cpp.obj C:\PROGRA~2\MICROS~2\2022\BUILDT~1\VC\Tools\MSVC\1444~1.352\bin\Hostx64\x64\cl.exe /nologo /TP -DAT_PER_OPERATOR_HEADERS -DBUILDING_TESTS -DEXPORT_AOTI_FUNCTIONS -DFMT_HEADER_ONLY=1 -DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -DNOMINMAX -DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 -DONNX_NAMESPACE=onnx_torch -DPy_NO_LINK_LIB -DTHP_BUILD_MAIN_LIB -DTORCH_CUDA_USE_NVTX3 -DUSE_CUDA -DUSE_EXTERNAL_MZCRC -DUSE_ITT -DUSE_MIMALLOC -DUSE_NUMPY -DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE=1 -D_UCRT_LEGACY_INFINITY -Dtorch_python_EXPORTS -IE:\PyTorch_Build\pytorch\build\aten\src -IE:\PyTorch_Build\pytorch\aten\src -IE:\PyTorch_Build\pytorch\build -IE:\PyTorch_Build\pytorch -IE:\PyTorch_Build\pytorch\nlohmann -IE:\PyTorch_Build\pytorch\moodycamel -IE:\PyTorch_Build\pytorch\third_party\mimalloc\include -IE:\PyTorch_Build\pytorch\torch\.. -IE:\PyTorch_Build\pytorch\torch\..\aten\src -IE:\PyTorch_Build\pytorch\torch\..\aten\src\TH -IE:\PyTorch_Build\pytorch\build\caffe2\aten\src -IE:\PyTorch_Build\pytorch\build\third_party -IE:\PyTorch_Build\pytorch\build\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\valgrind-headers -IE:\PyTorch_Build\pytorch\torch\..\third_party\gloo -IE:\PyTorch_Build\pytorch\torch\..\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\flatbuffers\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\kineto\libkineto\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\cpp-httplib -IE:\PyTorch_Build\pytorch\torch\..\third_party\nlohmann\include -IE:\PyTorch_Build\pytorch\torch\csrc -IE:\PyTorch_Build\pytorch\torch\csrc\api\include -IE:\PyTorch_Build\pytorch\torch\lib -IE:\PyTorch_Build\pytorch\torch\standalone -IE:\PyTorch_Build\pytorch\torch\lib\libshm_windows -IE:\PyTorch_Build\pytorch\torch\csrc\api -IE:\PyTorch_Build\pytorch\c10\.. -IE:\PyTorch_Build\pytorch\c10\cuda\..\.. 
-IE:\PyTorch_Build\pytorch\third_party\fmt\include -IE:\PyTorch_Build\pytorch\third_party\onnx -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googlemock\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googletest\include -external:IE:\PyTorch_Build\pytorch\third_party\protobuf\src -external:IE:\PyTorch_Build\pytorch\third_party\XNNPACK\include -external:IE:\PyTorch_Build\pytorch\third_party\ittapi\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\eigen -external:IE:\PyTorch_Build\pytorch\third_party\ideep\mkl-dnn\include\oneapi\dnnl -external:IE:\PyTorch_Build\pytorch\third_party\ideep\include -external:IE:\PyTorch_Build\pytorch\INTERFACE -external:IE:\PyTorch_Build\pytorch\third_party\nlohmann\include -external:IE:\PyTorch_Build\pytorch\third_party\concurrentqueue -external:IE:\PyTorch_Build\pytorch\rtx5070_env\lib\site-packages\numpy\_core\include -external:IE:\Python310\Include -external:I"E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\include" -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\pybind11\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\opentelemetry-cpp\api\include -external:IE:\PyTorch_Build\pytorch\third_party\cpp-httplib -external:IE:\PyTorch_Build\pytorch\third_party\NVTX\c\include -external:W0 /DWIN32 /D_WINDOWS /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE /wd4624 /wd4068 /wd4067 /wd4267 /wd4661 /wd4717 /wd4244 /wd4804 /wd4273 -DHAVE_AVX512_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION /Zc:preprocessor /Zc:preprocessor /Zc:preprocessor /O2 /Ob2 /DNDEBUG /bigobj -DNDEBUG -std:c++17 -MD /permissive- /EHsc /bigobj -O2 /utf-8 /showIncludes /Focaffe2\torch\CMakeFiles\torch_python.dir\csrc\StorageMethods.cpp.obj /Fdcaffe2\torch\CMakeFiles\torch_python.dir\ /FS -c E:\PyTorch_Build\pytorch\torch\csrc\StorageMethods.cpp E:\PyTorch_Build\pytorch\torch\csrc\StorageMethods.cpp(9): fatal error C1083: 无法打开包括文件: “libshm.h”: No such file or directory [4855/7459] Building CXX object caffe2\torch\CMakeFiles\torch_python.dir\csrc\Storage.cpp.obj FAILED: [code=2] caffe2/torch/CMakeFiles/torch_python.dir/csrc/Storage.cpp.obj C:\PROGRA~2\MICROS~2\2022\BUILDT~1\VC\Tools\MSVC\1444~1.352\bin\Hostx64\x64\cl.exe /nologo /TP -DAT_PER_OPERATOR_HEADERS -DBUILDING_TESTS -DEXPORT_AOTI_FUNCTIONS -DFMT_HEADER_ONLY=1 -DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -DNOMINMAX -DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 -DONNX_NAMESPACE=onnx_torch -DPy_NO_LINK_LIB -DTHP_BUILD_MAIN_LIB -DTORCH_CUDA_USE_NVTX3 -DUSE_CUDA -DUSE_EXTERNAL_MZCRC -DUSE_ITT -DUSE_MIMALLOC -DUSE_NUMPY -DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE=1 -D_UCRT_LEGACY_INFINITY -Dtorch_python_EXPORTS -IE:\PyTorch_Build\pytorch\build\aten\src -IE:\PyTorch_Build\pytorch\aten\src -IE:\PyTorch_Build\pytorch\build -IE:\PyTorch_Build\pytorch -IE:\PyTorch_Build\pytorch\nlohmann -IE:\PyTorch_Build\pytorch\moodycamel -IE:\PyTorch_Build\pytorch\third_party\mimalloc\include -IE:\PyTorch_Build\pytorch\torch\.. 
-IE:\PyTorch_Build\pytorch\torch\..\aten\src -IE:\PyTorch_Build\pytorch\torch\..\aten\src\TH -IE:\PyTorch_Build\pytorch\build\caffe2\aten\src -IE:\PyTorch_Build\pytorch\build\third_party -IE:\PyTorch_Build\pytorch\build\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\valgrind-headers -IE:\PyTorch_Build\pytorch\torch\..\third_party\gloo -IE:\PyTorch_Build\pytorch\torch\..\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\flatbuffers\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\kineto\libkineto\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\cpp-httplib -IE:\PyTorch_Build\pytorch\torch\..\third_party\nlohmann\include -IE:\PyTorch_Build\pytorch\torch\csrc -IE:\PyTorch_Build\pytorch\torch\csrc\api\include -IE:\PyTorch_Build\pytorch\torch\lib -IE:\PyTorch_Build\pytorch\torch\standalone -IE:\PyTorch_Build\pytorch\torch\lib\libshm_windows -IE:\PyTorch_Build\pytorch\torch\csrc\api -IE:\PyTorch_Build\pytorch\c10\.. -IE:\PyTorch_Build\pytorch\c10\cuda\..\.. -IE:\PyTorch_Build\pytorch\third_party\fmt\include -IE:\PyTorch_Build\pytorch\third_party\onnx -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googlemock\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googletest\include -external:IE:\PyTorch_Build\pytorch\third_party\protobuf\src -external:IE:\PyTorch_Build\pytorch\third_party\XNNPACK\include -external:IE:\PyTorch_Build\pytorch\third_party\ittapi\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\eigen -external:IE:\PyTorch_Build\pytorch\third_party\ideep\mkl-dnn\include\oneapi\dnnl -external:IE:\PyTorch_Build\pytorch\third_party\ideep\include -external:IE:\PyTorch_Build\pytorch\INTERFACE -external:IE:\PyTorch_Build\pytorch\third_party\nlohmann\include -external:IE:\PyTorch_Build\pytorch\third_party\concurrentqueue -external:IE:\PyTorch_Build\pytorch\rtx5070_env\lib\site-packages\numpy\_core\include -external:IE:\Python310\Include -external:I"E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\include" -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\pybind11\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\opentelemetry-cpp\api\include -external:IE:\PyTorch_Build\pytorch\third_party\cpp-httplib -external:IE:\PyTorch_Build\pytorch\third_party\NVTX\c\include -external:W0 /DWIN32 /D_WINDOWS /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE /wd4624 /wd4068 /wd4067 /wd4267 /wd4661 /wd4717 /wd4244 /wd4804 /wd4273 -DHAVE_AVX512_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION /Zc:preprocessor /Zc:preprocessor /Zc:preprocessor /O2 /Ob2 /DNDEBUG /bigobj -DNDEBUG -std:c++17 -MD /permissive- /EHsc /bigobj -O2 /utf-8 /showIncludes /Focaffe2\torch\CMakeFiles\torch_python.dir\csrc\Storage.cpp.obj /Fdcaffe2\torch\CMakeFiles\torch_python.dir\ /FS -c E:\PyTorch_Build\pytorch\torch\csrc\Storage.cpp E:\PyTorch_Build\pytorch\torch\csrc\Storage.cpp(10): fatal error C1083: 无法打开包括文件: “libshm.h”: No such file or directory [4856/7459] Building CXX object caffe2\torch\CMakeFiles\torch_python.dir\csrc\Module.cpp.obj FAILED: [code=2] caffe2/torch/CMakeFiles/torch_python.dir/csrc/Module.cpp.obj C:\PROGRA~2\MICROS~2\2022\BUILDT~1\VC\Tools\MSVC\1444~1.352\bin\Hostx64\x64\cl.exe /nologo /TP -DAT_PER_OPERATOR_HEADERS -DBUILDING_TESTS -DEXPORT_AOTI_FUNCTIONS -DFMT_HEADER_ONLY=1 -DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -DNOMINMAX -DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 
-DONNX_NAMESPACE=onnx_torch -DPy_NO_LINK_LIB -DTHP_BUILD_MAIN_LIB -DTORCH_CUDA_USE_NVTX3 -DUSE_CUDA -DUSE_EXTERNAL_MZCRC -DUSE_ITT -DUSE_MIMALLOC -DUSE_NUMPY -DWIN32_LEAN_AND_MEAN -D_CRT_SECURE_NO_DEPRECATE=1 -D_UCRT_LEGACY_INFINITY -Dtorch_python_EXPORTS -IE:\PyTorch_Build\pytorch\build\aten\src -IE:\PyTorch_Build\pytorch\aten\src -IE:\PyTorch_Build\pytorch\build -IE:\PyTorch_Build\pytorch -IE:\PyTorch_Build\pytorch\nlohmann -IE:\PyTorch_Build\pytorch\moodycamel -IE:\PyTorch_Build\pytorch\third_party\mimalloc\include -IE:\PyTorch_Build\pytorch\torch\.. -IE:\PyTorch_Build\pytorch\torch\..\aten\src -IE:\PyTorch_Build\pytorch\torch\..\aten\src\TH -IE:\PyTorch_Build\pytorch\build\caffe2\aten\src -IE:\PyTorch_Build\pytorch\build\third_party -IE:\PyTorch_Build\pytorch\build\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\valgrind-headers -IE:\PyTorch_Build\pytorch\torch\..\third_party\gloo -IE:\PyTorch_Build\pytorch\torch\..\third_party\onnx -IE:\PyTorch_Build\pytorch\torch\..\third_party\flatbuffers\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\kineto\libkineto\include -IE:\PyTorch_Build\pytorch\torch\..\third_party\cpp-httplib -IE:\PyTorch_Build\pytorch\torch\..\third_party\nlohmann\include -IE:\PyTorch_Build\pytorch\torch\csrc -IE:\PyTorch_Build\pytorch\torch\csrc\api\include -IE:\PyTorch_Build\pytorch\torch\lib -IE:\PyTorch_Build\pytorch\torch\standalone -IE:\PyTorch_Build\pytorch\torch\lib\libshm_windows -IE:\PyTorch_Build\pytorch\torch\csrc\api -IE:\PyTorch_Build\pytorch\c10\.. -IE:\PyTorch_Build\pytorch\c10\cuda\..\.. -IE:\PyTorch_Build\pytorch\third_party\fmt\include -IE:\PyTorch_Build\pytorch\third_party\onnx -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googlemock\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\googletest\googletest\include -external:IE:\PyTorch_Build\pytorch\third_party\protobuf\src -external:IE:\PyTorch_Build\pytorch\third_party\XNNPACK\include -external:IE:\PyTorch_Build\pytorch\third_party\ittapi\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\eigen -external:IE:\PyTorch_Build\pytorch\third_party\ideep\mkl-dnn\include\oneapi\dnnl -external:IE:\PyTorch_Build\pytorch\third_party\ideep\include -external:IE:\PyTorch_Build\pytorch\INTERFACE -external:IE:\PyTorch_Build\pytorch\third_party\nlohmann\include -external:IE:\PyTorch_Build\pytorch\third_party\concurrentqueue -external:IE:\PyTorch_Build\pytorch\rtx5070_env\lib\site-packages\numpy\_core\include -external:IE:\Python310\Include -external:I"E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\include" -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\pybind11\include -external:IE:\PyTorch_Build\pytorch\cmake\..\third_party\opentelemetry-cpp\api\include -external:IE:\PyTorch_Build\pytorch\third_party\cpp-httplib -external:IE:\PyTorch_Build\pytorch\third_party\NVTX\c\include -external:W0 /DWIN32 /D_WINDOWS /EHsc /Zc:__cplusplus /bigobj /FS /utf-8 -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOROCTRACER -DLIBKINETO_NOXPUPTI=ON -DUSE_XNNPACK -DSYMBOLICATE_MOBILE_DEBUG_HANDLE /wd4624 /wd4068 /wd4067 /wd4267 /wd4661 /wd4717 /wd4244 /wd4804 /wd4273 -DHAVE_AVX512_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION /Zc:preprocessor /Zc:preprocessor /Zc:preprocessor /O2 /Ob2 /DNDEBUG /bigobj -DNDEBUG -std:c++17 -MD /permissive- /EHsc /bigobj -O2 /utf-8 /showIncludes /Focaffe2\torch\CMakeFiles\torch_python.dir\csrc\Module.cpp.obj /Fdcaffe2\torch\CMakeFiles\torch_python.dir\ /FS -c E:\PyTorch_Build\pytorch\torch\csrc\Module.cpp 
E:\PyTorch_Build\pytorch\torch\csrc\Module.cpp(34): fatal error C1083: 无法打开包括文件: “libshm.h”: No such file or directory [4872/7459] Building CUDA object caffe2\CMakeFiles\torch_cuda.dir\__\aten\src\ATen\native\cuda\group_norm_kernel.cu.obj group_norm_kernel.cu tmpxft_00005adc_00000000-7_group_norm_kernel.cudafe1.cpp [4873/7459] Building CUDA object caffe2\CMakeFiles\torch_cuda.dir\__\aten\src\ATen\UfuncCUDA_add.cu.obj UfuncCUDA_add.cu tmpxft_00006bdc_00000000-7_UfuncCUDA_add.cudafe1.cpp [4875/7459] Building CUDA object caffe2\CMakeFiles\torch_cuda.dir\__\aten\src\ATen\native\cuda\Unique.cu.obj Unique.cu tmpxft_00000658_00000000-7_Unique.cudafe1.cpp ninja: build stopped: subcommand failed. (rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch>” “PowerShell 7 环境已加载 (版本: 7.5.2) PS C:\Users\Administrator\Desktop> cd E:\PyTorch_Build\pytorch PS E:\PyTorch_Build\pytorch> python -m venv rtx5070_env Error: [Errno 13] Permission denied: 'E:\\PyTorch_Build\\pytorch\\rtx5070_env\\Scripts\\python.exe' PS E:\PyTorch_Build\pytorch> .\rtx5070_env\Scripts\activate (rtx5070_env) PS E:\PyTorch_Build\pytorch> conda install -c conda-forge openblas 3 channel Terms of Service accepted Retrieving notices: done Channels: - conda-forge - defaults - nvidia - pytorch-nightly Platform: win-64 Collecting package metadata (repodata.json): done Solving environment: done ## Package Plan ## environment location: C:\Miniconda3 added / updated specs: - openblas The following packages will be downloaded: package | build ---------------------------|----------------- libopenblas-0.3.30 |pthreads_ha4fe6b2_2 3.8 MB conda-forge openblas-0.3.30 |pthreads_h4a7f399_2 262 KB conda-forge ------------------------------------------------------------ Total: 4.0 MB The following NEW packages will be INSTALLED: libopenblas conda-forge/win-64::libopenblas-0.3.30-pthreads_ha4fe6b2_2 openblas conda-forge/win-64::openblas-0.3.30-pthreads_h4a7f399_2 Proceed ([y]/n)? y Downloading and Extracting Packages: Preparing transaction: done Verifying transaction: done Executing transaction: done (rtx5070_env) PS E:\PyTorch_Build\pytorch> # 设置cuDNN路径 (rtx5070_env) PS E:\PyTorch_Build\pytorch> @" >> set(CUDNN_INCLUDE_DIR "E:/Program Files/NVIDIA/CUNND/v9.12/include/13.0") >> set(CUDNN_LIBRARY "E:/Program Files/NVIDIA/CUNND/v9.12/lib/13.0/x64/cudnn64_9.lib") >> message(STATUS "Applied custom cuDNN settings") >> "@ | Set-Content set_cudnn.cmake (rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch> # 在CMakeLists.txt第一行插入引用 (rtx5070_env) PS E:\PyTorch_Build\pytorch> $cmakeFile = "CMakeLists.txt" (rtx5070_env) PS E:\PyTorch_Build\pytorch> $content = Get-Content $cmakeFile (rtx5070_env) PS E:\PyTorch_Build\pytorch> $newContent = @() (rtx5070_env) PS E:\PyTorch_Build\pytorch> $newContent += "include(set_cudnn.cmake)" (rtx5070_env) PS E:\PyTorch_Build\pytorch> $newContent += $content (rtx5070_env) PS E:\PyTorch_Build\pytorch> $newContent | Set-Content $cmakeFile (rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch> # 安装OpenBLAS (rtx5070_env) PS E:\PyTorch_Build\pytorch> conda install -c conda-forge openblas -y 3 channel Terms of Service accepted Channels: - conda-forge - defaults - nvidia - pytorch-nightly Platform: win-64 Collecting package metadata (repodata.json): done Solving environment: done # All requested packages already installed. 
(rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch> Write-Host "✅ 构建环境优化完成" -ForegroundColor Green ✅ 构建环境优化完成 (rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch> # 检查GPU支持状态 (rtx5070_env) PS E:\PyTorch_Build\pytorch> python -c "import torch; print(f'CUDA可用: {torch.cuda.is_available()}')" Traceback (most recent call last): File "<string>", line 1, in <module> File "E:\PyTorch_Build\pytorch\torch\__init__.py", line 415, in <module> from torch._C import * # noqa: F403 ImportError: DLL load failed while importing _C: 找不到指定的模块。 (rtx5070_env) PS E:\PyTorch_Build\pytorch> (rtx5070_env) PS E:\PyTorch_Build\pytorch> # 查看已完成任务比例 (rtx5070_env) PS E:\PyTorch_Build\pytorch> $total = 7459 (rtx5070_env) PS E:\PyTorch_Build\pytorch> $completed = (Get-ChildItem build -Recurse -File | Measure-Object).Count (rtx5070_env) PS E:\PyTorch_Build\pytorch> $percent = [math]::Round(($completed/$total)*100, 2) (rtx5070_env) PS E:\PyTorch_Build\pytorch> Write-Host "构建进度: $completed/$total ($percent%)" 构建进度: 12552/7459 (168.28%) (rtx5070_env) PS E:\PyTorch_Build\pytorch>”
<think>
We hit compile errors: several files (StorageMethods.cpp, Storage.cpp, Module.cpp) fail with "Cannot open include file: 'libshm.h': No such file or directory".
At the same time, the libshm_windows directory and files we created earlier do not seem to have helped, because the compiler is still looking for libshm.h.
Analysis:
In the PyTorch source tree, libshm.h normally lives under `torch/lib/libshm` (on Linux/macOS). On Windows, PyTorch does not ship this file, so we previously created an empty libshm_windows directory just to get past CMake's directory check.
Now, however, the compilation itself needs to include libshm.h, and that file does not exist in our stub setup.
Solution:
Provide a `libshm.h` in the `torch/lib/libshm_windows` directory we created earlier, containing the necessary declarations (at minimum, whatever is needed to compile).
Steps:
1. Create a `libshm.h` file under `torch/lib/libshm_windows`.
2. Declare in it the functions and classes the failing files reference (at least what StorageMethods.cpp, Storage.cpp and Module.cpp use). Since Windows does not use this shared-memory path, empty or error-returning stubs are acceptable.
Note: on Windows, PyTorch normally uses memory-mapped files rather than POSIX shared memory, so these functions may never be called at runtime; we only need the declarations to get through compilation.
A minimal first draft of `libshm.h` could look like this:
```cpp
#pragma once
#include <cstddef>
#include <string>

// Empty stand-in type, only here so the translation units compile.
struct THManagedMapAllocator {
  // Stub: returns a null pointer; not expected to be called on Windows.
  static THManagedMapAllocator* makeDataPtr(
      const char* deviceStr, void* data, size_t size, const char* handle) {
    return nullptr;
  }
};

// Stub: returns an empty string, only to satisfy the compiler.
inline std::string THPStorage_sharedCacheFilename() {
  return "";
}
```
Note, however, that the draft above is probably not enough, because different source files may use different symbols. Going by the files that failed, we should refine it:
`StorageMethods.cpp` and `Storage.cpp` include `libshm.h` and then call functions from it. We can put the declarations in `libshm.h` and the definitions (empty or error-returning stubs) in `libshm_windows.cpp`.
And since we already created a `libshm_windows.cpp` earlier, the two files (`libshm.h` and `libshm_windows.cpp`) need to work together.
Concrete steps:
1. Create `libshm.h` under `torch/lib/libshm_windows` with the necessary declarations:
```cpp
// torch/lib/libshm_windows/libshm.h
#pragma once
#include <string>
#include <c10/core/Allocator.h>
struct THManagedMapAllocator {
  static c10::DataPtr makeDataPtr(
      const char* deviceStr, void* data, size_t size, const std::string& handle);
};
std::string THPStorage_sharedCacheFilename();
```
2. Modify the `libshm_windows.cpp` we created earlier to define these functions (empty or error-returning stubs):
```cpp
// torch/lib/libshm_windows/libshm_windows.cpp
#include "libshm.h"
#include <c10/core/Allocator.h>
#include <stdexcept>
c10::DataPtr THManagedMapAllocator::makeDataPtr(
    const char* deviceStr, void* data, size_t size, const std::string& handle) {
  // Not supported on Windows: return an empty DataPtr.
  return c10::DataPtr();
}

std::string THPStorage_sharedCacheFilename() {
  // An empty string signals "not supported".
  return "";
}
```
3. Update `torch/lib/libshm_windows/CMakeLists.txt` so the header directory is exported along with the stub source:
```cmake
cmake_minimum_required(VERSION 3.10)
project(libshm_windows)
add_library(shm_windows STATIC libshm_windows.cpp)
# Expose the header directory so other targets can include libshm.h
target_include_directories(shm_windows PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
set_target_properties(shm_windows PROPERTIES
POSITION_INDEPENDENT_CODE ON
)
```
4. Re-run the build.
Note: because a new header was added, re-run CMake (with Ninja, the build then picks up the new file and recompiles the affected objects).
There were also a permission error while creating the virtual environment and a failed `import torch` test at the end; those are likely caused by the environment not being activated correctly or by the incomplete build. Fix the compile errors first.
Since the build was interrupted, we can fix the compile errors and then resume (or restart) it. Only the `libshm_windows` module changed, so an incremental rebuild is worth trying first (see the sketch below), but a clean reconfigure and rebuild is the safer route.
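If you want to try the incremental route, a minimal sketch (it assumes the existing `build/` directory was configured with Ninja and its CMake cache is still valid; `torch_python` is the target that owns the failing objects in the log above):
```powershell
# Re-run the configure step so CMake sees the new libshm.h, then rebuild only
# the torch_python target instead of the full 7459-step build.
cmake -S . -B build
cmake --build build --target torch_python
```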
If you go with the clean rebuild instead, the steps are:
```powershell
# Wipe the build directory (optional; only if problems persist)
Remove-Item build -Recurse -Force
mkdir build
cd build
cmake -GNinja -DBUILD_PYTHON=True -DBUILD_TEST=True -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=E:\PyTorch_Build\pytorch\torch -DCMAKE_PREFIX_PATH=E:\PyTorch_Build\pytorch\rtx5070_env\Lib\site-packages "-DCUDNN_INCLUDE_DIR=E:\Program Files\NVIDIA\CUNND\v9.12\include\13.0" "-DCUDNN_LIBRARY=E:\Program Files\NVIDIA\CUNND\v9.12\lib\13.0\x64\cudnn64_9.lib" "-DCUDNN_ROOT=E:\Program Files\NVIDIA\CUNND\v9.12" -DPython_EXECUTABLE=E:\PyTorch_Build\pytorch\rtx5070_env\Scripts\python.exe -DPython_NumPy_INCLUDE_DIR=E:\PyTorch_Build\pytorch\rtx5070_env\lib\site-packages\numpy\_core\include -DTORCH_BUILD_VERSION=2.9.0a0+git2d31c3d -DTORCH_CUDA_ARCH_LIST=12.0 -DUSE_NUMPY=True -DUSE_OPENBLAS=1 ..
ninja install
```
If other errors appear after these steps, analyze the new build log.
Note: building PyTorch on Windows is involved and all sorts of issues can come up; if problems persist, consider the official binary packages or building on Linux instead.
Finally, about the failed import (ImportError: DLL load failed while importing _C: the specified module could not be found): this usually means DLLs that the compiled extension depends on are not on PATH, or that Python is picking up the not-yet-built `torch/` source directory because it was run from the repository root. Before running, add the CUDA bin directory (e.g. E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\bin) to PATH.
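As a quick post-build check, a minimal sketch (the CUDA path is the one from the log above; adjust it to your installation, and run it outside the source tree so the installed torch is imported):
```powershell
# Put the CUDA runtime DLLs on PATH for this session, then try the import.
$env:Path = "E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0\bin;$env:Path"
cd ~   # leave E:\PyTorch_Build\pytorch so Python does not import the source tree
python -c "import torch; print(torch.__version__, torch.cuda.is_available())"
```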
Nothing needs to run during compilation, though, so fix the compile errors first.
The build log shows most of the work is already done (around 4850/7459 steps), so recompiling only the failed parts is an option, but a complete rebuild is the safest way to keep everything consistent.
Summary: add the libshm.h header plus its implementation, then rebuild.
</think>
The build failures come down to the missing `libshm.h` header, a Windows-specific gap. We need to give PyTorch a Windows-compatible shared-memory shim.
### Complete solution
#### 1. Create the missing header
Create `libshm.h` under `torch/lib/libshm_windows`:
```cpp:torch/lib/libshm_windows/libshm.h
#pragma once
// Windows-only stand-in for the shared-memory interface
#ifdef _WIN32
#include <windows.h>
#include <string>

struct THManagedMapAllocator {
  static void* makeDataPtr(
      const char* device, void* data, size_t size, const char* handle);
};

inline void* THManagedMapAllocator::makeDataPtr(
    const char* device, void* data, size_t size, const char* handle) {
  // Stub: PyTorch uses memory-mapped files on Windows, so this path is unused.
  return nullptr;
}

inline std::string THPStorage_sharedCacheFilename() {
  return "torch_shm_cache";
}
#endif // _WIN32
```
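If you prefer to create the file from PowerShell, in the same here-string style used for `set_cudnn.cmake` in the log above, a sketch (paste the full header body from step 1 between the here-string markers):
```powershell
# Create the directory and write libshm.h; the "..." line is a placeholder for
# the full header contents shown in step 1.
New-Item -ItemType Directory -Force -Path torch\lib\libshm_windows | Out-Null
@"
#pragma once
// ... full contents of libshm.h from step 1 ...
"@ | Set-Content torch\lib\libshm_windows\libshm.h -Encoding utf8
```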
#### 2. Update the CMake configuration
The failing compile commands already pass `-IE:\PyTorch_Build\pytorch\torch\lib\libshm_windows`, so placing the header there is enough for the compiler to find it; the CMakeLists.txt below additionally exports that directory for anything that links against the stub library. Add the header path in `torch/lib/libshm_windows/CMakeLists.txt`:
```cmake:torch/lib/libshm_windows/CMakeLists.txt
cmake_minimum_required(VERSION 3.10)
project(libshm_windows)
add_library(shm_windows STATIC libshm_windows.cpp)
# Expose the header directory to dependent targets
target_include_directories(shm_windows PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}
)
set_target_properties(shm_windows PROPERTIES
POSITION_INDEPENDENT_CODE ON
)
```
#### 3. Fix the Python environment
Create a repair script for the virtual environment:
```powershell:fix_venv.ps1
# Remove the broken virtual environment
Remove-Item rtx5070_env -Recurse -Force -ErrorAction SilentlyContinue
# Create a fresh environment
python -m venv rtx5070_env
# Activate it
.\rtx5070_env\Scripts\Activate.ps1
# Install build prerequisites
pip install numpy ninja pybind11
```
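The `Errno 13 Permission denied` in the log is most likely python.exe being locked because the old environment was still active. A hedged usage sketch for running the repair script:
```powershell
# If an environment is still active (its python.exe may be locked), deactivate it,
# then allow local scripts for this session and run the repair script.
if (Get-Command deactivate -ErrorAction SilentlyContinue) { deactivate }
Set-ExecutionPolicy Bypass -Scope Process -Force
.\fix_venv.ps1
```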
#### 4. Full build script
```powershell:build_pytorch.ps1
# Environment variables
$env:CUDA_PATH = "E:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v13.0"
$env:Path = "$env:CUDA_PATH\bin;$env:Path"

# Repair the virtual environment
.\fix_venv.ps1

# CMake configuration arguments (kept as an array so paths with spaces survive)
$cmake_args = @(
    "-GNinja",
    "-DBUILD_PYTHON=ON",
    "-DUSE_CUDA=ON",
    "-DUSE_CUDNN=ON",
    "-DCUDNN_INCLUDE_DIR=E:\Program Files\NVIDIA\CUNND\v9.12\include\13.0",
    "-DCUDNN_LIBRARY=E:\Program Files\NVIDIA\CUNND\v9.12\lib\13.0\x64\cudnn64_9.lib",
    "-DTORCH_CUDA_ARCH_LIST=12.0",
    "-DCMAKE_BUILD_TYPE=Release"
)

# Configure
mkdir build -Force
cd build
cmake .. @cmake_args

# Build and install
ninja install

# Verify the install
cd ..
python -c "import torch; print(f'PyTorch version: {torch.__version__}, CUDA available: {torch.cuda.is_available()}')"
```
### Key issues explained
1. **Missing libshm.h**:
   - Windows does not support the Unix-style shared-memory path
   - A Windows-only stub satisfies the compile-time dependency
   - At runtime PyTorch falls back to Windows memory-mapped files instead
2. **Broken virtual environment**:
   - A permission error caused the venv creation to fail (the old environment was likely still in use)
   - The script rebuilds the environment from scratch and installs the required packages
3. **CUDA configuration**:
   - The cuDNN paths are passed explicitly so CMake can find them
   - TORCH_CUDA_ARCH_LIST is set to 12.0 to match the RTX 5070's Blackwell compute capability (8.9 is the Ada/RTX 40-series value and would produce kernels that cannot run on this card)
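Before configuring, it is worth verifying that the cuDNN paths used above actually exist; a minimal sketch (the paths are the ones assumed in this guide):
```powershell
# Fail fast if the cuDNN paths passed to CMake do not exist on this machine.
$paths = @(
    "E:\Program Files\NVIDIA\CUNND\v9.12\include\13.0",
    "E:\Program Files\NVIDIA\CUNND\v9.12\lib\13.0\x64\cudnn64_9.lib"
)
foreach ($p in $paths) {
    if (Test-Path $p) { Write-Host "OK:      $p" } else { Write-Warning "Missing: $p" }
}
```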
### How to run
```powershell
# Allow local scripts for this session
Set-ExecutionPolicy Bypass -Scope Process -Force
# Run the full build
.\build_pytorch.ps1
```
### Verify the installation
After the build completes, run a quick test:
```powershell
python -c "import torch; print(torch.rand(5).cuda())"
```
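For a slightly more detailed check (the CUDA runtime PyTorch was built against and the detected GPU), a sketch:
```powershell
# Print the CUDA version PyTorch was compiled with and the name of GPU 0.
python -c "import torch; print(torch.version.cuda, torch.cuda.get_device_name(0))"
```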