https://github.com/jhuber6 updated https://github.com/llvm/llvm-project/pull/119091
>From 4c710e49eea97e542b97e0b5e78b7915acd32383 Mon Sep 17 00:00:00 2001
From: Joseph Huber <hube...@outlook.com>
Date: Sat, 7 Dec 2024 13:47:23 -0600
Subject: [PATCH 1/3] [OpenMP] Use generic IR for the OpenMP DeviceRTL

Summary:
We previously built this separately for every single architecture to deal
with incompatibility. This patch updates it to use the 'generic' IR that
`libc` and other projects use. This may have side effects and is probably
worth more testing, but it passes the tests I expect to pass on my side.

---
 clang/lib/Driver/ToolChains/CommonArgs.cpp |  3 +-
 clang/lib/Driver/ToolChains/Cuda.cpp       |  1 -
 offload/DeviceRTL/CMakeLists.txt           | 70 ++++-------------
 offload/DeviceRTL/src/Reduction.cpp        | 89 +++++++++++-----------
 4 files changed, 62 insertions(+), 101 deletions(-)

diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 7d3d7f8f03c491..3dd90ecf8bca4c 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -2800,8 +2800,7 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
                                 : options::OPT_libomptarget_nvptx_bc_path_EQ;
 
   StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgpu" : "nvptx";
-  std::string LibOmpTargetName =
-      ("libomptarget-" + ArchPrefix + "-" + BitcodeSuffix + ".bc").str();
+  std::string LibOmpTargetName = ("libomptarget-" + ArchPrefix + ".bc").str();
 
   // First check whether user specifies bc library
   if (const Arg *A = DriverArgs.getLastArg(LibomptargetBCPathOpt)) {
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index 102794829795da..214f1e5d83478f 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -851,7 +851,6 @@ void CudaToolChain::addClangTargetOptions(
   HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
 
   StringRef GpuArch = DriverArgs.getLastArgValue(options::OPT_march_EQ);
-  assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
   assert((DeviceOffloadingKind == Action::OFK_OpenMP ||
           DeviceOffloadingKind == Action::OFK_Cuda) &&
          "Only OpenMP or CUDA offloading kinds are supported for NVIDIA GPUs.");
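For readers skimming the two driver hunks above: the user-visible effect is
that there is now a single DeviceRTL bitcode library per vendor prefix instead
of one per GPU model. A minimal sketch of the naming scheme, paraphrased with
plain std::string rather than the driver's Twine-based code (deviceRTLName is
a hypothetical helper for illustration, not part of the patch):

    #include <string>

    // Paraphrase of addOpenMPDeviceRTL's naming after this patch: the file
    // name depends only on the vendor prefix, never on the GPU model.
    std::string deviceRTLName(bool IsAMDGCN) {
      const std::string ArchPrefix = IsAMDGCN ? "amdgpu" : "nvptx";
      // Before: "libomptarget-" + ArchPrefix + "-" + BitcodeSuffix + ".bc",
      // e.g. libomptarget-nvptx-sm_70.bc; now simply libomptarget-nvptx.bc.
      return "libomptarget-" + ArchPrefix + ".bc";
    }

The existing per-target bc-path overrides (LibomptargetBCPathOpt above) still
take precedence over this default name.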
diff --git a/offload/DeviceRTL/CMakeLists.txt b/offload/DeviceRTL/CMakeLists.txt
index 1bf3eb9da38aa1..cda633c41062b6 100644
--- a/offload/DeviceRTL/CMakeLists.txt
+++ b/offload/DeviceRTL/CMakeLists.txt
@@ -42,38 +42,6 @@ set(devicertl_base_directory ${CMAKE_CURRENT_SOURCE_DIR})
 set(include_directory ${devicertl_base_directory}/include)
 set(source_directory ${devicertl_base_directory}/src)
 
-set(all_amdgpu_architectures "gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906"
-                             "gfx908;gfx90a;gfx90c;gfx940;gfx941;gfx942;gfx950;gfx1010"
-                             "gfx1012;gfx1030;gfx1031;gfx1032;gfx1033;gfx1034;gfx1035"
-                             "gfx1036;gfx1100;gfx1101;gfx1102;gfx1103;gfx1150"
-                             "gfx1151;gfx1152;gfx1153")
-set(all_nvptx_architectures "sm_35;sm_37;sm_50;sm_52;sm_53;sm_60;sm_61;sm_62"
-                            "sm_70;sm_72;sm_75;sm_80;sm_86;sm_87;sm_89;sm_90")
-set(all_gpu_architectures
-    "${all_amdgpu_architectures};${all_nvptx_architectures}")
-
-set(LIBOMPTARGET_DEVICE_ARCHITECTURES "all" CACHE STRING
-    "List of device architectures to be used to compile the OpenMP DeviceRTL.")
-
-if(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "all")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_gpu_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "amdgpu")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_amdgpu_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "nvptx")
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES ${all_nvptx_architectures})
-elseif(LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "auto" OR
-       LIBOMPTARGET_DEVICE_ARCHITECTURES STREQUAL "native")
-  if(NOT LIBOMPTARGET_NVPTX_ARCH AND NOT LIBOMPTARGET_AMDGPU_ARCH)
-    message(FATAL_ERROR
-      "Could not find 'amdgpu-arch' and 'nvptx-arch' tools required for 'auto'")
-  elseif(NOT LIBOMPTARGET_FOUND_NVIDIA_GPU AND NOT LIBOMPTARGET_FOUND_AMDGPU_GPU)
-    message(FATAL_ERROR "No AMD or NVIDIA GPU found on the system when using 'auto'")
-  endif()
-  set(LIBOMPTARGET_DEVICE_ARCHITECTURES
-      "${LIBOMPTARGET_NVPTX_DETECTED_ARCH_LIST};${LIBOMPTARGET_AMDGPU_DETECTED_ARCH_LIST}")
-endif()
-list(REMOVE_DUPLICATES LIBOMPTARGET_DEVICE_ARCHITECTURES)
-
 set(include_files
   ${include_directory}/Allocator.h
   ${include_directory}/Configuration.h
@@ -141,20 +109,21 @@ set(bc_flags -c -foffload-lto -std=c++17 -fvisibility=hidden
 
 # first create an object target
 add_library(omptarget.devicertl.all_objs OBJECT IMPORTED)
-function(compileDeviceRTLLibrary target_cpu target_name target_triple)
+function(compileDeviceRTLLibrary target_name target_triple)
   set(target_bc_flags ${ARGN})
 
   set(bc_files "")
   foreach(src ${src_files})
     get_filename_component(infile ${src} ABSOLUTE)
     get_filename_component(outfile ${src} NAME)
-    set(outfile "${outfile}-${target_cpu}.bc")
+    set(outfile "${outfile}-${target_name}.bc")
     set(depfile "${outfile}.d")
 
     add_custom_command(OUTPUT ${outfile}
       COMMAND ${CLANG_TOOL}
       ${bc_flags}
-      --offload-arch=${target_cpu}
+      -fopenmp-targets=${target_triple}
+      -Xopenmp-target=${target_triple} -march=
      ${target_bc_flags}
      -MD -MF ${depfile}
      ${infile} -o ${outfile}
@@ -177,7 +146,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
     list(APPEND bc_files ${outfile})
   endforeach()
 
-  set(bclib_name "libomptarget-${target_name}-${target_cpu}.bc")
+  set(bclib_name "libomptarget-${target_name}.bc")
 
   # Link to a bitcode library.
   add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/linked_${bclib_name}
@@ -217,7 +186,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
       APPEND)
   endif()
 
-  set(bclib_target_name "omptarget-${target_name}-${target_cpu}-bc")
+  set(bclib_target_name "omptarget-${target_name}-bc")
   add_custom_target(${bclib_target_name} DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${bclib_name})
 
   # Copy library to destination.
@@ -239,7 +208,7 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   # Package the bitcode in the bitcode and embed it in an ELF for the static library
   add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
     COMMAND ${PACKAGER_TOOL} -o ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
-      "--image=file=${CMAKE_CURRENT_BINARY_DIR}/${bclib_name},${target_feature},triple=${target_triple},arch=${target_cpu},kind=openmp"
+      "--image=file=${CMAKE_CURRENT_BINARY_DIR}/${bclib_name},${target_feature},triple=${target_triple},arch=generic,kind=openmp"
     DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/${bclib_name}
     COMMENT "Packaging LLVM offloading binary ${bclib_name}.out"
   )
@@ -249,14 +218,14 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
       APPEND)
   endif()
 
-  set(output_name "${CMAKE_CURRENT_BINARY_DIR}/devicertl-${target_name}-${target_cpu}.o")
+  set(output_name "${CMAKE_CURRENT_BINARY_DIR}/devicertl-${target_name}.o")
 
   add_custom_command(OUTPUT ${output_name}
     COMMAND ${CLANG_TOOL} --std=c++17 -c -nostdlib -Xclang -fembed-offload-object=${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name}
             -o ${output_name}
             ${source_directory}/Stub.cpp
     DEPENDS ${CMAKE_CURRENT_BINARY_DIR}/packaged_${bclib_name} ${source_directory}/Stub.cpp
-    COMMENT "Embedding LLVM offloading binary in devicertl-${target_name}-${target_cpu}.o"
+    COMMENT "Embedding LLVM offloading binary in devicertl-${target_name}.o"
     VERBATIM
   )
   if(TARGET clang)
@@ -269,11 +238,11 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   set_property(TARGET omptarget.devicertl.all_objs APPEND PROPERTY IMPORTED_OBJECTS ${output_name})
 
   if (CMAKE_EXPORT_COMPILE_COMMANDS)
-    set(ide_target_name omptarget-ide-${target_name}-${target_cpu})
+    set(ide_target_name omptarget-ide-${target_name})
     add_library(${ide_target_name} STATIC EXCLUDE_FROM_ALL ${src_files})
     target_compile_options(${ide_target_name} PRIVATE
-      -fopenmp --offload-arch=${target_cpu} -fopenmp-cuda-mode
-      -mllvm -openmp-opt-disable
+      -fopenmp-targets=${target_triple} -Xopenmp-target=${target_triple} -march=
+      -fopenmp -fopenmp-cuda-mode -mllvm -openmp-opt-disable
       -foffload-lto -fvisibility=hidden --offload-device-only
       -nocudalib -nogpulib -nogpuinc -nostdlibinc -Wno-unknown-cuda-version
     )
@@ -288,18 +257,11 @@ function(compileDeviceRTLLibrary target_cpu target_name target_triple)
   endif()
 endfunction()
 
-# Generate a Bitcode library for all the gpu architectures the user requested.
-add_custom_target(omptarget.devicertl.nvptx)
 add_custom_target(omptarget.devicertl.amdgpu)
-foreach(gpu_arch ${LIBOMPTARGET_DEVICE_ARCHITECTURES})
-  if("${gpu_arch}" IN_LIST all_amdgpu_architectures)
-    compileDeviceRTLLibrary(${gpu_arch} amdgpu amdgcn-amd-amdhsa -Xclang -mcode-object-version=none)
-  elseif("${gpu_arch}" IN_LIST all_nvptx_architectures)
-    compileDeviceRTLLibrary(${gpu_arch} nvptx nvptx64-nvidia-cuda --cuda-feature=+ptx63)
-  else()
-    message(FATAL_ERROR "Unknown GPU architecture '${gpu_arch}'")
-  endif()
-endforeach()
+compileDeviceRTLLibrary(amdgpu amdgcn-amd-amdhsa -Xclang -mcode-object-version=none)
+
+add_custom_target(omptarget.devicertl.nvptx)
+compileDeviceRTLLibrary(nvptx nvptx64-nvidia-cuda --cuda-feature=+ptx63)
 
 # Archive all the object files generated above into a static library
 add_library(omptarget.devicertl STATIC)
diff --git a/offload/DeviceRTL/src/Reduction.cpp b/offload/DeviceRTL/src/Reduction.cpp
index 57df159d3f28e5..d1aa9d3e474f71 100644
--- a/offload/DeviceRTL/src/Reduction.cpp
+++ b/offload/DeviceRTL/src/Reduction.cpp
@@ -44,7 +44,6 @@ void gpu_irregular_warp_reduce(void *reduce_data, ShuffleReductFnTy shflFct,
   }
 }
 
-#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 700
 static uint32_t gpu_irregular_simd_reduce(void *reduce_data,
                                           ShuffleReductFnTy shflFct) {
   uint32_t size, remote_id, physical_lane_id;
@@ -63,7 +62,6 @@ static uint32_t gpu_irregular_simd_reduce(void *reduce_data,
   } while (logical_lane_id % 2 == 0 && size > 1);
   return (logical_lane_id == 0);
 }
-#endif
 
 static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
                                             ShuffleReductFnTy shflFct,
@@ -74,49 +72,53 @@ static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
   uint32_t NumThreads = omp_get_num_threads();
   if (NumThreads == 1)
     return 1;
-  /*
-   * This reduce function handles reduction within a team. It handles
-   * parallel regions in both L1 and L2 parallelism levels. It also
-   * supports Generic, SPMD, and NoOMP modes.
-   *
-   * 1. Reduce within a warp.
-   * 2. Warp master copies value to warp 0 via shared memory.
-   * 3. Warp 0 reduces to a single value.
-   * 4. The reduced value is available in the thread that returns 1.
-   */
-
-#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
-  uint32_t WarpsNeeded =
-      (NumThreads + mapping::getWarpSize() - 1) / mapping::getWarpSize();
-  uint32_t WarpId = mapping::getWarpIdInBlock();
-
-  // Volta execution model:
-  // For the Generic execution mode a parallel region either has 1 thread and
-  // beyond that, always a multiple of 32. For the SPMD execution mode we may
-  // have any number of threads.
-  if ((NumThreads % mapping::getWarpSize() == 0) || (WarpId < WarpsNeeded - 1))
-    gpu_regular_warp_reduce(reduce_data, shflFct);
-  else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
-    gpu_irregular_warp_reduce(reduce_data, shflFct,
-                              /*LaneCount=*/NumThreads % mapping::getWarpSize(),
-                              /*LaneId=*/mapping::getThreadIdInBlock() %
-                                  mapping::getWarpSize());
-  // When we have more than [mapping::getWarpSize()] number of threads
-  // a block reduction is performed here.
-  //
-  // Only L1 parallel region can enter this if condition.
-  if (NumThreads > mapping::getWarpSize()) {
-    // Gather all the reduced values from each warp
-    // to the first warp.
-    cpyFct(reduce_data, WarpsNeeded);
+   //
+   // This reduce function handles reduction within a team. It handles
+   // parallel regions in both L1 and L2 parallelism levels. It also
+   // supports Generic, SPMD, and NoOMP modes.
+   //
+   // 1. Reduce within a warp.
+   // 2. Warp master copies value to warp 0 via shared memory.
+   // 3. Warp 0 reduces to a single value.
+   // 4. The reduced value is available in the thread that returns 1.
+   //
 
-  if (WarpId == 0)
-    gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
-                              BlockThreadId);
+#if __has_builtin(__nvvm_reflect)
+  if (__nvvm_reflect("__CUDA_ARCH") >= 700) {
+    uint32_t WarpsNeeded =
+        (NumThreads + mapping::getWarpSize() - 1) / mapping::getWarpSize();
+    uint32_t WarpId = mapping::getWarpIdInBlock();
+
+    // Volta execution model:
+    // For the Generic execution mode a parallel region either has 1 thread and
+    // beyond that, always a multiple of 32. For the SPMD execution mode we may
+    // have any number of threads.
+    if ((NumThreads % mapping::getWarpSize() == 0) ||
+        (WarpId < WarpsNeeded - 1))
+      gpu_regular_warp_reduce(reduce_data, shflFct);
+    else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
+      gpu_irregular_warp_reduce(
+          reduce_data, shflFct,
+          /*LaneCount=*/NumThreads % mapping::getWarpSize(),
+          /*LaneId=*/mapping::getThreadIdInBlock() % mapping::getWarpSize());
+
+    // When we have more than [mapping::getWarpSize()] number of threads
+    // a block reduction is performed here.
+    //
+    // Only L1 parallel region can enter this if condition.
+    if (NumThreads > mapping::getWarpSize()) {
+      // Gather all the reduced values from each warp
+      // to the first warp.
+      cpyFct(reduce_data, WarpsNeeded);
+
+      if (WarpId == 0)
+        gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
+                                  BlockThreadId);
+    }
+    return BlockThreadId == 0;
   }
-  return BlockThreadId == 0;
-#else
+#endif
   __kmpc_impl_lanemask_t Liveness = mapping::activemask();
   if (Liveness == lanes::All) // Full warp
     gpu_regular_warp_reduce(reduce_data, shflFct);
@@ -150,10 +152,9 @@ static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
     return BlockThreadId == 0;
   }
 
-  // Get the OMP thread Id. This is different from BlockThreadId in the case of
-  // an L2 parallel region.
+  // Get the OMP thread Id. This is different from BlockThreadId in the case
+  // of an L2 parallel region.
   return BlockThreadId == 0;
-#endif // __CUDA_ARCH__ >= 700
 }
 
 uint32_t roundToWarpsize(uint32_t s) {
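The Reduction.cpp rewrite above is the heart of the 'generic' IR approach: a
preprocessor test of __CUDA_ARCH__ cannot survive into architecture-neutral
bitcode, so it becomes a __nvvm_reflect query that is folded to a constant
only when the IR is finally lowered for a concrete GPU, deleting the dead
branch. A standalone sketch of the idiom (pickReductionPath and the branch
bodies are illustrative, not the patch's code):

    // __nvvm_reflect is a clang builtin on NVPTX; the NVVMReflect pass folds
    // the call to a constant (e.g. 700 for sm_70) during final lowering, so
    // until then both branches coexist in the generic bitcode.
    static int pickReductionPath() {
    #if __has_builtin(__nvvm_reflect)
      if (__nvvm_reflect("__CUDA_ARCH") >= 700)
        return 1; // Volta and newer: independent thread scheduling path.
    #endif
      return 0; // Fallback for older NVPTX targets, and the only path on
                // compilers/targets without the builtin (e.g. AMDGPU).
    }

On a target without the builtin, the #if guard drops the query entirely; on
NVPTX, each final per-sm_XX compile keeps exactly one branch.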
>From ae742d66d1c92d4ef95bbcafa05298fb72dcfa63 Mon Sep 17 00:00:00 2001
From: Joseph Huber <hube...@outlook.com>
Date: Sat, 7 Dec 2024 13:56:50 -0600
Subject: [PATCH 2/3] update with comment

---
 offload/DeviceRTL/CMakeLists.txt    |  1 +
 offload/DeviceRTL/src/Reduction.cpp | 20 ++++++++++----------
 2 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/offload/DeviceRTL/CMakeLists.txt b/offload/DeviceRTL/CMakeLists.txt
index cda633c41062b6..22940264f9b19a 100644
--- a/offload/DeviceRTL/CMakeLists.txt
+++ b/offload/DeviceRTL/CMakeLists.txt
@@ -119,6 +119,7 @@ function(compileDeviceRTLLibrary target_name target_triple)
     set(outfile "${outfile}-${target_name}.bc")
     set(depfile "${outfile}.d")
 
+    # Passing an empty CPU to -march= suppresses target-specific metadata.
     add_custom_command(OUTPUT ${outfile}
       COMMAND ${CLANG_TOOL}
       ${bc_flags}
diff --git a/offload/DeviceRTL/src/Reduction.cpp b/offload/DeviceRTL/src/Reduction.cpp
index d1aa9d3e474f71..d3b4528401953c 100644
--- a/offload/DeviceRTL/src/Reduction.cpp
+++ b/offload/DeviceRTL/src/Reduction.cpp
@@ -73,16 +73,16 @@ static int32_t nvptx_parallel_reduce_nowait(void *reduce_data,
   if (NumThreads == 1)
     return 1;
 
-   //
-   // This reduce function handles reduction within a team. It handles
-   // parallel regions in both L1 and L2 parallelism levels. It also
-   // supports Generic, SPMD, and NoOMP modes.
-   //
-   // 1. Reduce within a warp.
-   // 2. Warp master copies value to warp 0 via shared memory.
-   // 3. Warp 0 reduces to a single value.
-   // 4. The reduced value is available in the thread that returns 1.
-   //
+  //
+  // This reduce function handles reduction within a team. It handles
+  // parallel regions in both L1 and L2 parallelism levels. It also
+  // supports Generic, SPMD, and NoOMP modes.
+  //
+  // 1. Reduce within a warp.
+  // 2. Warp master copies value to warp 0 via shared memory.
+  // 3. Warp 0 reduces to a single value.
+  // 4. The reduced value is available in the thread that returns 1.
+  //
 
 #if __has_builtin(__nvvm_reflect)
   if (__nvvm_reflect("__CUDA_ARCH") >= 700) {
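As a quick sanity check on the warp arithmetic behind the comment block
above, here is the ceiling division the code uses for WarpsNeeded as a
standalone snippet (warpsNeeded is an illustrative helper assuming a 32-lane
warp, not DeviceRTL code):

    #include <cstdint>

    // Same formula as the runtime:
    //   WarpsNeeded = (NumThreads + getWarpSize() - 1) / getWarpSize()
    constexpr uint32_t warpsNeeded(uint32_t NumThreads,
                                   uint32_t WarpSize = 32) {
      return (NumThreads + WarpSize - 1) / WarpSize;
    }

    // 96 threads fill 3 warps exactly; 70 threads still need 3 warps, with
    // a partial last warp, which is why the irregular-warp path exists.
    static_assert(warpsNeeded(96) == 3, "exact multiple of the warp size");
    static_assert(warpsNeeded(70) == 3, "partial trailing warp");
    static_assert(warpsNeeded(1) == 1, "single-thread region");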
>From f868bbfc0c724dbd985e13c4a9fbe1c66aafc46a Mon Sep 17 00:00:00 2001
From: Joseph Huber <hube...@outlook.com>
Date: Sat, 7 Dec 2024 14:04:55 -0600
Subject: [PATCH 3/3] release notes

---
 clang/docs/ReleaseNotes.rst  | 3 +++
 openmp/docs/ReleaseNotes.rst | 7 +++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 12d8ebf3251bc0..1b2bb3b98f0dc4 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -1153,6 +1153,9 @@ OpenMP Support
 - Added support for 'omp assume' directive.
 - Added support for 'omp scope' directive.
 - Added support for allocator-modifier in 'allocate' clause.
+- Changed the OpenMP DeviceRTL to use 'generic' IR. The
+  ``LIBOMPTARGET_DEVICE_ARCHITECTURES`` CMake argument is now unused and will
+  always build support for AMDGPU and NVPTX targets.
 
 Improvements
 ^^^^^^^^^^^^
diff --git a/openmp/docs/ReleaseNotes.rst b/openmp/docs/ReleaseNotes.rst
index d4a4b1a99f7813..db594bd71d7e61 100644
--- a/openmp/docs/ReleaseNotes.rst
+++ b/openmp/docs/ReleaseNotes.rst
@@ -1,5 +1,4 @@
-===========================
-OpenMP 20.0.0 Release Notes
+===========================
@@ -19,3 +18,7 @@ from the `LLVM releases web site <https://llvm.org/releases/>`_.
 
 Non-comprehensive list of changes in this release
 =================================================
+
+- Changed the OpenMP DeviceRTL to use 'generic' IR. The
+  ``LIBOMPTARGET_DEVICE_ARCHITECTURES`` CMake argument is now unused and will
+  always build support for AMDGPU and NVPTX targets.