yaxunl created this revision.

Currently a block is translated to a structure equivalent to

  struct Block {
    void *isa;
    int flags;
    int reserved;
    void *invoke;
    void *descriptor;
  };

Except for `invoke`, which is the pointer to the block invoke function, all of these fields are useless for OpenCL. They clutter the IR and also waste memory, since the block struct is passed to the block invoke function as an argument. On the other hand, the size and alignment of the block struct are not stored in the struct itself, which makes it difficult to implement __enqueue_kernel as a library function: the library needs to know the size and alignment of the argument that has to be passed to the enqueued kernel.

This patch removes the unused fields from the block struct and adds size and align fields, so the equivalent block struct becomes

  struct Block {
    int size;
    int align;
    generic void *invoke;
  };

It also changes the pointer to the invoke function to a generic pointer, since the address space of a function may not be private on certain targets.

https://reviews.llvm.org/D37822

Files:
  lib/CodeGen/CGBlocks.cpp
  lib/CodeGen/CGOpenCLRuntime.cpp
  lib/CodeGen/CGOpenCLRuntime.h
  test/CodeGen/blocks-opencl.cl
  test/CodeGenOpenCL/blocks.cl
  test/CodeGenOpenCL/cl20-device-side-enqueue.cl
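For context, a minimal sketch (not part of this patch) of how a target's device library could implement __enqueue_kernel_basic on top of the new header layout. The rt_alloc_kernel_arg and rt_push_kernel hooks are invented for illustration only; real libraries differ in entry points and address-space handling. The point is that the leading size and align fields give the library everything it needs to copy the block literal that is passed to the enqueued kernel, without a block descriptor.

  /* Hypothetical runtime hooks, declared only so the sketch is self-contained. */
  extern void *rt_alloc_kernel_arg(void *queue, int size, int align);
  extern int rt_push_kernel(void *queue, int flags, const void *ndrange,
                            void *invoke, void *arg);

  /* C equivalent of the block header emitted by this patch; captured
   * variables follow the header in the block literal. */
  typedef struct {
    int size;      /* total size of the block literal in bytes */
    int align;     /* alignment of the block literal in bytes */
    void *invoke;  /* generic pointer to the block invoke function */
  } BlockHeader;

  int __enqueue_kernel_basic(void *queue, int flags, const void *ndrange,
                             void *block_literal) {
    const BlockHeader *hdr = (const BlockHeader *)block_literal;

    /* size/align let the library copy the block argument for the child
     * kernel without consulting any descriptor structure. */
    void *arg = rt_alloc_kernel_arg(queue, hdr->size, hdr->align);
    __builtin_memcpy(arg, block_literal, hdr->size);

    /* hdr->invoke is what the runtime eventually calls with 'arg'. */
    return rt_push_kernel(queue, flags, ndrange, hdr->invoke, arg);
  }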
Index: test/CodeGenOpenCL/cl20-device-side-enqueue.cl =================================================================== --- test/CodeGenOpenCL/cl20-device-side-enqueue.cl +++ test/CodeGenOpenCL/cl20-device-side-enqueue.cl @@ -7,7 +7,7 @@ typedef struct {int a;} ndrange_t; // N.B. The check here only exists to set BL_GLOBAL -// COMMON: @block_G = addrspace(1) constant void (i8 addrspace(3)*) addrspace(4)* addrspacecast (void (i8 addrspace(3)*) addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_GLOBAL:@__block_literal_global(\.[0-9]+)?]] to void (i8 addrspace(3)*) addrspace(1)*) to void (i8 addrspace(3)*) addrspace(4)*) +// COMMON: @block_G = addrspace(1) constant void (i8 addrspace(3)*) addrspace(4)* addrspacecast (void (i8 addrspace(3)*) addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_GLOBAL:@__block_literal_global(\.[0-9]+)?]] to void (i8 addrspace(3)*) addrspace(1)*) to void (i8 addrspace(3)*) addrspace(4)*) const bl_t block_G = (bl_t) ^ (local void *a) {}; kernel void device_side_enqueue(global int *a, global int *b, int i) { @@ -27,9 +27,10 @@ // COMMON: [[NDR:%[a-z0-9]+]] = alloca %struct.ndrange_t, align 4 // COMMON: [[DEF_Q:%[0-9]+]] = load %opencl.queue_t{{.*}}*, %opencl.queue_t{{.*}}** %default_queue // COMMON: [[FLAGS:%[0-9]+]] = load i32, i32* %flags - // COMMON: [[BL:%[0-9]+]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor addrspace(2)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block to void ()* + // B32: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32, i32 addrspace(1)* }>* %block to void ()* + // B64: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32 addrspace(1)*, i32 addrspace(1)*, i32 }>* %block to void ()* // COMMON: [[BL_I8:%[0-9]+]] = addrspacecast void ()* [[BL]] to i8 addrspace(4)* - // COMMON: call i32 @__enqueue_kernel_basic(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* byval [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* [[BL_I8]]) + // COMMON: call i32 @__enqueue_kernel_basic(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* byval [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* [[BL_I8]]) enqueue_kernel(default_queue, flags, ndrange, ^(void) { a[i] = b[i]; @@ -39,7 +40,7 @@ // COMMON: [[FLAGS:%[0-9]+]] = load i32, i32* %flags // COMMON: [[WAIT_EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %event_wait_list to %opencl.clk_event_t{{.*}}* addrspace(4)* // COMMON: [[EVNT:%[0-9]+]] = addrspacecast %opencl.clk_event_t{{.*}}** %clk_event to %opencl.clk_event_t{{.*}}* addrspace(4)* - // COMMON: [[BL:%[0-9]+]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor addrspace(2)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block3 to void ()* + // COMMON: [[BL:%[0-9]+]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32{{.*}}, i32{{.*}}, i32{{.*}} }>* %block3 to void ()* // COMMON: [[BL_I8:%[0-9]+]] = addrspacecast void ()* [[BL]] to i8 addrspace(4)* // COMMON: call i32 @__enqueue_kernel_basic_events(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]], i8 addrspace(4)* [[BL_I8]]) enqueue_kernel(default_queue, flags, ndrange, 2, &event_wait_list, &clk_event, @@ -52,11 +53,11 @@ // B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 256, i32* %[[TMP1]], align 4 - // B32: call i32 
@__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 256, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, ^(local void *p) { return; @@ -68,11 +69,11 @@ // B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 %{{.*}}, i32* %[[TMP1]], align 4 - // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 %{{.*}}, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, ^(local void *p) { return; @@ -87,11 +88,11 @@ // 
B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 256, i32* %[[TMP1]], align 4 - // B32: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}} [[WAIT_EVNT]], %opencl.clk_event_t{{.*}} [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}} [[WAIT_EVNT]], %opencl.clk_event_t{{.*}} [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 256, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}} [[WAIT_EVNT]], %opencl.clk_event_t{{.*}} [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}} [[WAIT_EVNT]], %opencl.clk_event_t{{.*}} [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, 2, event_wait_list2, &clk_event, ^(local void *p) { return; @@ -106,11 +107,11 @@ // B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 %{{.*}}, i32* %[[TMP1]], align 4 - // B32: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 %{{.*}}, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* 
[[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_events_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* {{.*}}, i32 2, %opencl.clk_event_t{{.*}}* addrspace(4)* [[WAIT_EVNT]], %opencl.clk_event_t{{.*}}* addrspace(4)* [[EVNT]], i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, 2, event_wait_list2, &clk_event, ^(local void *p) { return; @@ -123,11 +124,11 @@ // B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 %{{.*}}, i32* %[[TMP1]], align 4 - // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 %{{.*}}, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, ^(local void *p) { return; @@ -143,15 +144,15 @@ // B32: store i32 2, i32* %[[TMP2]], align 4 // B32: %[[TMP3:.*]] = getelementptr [3 x i32], [3 x i32]* %[[TMP]], i32 0, i32 2 // B32: store i32 4, i32* %[[TMP3]], align 4 - // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 3, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 
addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 3, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [3 x i64] // B64: %[[TMP1:.*]] = getelementptr [3 x i64], [3 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 1, i64* %[[TMP1]], align 8 // B64: %[[TMP2:.*]] = getelementptr [3 x i64], [3 x i64]* %[[TMP]], i32 0, i32 1 // B64: store i64 2, i64* %[[TMP2]], align 8 // B64: %[[TMP3:.*]] = getelementptr [3 x i64], [3 x i64]* %[[TMP]], i32 0, i32 2 // B64: store i64 4, i64* %[[TMP3]], align 8 - // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 3, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 3, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, ^(local void *p1, local void *p2, local void *p3) { return; @@ -163,40 +164,40 @@ // B32: %[[TMP:.*]] = alloca [1 x i32] // B32: %[[TMP1:.*]] = getelementptr [1 x i32], [1 x i32]* %[[TMP]], i32 0, i32 0 // B32: store i32 0, i32* %[[TMP1]], align 4 - // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) + // B32: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i32* %[[TMP1]]) // B64: %[[TMP:.*]] = alloca [1 x i64] // B64: %[[TMP1:.*]] = getelementptr [1 x i64], [1 x i64]* %[[TMP]], i32 0, i32 0 // B64: store i64 4294967296, i64* %[[TMP1]], align 8 - // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{(.[0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) + // B64: call i32 @__enqueue_kernel_vaargs(%opencl.queue_t{{.*}}* [[DEF_Q]], i32 [[FLAGS]], %struct.ndrange_t* [[NDR]]{{([0-9]+)?}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* @__block_literal_global{{(.[0-9]+)?}} to i8 addrspace(1)*) to i8 addrspace(4)*), i32 1, i64* %[[TMP1]]) enqueue_kernel(default_queue, flags, ndrange, ^(local void *p) { return; }, 4294967296L); // The full type of these expressions are long (and repeated elsewhere), so we // capture it as part of the regex for convenience and clarity. 
- // COMMON: store void () addrspace(4)* addrspacecast (void () addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_A:@__block_literal_global(\.[0-9]+)?]] to void () addrspace(1)*) to void () addrspace(4)*), void () addrspace(4)** %block_A + // COMMON: store void () addrspace(4)* addrspacecast (void () addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_A:@__block_literal_global(\.[0-9]+)?]] to void () addrspace(1)*) to void () addrspace(4)*), void () addrspace(4)** %block_A void (^const block_A)(void) = ^{ return; }; - // COMMON: store void (i8 addrspace(3)*) addrspace(4)* addrspacecast (void (i8 addrspace(3)*) addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_B:@__block_literal_global(\.[0-9]+)?]] to void (i8 addrspace(3)*) addrspace(1)*) to void (i8 addrspace(3)*) addrspace(4)*), void (i8 addrspace(3)*) addrspace(4)** %block_B + // COMMON: store void (i8 addrspace(3)*) addrspace(4)* addrspacecast (void (i8 addrspace(3)*) addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_B:@__block_literal_global(\.[0-9]+)?]] to void (i8 addrspace(3)*) addrspace(1)*) to void (i8 addrspace(3)*) addrspace(4)*), void (i8 addrspace(3)*) addrspace(4)** %block_B void (^const block_B)(local void *) = ^(local void *a) { return; }; - // COMMON: call i32 @__get_kernel_work_group_size_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_A]] to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_work_group_size_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_A]] to i8 addrspace(1)*) to i8 addrspace(4)*)) unsigned size = get_kernel_work_group_size(block_A); - // COMMON: call i32 @__get_kernel_work_group_size_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_B]] to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_work_group_size_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_B]] to i8 addrspace(1)*) to i8 addrspace(4)*)) size = get_kernel_work_group_size(block_B); - // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_A]] to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_A]] to i8 addrspace(1)*) to i8 addrspace(4)*)) size = get_kernel_preferred_work_group_size_multiple(block_A); - // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* [[BL_GLOBAL]] to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_preferred_work_group_multiple_impl(i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* [[BL_GLOBAL]] to i8 addrspace(1)*) to i8 addrspace(4)*)) size = get_kernel_preferred_work_group_size_multiple(block_G); - // COMMON: call i32 
@__get_kernel_max_sub_group_size_for_ndrange_impl(%struct.ndrange_t* {{.*}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* {{.*}} to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_max_sub_group_size_for_ndrange_impl(%struct.ndrange_t* {{.*}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* {{.*}} to i8 addrspace(1)*) to i8 addrspace(4)*)) size = get_kernel_max_sub_group_size_for_ndrange(ndrange, ^(){}); - // COMMON: call i32 @__get_kernel_sub_group_count_for_ndrange_impl(%struct.ndrange_t* {{.*}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } addrspace(1)* {{.*}} to i8 addrspace(1)*) to i8 addrspace(4)*)) + // COMMON: call i32 @__get_kernel_sub_group_count_for_ndrange_impl(%struct.ndrange_t* {{.*}}, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* bitcast ({ i32, i32, i8 addrspace(4)* } addrspace(1)* {{.*}} to i8 addrspace(1)*) to i8 addrspace(4)*)) size = get_kernel_sub_group_count_for_ndrange(ndrange, ^(){}); } Index: test/CodeGenOpenCL/blocks.cl =================================================================== --- test/CodeGenOpenCL/blocks.cl +++ test/CodeGenOpenCL/blocks.cl @@ -1,17 +1,48 @@ -// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple spir-unknown-unknown | FileCheck -check-prefix=GENERIC -check-prefix=COMMON %s -// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple amdgcn-amd-amdhsa-opencl | FileCheck -check-prefix=AMD -check-prefix=COMMON %s +// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple spir-unknown-unknown | FileCheck -check-prefixes=COMMON,SPIR %s +// RUN: %clang_cc1 %s -cl-std=CL2.0 -emit-llvm -o - -O0 -triple amdgcn-amd-amdhsa-opencl | FileCheck -check-prefixes=COMMON,AMD %s -// Checking for null instead of @__NSConcreteGlobalBlock symbol -// COMMON: @__block_literal_global = internal addrspace(1) constant { i8**, i32, i32, i8*, %struct.__block_descriptor addrspace(2)* } { i8** null +// SPIR: @__block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 12, i32 4, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @block_A_block_invoke to i8*) to i8 addrspace(4)*) } +// AMD: @__block_literal_global = internal addrspace(1) constant { i32, i32, i8 addrspace(4)* } { i32 16, i32 8, i8 addrspace(4)* addrspacecast (i8* bitcast (void (i8 addrspace(4)*, i8 addrspace(3)*)* @block_A_block_invoke to i8*) to i8 addrspace(4)*) } +// COMMON-NOT: .str + +// COMMON-LABEL: define internal {{.*}}void @block_A_block_invoke(i8 addrspace(4)* %.block_descriptor, i8 addrspace(3)* %a) void (^block_A)(local void *) = ^(local void *a) { return; }; +// COMMON-LABEL: define {{.*}}void @foo() void foo(){ int i; -// Checking for null instead of @_NSConcreteStackBlock symbol -// COMMON: store i8* null, i8** %block.isa + // COMMON-NOT: store i8* null, i8** %block.isa + // COMMON: %[[block_size:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 0 + // SPIR: store i32 16, i32* %[[block_size]] + // AMD: store i32 20, i32* %[[block_size]] + // COMMON: %[[block_align:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %block, i32 0, i32 1 + // SPIR: store i32 4, i32* %[[block_align]] + // AMD: store i32 8, i32* 
%[[block_align]] + // COMMON: %[[block_invoke:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block:.*]], i32 0, i32 2 + // COMMON: store i8 addrspace(4)* addrspacecast (i8* bitcast (i32 (i8 addrspace(4)*)* @__foo_block_invoke to i8*) to i8 addrspace(4)*), i8 addrspace(4)** %[[block_invoke]] + // COMMON: %[[block_captured:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]], i32 0, i32 3 + // COMMON: %[[r0:.*]] = load i32, i32* %i + // COMMON: store i32 %[[r0]], i32* %[[block_captured]], + // COMMON: %[[r1:.*]] = bitcast <{ i32, i32, i8 addrspace(4)*, i32 }>* %[[block]] to i32 ()* + // COMMON: %[[r2:.*]] = addrspacecast i32 ()* %[[r1]] to i32 () addrspace(4)* + // COMMON: store i32 () addrspace(4)* %[[r2]], i32 () addrspace(4)** %[[block_B:.*]], + // COMMON: %[[r3:.*]] = load i32 () addrspace(4)*, i32 () addrspace(4)** %[[block_B]] + // COMMON: %[[block_literal:.*]] = bitcast i32 () addrspace(4)* %3 to %struct.__block_literal_generic addrspace(4)* + // COMMON: %[[r4:.*]] = getelementptr inbounds %struct.__block_literal_generic, %struct.__block_literal_generic addrspace(4)* %[[block_literal]], i32 0, i32 0 + // COMMON: %[[r5:.*]] = bitcast %struct.__block_literal_generic addrspace(4)* %[[block_literal]] to i8 addrspace(4)* + // COMMON: %[[r6:.*]] = load i8*, i8* addrspace(4)* %[[r4]] + // COMMON: %[[r7:.*]] = bitcast i8* %[[r6]] to i32 (i8 addrspace(4)*)* + // COMMON: call {{.*}}i32 %[[r7]](i8 addrspace(4)* %[[r5]]) + int (^ block_B)(void) = ^{ return i; }; + block_B(); } + +// COMMON-LABEL: define internal {{.*}}i32 @__foo_block_invoke(i8 addrspace(4)* %.block_descriptor) +// COMMON: %[[block:.*]] = bitcast i8 addrspace(4)* %.block_descriptor to <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)* +// COMMON: %[[block_capture_addr:.*]] = getelementptr inbounds <{ i32, i32, i8 addrspace(4)*, i32 }>, <{ i32, i32, i8 addrspace(4)*, i32 }> addrspace(4)* %[[block]], i32 0, i32 3 +// COMMON: %[[r1:.*]] = load i32, i32 addrspace(4)* %[[block_capture_addr]] Index: test/CodeGen/blocks-opencl.cl =================================================================== --- test/CodeGen/blocks-opencl.cl +++ test/CodeGen/blocks-opencl.cl @@ -1,11 +1,12 @@ // RUN: %clang_cc1 -O0 %s -ffake-address-space-map -emit-llvm -o - -fblocks -triple x86_64-unknown-unknown | FileCheck %s +// RUN: %clang_cc1 -O0 %s -emit-llvm -o - -fblocks -triple amdgcn | FileCheck %s // This used to crash due to trying to generate a bitcase from a cstring // in the constant address space to i8* in AS0. void dummy(float (^const op)(float)) { } -// CHECK: i8 addrspace(2)* getelementptr inbounds ([9 x i8], [9 x i8] addrspace(2)* @.str, i32 0, i32 0) +// CHECK-NOT: @.str kernel void test_block() { Index: lib/CodeGen/CGOpenCLRuntime.h =================================================================== --- lib/CodeGen/CGOpenCLRuntime.h +++ lib/CodeGen/CGOpenCLRuntime.h @@ -59,6 +59,9 @@ // \brief Returnes a value which indicates the alignment in bytes of the pipe // element. virtual llvm::Value *getPipeElemAlign(const Expr *PipeArg); + + /// \return __generic void* type. 
+ llvm::PointerType *getGenericVoidPointerType(); }; } Index: lib/CodeGen/CGOpenCLRuntime.cpp =================================================================== --- lib/CodeGen/CGOpenCLRuntime.cpp +++ lib/CodeGen/CGOpenCLRuntime.cpp @@ -103,3 +103,9 @@ .getQuantity(); return llvm::ConstantInt::get(Int32Ty, TypeSize, false); } + +llvm::PointerType *CGOpenCLRuntime::getGenericVoidPointerType() { + return llvm::IntegerType::getInt8PtrTy( + CGM.getLLVMContext(), + CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic)); +} Index: lib/CodeGen/CGBlocks.cpp =================================================================== --- lib/CodeGen/CGBlocks.cpp +++ lib/CodeGen/CGBlocks.cpp @@ -14,6 +14,7 @@ #include "CGBlocks.h" #include "CGDebugInfo.h" #include "CGObjCRuntime.h" +#include "CGOpenCLRuntime.h" #include "CodeGenFunction.h" #include "CodeGenModule.h" #include "ConstantEmitter.h" @@ -302,21 +303,37 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info, SmallVectorImpl<llvm::Type*> &elementTypes) { - // The header is basically 'struct { void *; int; int; void *; void *; }'. - // Assert that that struct is packed. - assert(CGM.getIntSize() <= CGM.getPointerSize()); - assert(CGM.getIntAlign() <= CGM.getPointerAlign()); - assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign())); - - info.BlockAlign = CGM.getPointerAlign(); - info.BlockSize = 3 * CGM.getPointerSize() + 2 * CGM.getIntSize(); assert(elementTypes.empty()); - elementTypes.push_back(CGM.VoidPtrTy); - elementTypes.push_back(CGM.IntTy); - elementTypes.push_back(CGM.IntTy); - elementTypes.push_back(CGM.VoidPtrTy); - elementTypes.push_back(CGM.getBlockDescriptorType()); + if (CGM.getLangOpts().OpenCL) { + // The header is basically 'struct { int; int; generic void *; }'. + // Assert that that struct is packed. + auto GenPtrAlign = CharUnits::fromQuantity( + CGM.getTarget().getPointerAlign(LangAS::opencl_generic) / 8); + auto GenPtrSize = CharUnits::fromQuantity( + CGM.getTarget().getPointerWidth(LangAS::opencl_generic) / 8); + assert(CGM.getIntSize() <= GenPtrSize); + assert(CGM.getIntAlign() <= GenPtrAlign); + assert((2 * CGM.getIntSize()).isMultipleOf(GenPtrAlign)); + info.BlockAlign = GenPtrAlign; + info.BlockSize = GenPtrSize + 2 * CGM.getIntSize(); + elementTypes.push_back(CGM.IntTy); + elementTypes.push_back(CGM.IntTy); + elementTypes.push_back(CGM.getOpenCLRuntime().getGenericVoidPointerType()); + } else { + // The header is basically 'struct { void *; int; int; void *; void *; }'. + // Assert that that struct is packed. + assert(CGM.getIntSize() <= CGM.getPointerSize()); + assert(CGM.getIntAlign() <= CGM.getPointerAlign()); + assert((2 * CGM.getIntSize()).isMultipleOf(CGM.getPointerAlign())); + info.BlockAlign = CGM.getPointerAlign(); + info.BlockSize = 3 * CGM.getPointerSize() + 2 * CGM.getIntSize(); + elementTypes.push_back(CGM.VoidPtrTy); + elementTypes.push_back(CGM.IntTy); + elementTypes.push_back(CGM.IntTy); + elementTypes.push_back(CGM.VoidPtrTy); + elementTypes.push_back(CGM.getBlockDescriptorType()); + } } static QualType getCaptureFieldType(const CodeGenFunction &CGF, @@ -720,40 +737,50 @@ } llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) { + bool IsOpenCL = CGM.getContext().getLangOpts().OpenCL; + auto GenVoidPtrTy = + IsOpenCL ? CGM.getOpenCLRuntime().getGenericVoidPointerType() : VoidPtrTy; + unsigned GenVoidPtrAddr = IsOpenCL ? 
LangAS::opencl_generic : LangAS::Default; + auto GenVoidPtrSize = CharUnits::fromQuantity( + CGM.getTarget().getPointerWidth(GenVoidPtrAddr) / 8); // Using the computed layout, generate the actual block function. bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda(); llvm::Constant *blockFn = CodeGenFunction(CGM, true).GenerateBlockFunction(CurGD, blockInfo, LocalDeclMap, isLambdaConv); - blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy); + blockFn = llvm::ConstantExpr::getPointerCast(blockFn, GenVoidPtrTy); // If there is nothing to capture, we can emit this as a global block. if (blockInfo.CanBeGlobal) return buildGlobalBlock(CGM, blockInfo, blockFn); // Otherwise, we have to emit this as a local block. - llvm::Constant *isa = - (!CGM.getContext().getLangOpts().OpenCL) - ? CGM.getNSConcreteStackBlock() - : CGM.getNullPointer(VoidPtrPtrTy, - CGM.getContext().getPointerType( - QualType(CGM.getContext().VoidPtrTy))); - isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy); - - // Build the block descriptor. - llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo); - Address blockAddr = blockInfo.LocalAddress; assert(blockAddr.isValid() && "block has no address!"); - // Compute the initial on-stack block flags. - BlockFlags flags = BLOCK_HAS_SIGNATURE; - if (blockInfo.HasCapturedVariableLayout) flags |= BLOCK_HAS_EXTENDED_LAYOUT; - if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE; - if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ; - if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET; + llvm::Constant *isa; + llvm::Constant *descriptor; + BlockFlags flags; + if (!IsOpenCL) { + isa = llvm::ConstantExpr::getBitCast(CGM.getNSConcreteStackBlock(), + VoidPtrTy); + + // Build the block descriptor. + descriptor = buildBlockDescriptor(CGM, blockInfo); + + // Compute the initial on-stack block flags. + flags = BLOCK_HAS_SIGNATURE; + if (blockInfo.HasCapturedVariableLayout) + flags |= BLOCK_HAS_EXTENDED_LAYOUT; + if (blockInfo.NeedsCopyDispose) + flags |= BLOCK_HAS_COPY_DISPOSE; + if (blockInfo.HasCXXObject) + flags |= BLOCK_HAS_CXX_OBJ; + if (blockInfo.UsesStret) + flags |= BLOCK_USE_STRET; + } auto projectField = [&](unsigned index, CharUnits offset, const Twine &name) -> Address { @@ -777,13 +804,23 @@ index++; }; - addHeaderField(isa, getPointerSize(), "block.isa"); - addHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()), - getIntSize(), "block.flags"); - addHeaderField(llvm::ConstantInt::get(IntTy, 0), - getIntSize(), "block.reserved"); - addHeaderField(blockFn, getPointerSize(), "block.invoke"); - addHeaderField(descriptor, getPointerSize(), "block.descriptor"); + if (!IsOpenCL) { + addHeaderField(isa, getPointerSize(), "block.isa"); + addHeaderField(llvm::ConstantInt::get(IntTy, flags.getBitMask()), + getIntSize(), "block.flags"); + addHeaderField(llvm::ConstantInt::get(IntTy, 0), getIntSize(), + "block.reserved"); + } else { + addHeaderField( + llvm::ConstantInt::get(IntTy, blockInfo.BlockSize.getQuantity()), + getIntSize(), "block.size"); + addHeaderField( + llvm::ConstantInt::get(IntTy, blockInfo.BlockAlign.getQuantity()), + getIntSize(), "block.align"); + } + addHeaderField(blockFn, GenVoidPtrSize, "block.invoke"); + if (!IsOpenCL) + addHeaderField(descriptor, getPointerSize(), "block.descriptor"); } // Finally, capture all the values into the block. 
@@ -993,7 +1030,7 @@ return GenericBlockLiteralType; } -RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, +RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue) { const BlockPointerType *BPT = E->getCallee()->getType()->getAs<BlockPointerType>(); @@ -1018,17 +1055,16 @@ // Get the function pointer from the literal. llvm::Value *FuncPtr = - Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3); - + Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, + CGM.getLangOpts().OpenCL ? 0 : 3); // Add the block literal. CallArgList Args; QualType VoidPtrQualTy = getContext().VoidPtrTy; llvm::Type *GenericVoidPtrTy = VoidPtrTy; if (getLangOpts().OpenCL) { - GenericVoidPtrTy = Builder.getInt8PtrTy( - getContext().getTargetAddressSpace(LangAS::opencl_generic)); + GenericVoidPtrTy = CGM.getOpenCLRuntime().getGenericVoidPointerType(); VoidPtrQualTy = getContext().getPointerType(getContext().getAddrSpaceQualType( getContext().VoidTy, LangAS::opencl_generic)); @@ -1053,7 +1089,7 @@ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo); llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy); - Func = Builder.CreateBitCast(Func, BlockFTyPtr); + Func = Builder.CreatePointerCast(Func, BlockFTyPtr); // Prepare the callee. CGCallee Callee(CGCalleeInfo(), Func); @@ -1122,7 +1158,11 @@ LocalDeclMap, false); } - blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy); + auto GenVoidPtrTy = getContext().getLangOpts().OpenCL + ? getOpenCLRuntime().getGenericVoidPointerType() + : VoidPtrTy; + + blockFn = llvm::ConstantExpr::getPointerCast(blockFn, GenVoidPtrTy); return buildGlobalBlock(*this, blockInfo, blockFn); } @@ -1141,27 +1181,32 @@ ConstantInitBuilder builder(CGM); auto fields = builder.beginStruct(); - // isa - fields.add((!CGM.getContext().getLangOpts().OpenCL) - ? CGM.getNSConcreteGlobalBlock() - : CGM.getNullPointer(CGM.VoidPtrPtrTy, - CGM.getContext().getPointerType(QualType( - CGM.getContext().VoidPtrTy)))); + bool IsOpenCL = CGM.getLangOpts().OpenCL; + if (!IsOpenCL) { + // isa + fields.add(CGM.getNSConcreteGlobalBlock()); - // __flags - BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE; - if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET; - - fields.addInt(CGM.IntTy, flags.getBitMask()); + // __flags + BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE; + if (blockInfo.UsesStret) + flags |= BLOCK_USE_STRET; - // Reserved - fields.addInt(CGM.IntTy, 0); + fields.addInt(CGM.IntTy, flags.getBitMask()); + + // Reserved + fields.addInt(CGM.IntTy, 0); + } else { + fields.addInt(CGM.IntTy, blockInfo.BlockSize.getQuantity()); + fields.addInt(CGM.IntTy, blockInfo.BlockAlign.getQuantity()); + } // Function fields.add(blockFn); - // Descriptor - fields.add(buildBlockDescriptor(CGM, blockInfo)); + if (!IsOpenCL) { + // Descriptor + fields.add(buildBlockDescriptor(CGM, blockInfo)); + } unsigned AddrSpace = 0; if (CGM.getContext().getLangOpts().OpenCL)