arpith-jacob created this revision.
arpith-jacob added reviewers: ABataev, hfinkel.
arpith-jacob added subscribers: cfe-commits, carlo.bertolli, sfantao, kkwli0, 
caomhin, fraggamuffin.
Herald added a subscriber: jholewinski.

This patch adds base support for codegen of the target directive on the NVPTX 
device.
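
For reference, here is a rough, simplified sketch of the control flow that each
generated target entry function and its companion worker function implement,
written as CUDA-style pseudocode. It is not part of the patch; work_id and
active_workers stand in for the __tgt_work_id and __omp_num_threads shared
globals the patch introduces, and the captured arguments are elided.

  __global__ void target_entry(/* captures */) {
    // The master is the first lane of the last warp.
    int master = (blockDim.x - 1) & ~(warpSize - 1);
    if (threadIdx.x > master)
      return;                            // excess threads in the master warp
    if (threadIdx.x < master) {
      target_entry_worker();             // workers enter the worker loop
      return;
    }
    __kmpc_kernel_init(0, threadIdx.x);  // master initializes the runtime
    // ... sequential code of the target region ...
    work_id = 0;                         // signal termination to the workers
    __syncthreads();
  }

  __device__ void target_entry_worker() {
    for (;;) {
      __syncthreads();                   // await work from the master
      if (work_id == 0)
        return;                          // target region is done
      if (threadIdx.x < active_workers) {
        // parallel work (added by follow-up patches)
      }
      __syncthreads();                   // barrier after the parallel region
    }
  }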

http://reviews.llvm.org/D17877

Files:
  lib/CodeGen/CGOpenMPRuntime.cpp
  lib/CodeGen/CGOpenMPRuntime.h
  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
  lib/CodeGen/CGOpenMPRuntimeNVPTX.h
  test/OpenMP/nvptx_target_codegen.cpp

Index: test/OpenMP/nvptx_target_codegen.cpp
===================================================================
--- /dev/null
+++ test/OpenMP/nvptx_target_codegen.cpp
@@ -0,0 +1,587 @@
+// Test target codegen - host bc file has to be created first.
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple powerpc64le-unknown-unknown -omptargets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o %t-ppc-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx64-unknown-unknown -omptargets=nvptx64-nvidia-cuda -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-ppc-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple i386-unknown-unknown -omptargets=nvptx-nvidia-cuda -emit-llvm-bc %s -o %t-x86-host.bc
+// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple nvptx-unknown-unknown -omptargets=nvptx-nvidia-cuda -emit-llvm %s -fopenmp-is-device -omp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-32
+// expected-no-diagnostics
+#ifndef HEADER
+#define HEADER
+
+// CHECK-DAG: [[OMP_NT:@.+]] = common addrspace(3) global i32 0
+// CHECK-DAG: [[OMP_WID:@.+]] = common addrspace(3) global i64 0
+
+template<typename tx, typename ty>
+struct TT{
+  tx X;
+  ty Y;
+};
+
+int foo(int n) {
+  int a = 0;
+  short aa = 0;
+  float b[10];
+  float bn[n];
+  double c[5][10];
+  double cn[5][n];
+  TT<long long, char> d;
+
+  // CHECK: define {{.*}}void [[T1:@__omp_offloading_.+foo.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T1]]()
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T1]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  // CHECK-NEXT: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+  #pragma omp target
+  {
+  }
+
+  // CHECK-NOT: define {{.*}}void [[T2:@__omp_offloading_.+foo.+]]_worker()
+  #pragma omp target if(0)
+  {
+  }
+
+  // CHECK: define {{.*}}void [[T3:@__omp_offloading_.+foo.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T3]](i[[SZ:32|64]] [[ARG1:%.+]])
+  // CHECK: [[AA_ADDR:%.+]] = alloca i[[SZ]],
+  // CHECK: store i[[SZ]] [[ARG1]], i[[SZ]]* [[AA_ADDR]],
+  // CHECK: [[AA_CADDR:%.+]] = bitcast i[[SZ]]* [[AA_ADDR]] to i16*
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T3]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  // CHECK-NEXT: load i16, i16* [[AA_CADDR]],
+  // CHECK: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+  #pragma omp target if(1)
+  {
+    aa += 1;
+  }
+
+  // CHECK: define {{.*}}void [[T4:@__omp_offloading_.+foo.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T4]](
+  // Create local storage for each capture.
+  // CHECK:    [[LOCAL_A:%.+]] = alloca i[[SZ]]
+  // CHECK:    [[LOCAL_B:%.+]] = alloca [10 x float]*
+  // CHECK:    [[LOCAL_VLA1:%.+]] = alloca i[[SZ]]
+  // CHECK:    [[LOCAL_BN:%.+]] = alloca float*
+  // CHECK:    [[LOCAL_C:%.+]] = alloca [5 x [10 x double]]*
+  // CHECK:    [[LOCAL_VLA2:%.+]] = alloca i[[SZ]]
+  // CHECK:    [[LOCAL_VLA3:%.+]] = alloca i[[SZ]]
+  // CHECK:    [[LOCAL_CN:%.+]] = alloca double*
+  // CHECK:    [[LOCAL_D:%.+]] = alloca [[TT:%.+]]*
+  // CHECK-DAG: store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
+  // CHECK-DAG: store [10 x float]* [[ARG_B:%.+]], [10 x float]** [[LOCAL_B]]
+  // CHECK-DAG: store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]]
+  // CHECK-DAG: store float* [[ARG_BN:%.+]], float** [[LOCAL_BN]]
+  // CHECK-DAG: store [5 x [10 x double]]* [[ARG_C:%.+]], [5 x [10 x double]]** [[LOCAL_C]]
+  // CHECK-DAG: store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]]
+  // CHECK-DAG: store i[[SZ]] [[ARG_VLA3:%.+]], i[[SZ]]* [[LOCAL_VLA3]]
+  // CHECK-DAG: store double* [[ARG_CN:%.+]], double** [[LOCAL_CN]]
+  // CHECK-DAG: store [[TT]]* [[ARG_D:%.+]], [[TT]]** [[LOCAL_D]]
+  //
+  // CHECK-64-DAG: [[REF_A:%.+]] = bitcast i64* [[LOCAL_A]] to i32*
+  // CHECK-DAG:    [[REF_B:%.+]] = load [10 x float]*, [10 x float]** [[LOCAL_B]],
+  // CHECK-DAG:    [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
+  // CHECK-DAG:    [[REF_BN:%.+]] = load float*, float** [[LOCAL_BN]],
+  // CHECK-DAG:    [[REF_C:%.+]] = load [5 x [10 x double]]*, [5 x [10 x double]]** [[LOCAL_C]],
+  // CHECK-DAG:    [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
+  // CHECK-DAG:    [[VAL_VLA3:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA3]],
+  // CHECK-DAG:    [[REF_CN:%.+]] = load double*, double** [[LOCAL_CN]],
+  // CHECK-DAG:    [[REF_D:%.+]] = load [[TT]]*, [[TT]]** [[LOCAL_D]],
+  //
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T4]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  //
+  // Use captures.
+  // CHECK-64-DAG:  load i32, i32* [[REF_A]]
+  // CHECK-32-DAG:  load i32, i32* [[LOCAL_A]]
+  // CHECK-DAG:  getelementptr inbounds [10 x float], [10 x float]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
+  // CHECK-DAG:  getelementptr inbounds float, float* [[REF_BN]], i[[SZ]] 3
+  // CHECK-DAG:  getelementptr inbounds [5 x [10 x double]], [5 x [10 x double]]* [[REF_C]], i[[SZ]] 0, i[[SZ]] 1
+  // CHECK-DAG:  getelementptr inbounds double, double* [[REF_CN]], i[[SZ]] %{{.+}}
+  // CHECK-DAG:     getelementptr inbounds [[TT]], [[TT]]* [[REF_D]], i32 0, i32 0
+  //
+  // CHECK: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+  #pragma omp target if(n>20)
+  {
+    a += 1;
+    b[2] += 1.0;
+    bn[3] += 1.0;
+    c[1][2] += 1.0;
+    cn[1][3] += 1.0;
+    d.X += 1;
+    d.Y += 1;
+  }
+
+  return a;
+}
+
+template<typename tx>
+tx ftemplate(int n) {
+  tx a = 0;
+  short aa = 0;
+  tx b[10];
+
+  #pragma omp target if(n>40)
+  {
+    a += 1;
+    aa += 1;
+    b[2] += 1;
+  }
+
+  return a;
+}
+
+static
+int fstatic(int n) {
+  int a = 0;
+  short aa = 0;
+  char aaa = 0;
+  int b[10];
+
+  #pragma omp target if(n>50)
+  {
+    a += 1;
+    aa += 1;
+    aaa += 1;
+    b[2] += 1;
+  }
+
+  return a;
+}
+
+struct S1 {
+  double a;
+
+  int r1(int n){
+    int b = n+1;
+    short int c[2][n];
+
+    #pragma omp target if(n>60)
+    {
+      this->a = (double)b + 1.5;
+      c[1][1] = ++a;
+    }
+
+    return c[1][1] + (int)b;
+  }
+};
+
+int bar(int n){
+  int a = 0;
+
+  a += foo(n);
+
+  S1 S;
+  a += S.r1(n);
+
+  a += fstatic(n);
+
+  a += ftemplate<int>(n);
+
+  return a;
+}
+
+  // CHECK: define {{.*}}void [[T5:@__omp_offloading_.+static.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T5]](
+  // Create local storage for each capture.
+  // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
+  // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]
+  // CHECK:  [[LOCAL_AAA:%.+]] = alloca i[[SZ]]
+  // CHECK:  [[LOCAL_B:%.+]] = alloca [10 x i32]*
+  // CHECK-DAG:  store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
+  // CHECK-DAG:  store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]]
+  // CHECK-DAG:  store i[[SZ]] [[ARG_AAA:%.+]], i[[SZ]]* [[LOCAL_AAA]]
+  // CHECK-DAG:  store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]]
+  // Store captures in the context.
+  // CHECK-64-DAG:   [[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32*
+  // CHECK-DAG:      [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
+  // CHECK-DAG:      [[REF_AAA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AAA]] to i8*
+  // CHECK-DAG:      [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
+  //
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T5]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  //
+  // CHECK-64-DAG: load i32, i32* [[REF_A]]
+  // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
+  // CHECK-DAG:    load i16, i16* [[REF_AA]]
+  // CHECK-DAG:    getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
+  //
+  // CHECK: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+
+
+  // CHECK: define {{.*}}void [[T6:@__omp_offloading_.+S1.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T6]](
+  // Create local storage for each capture.
+  // CHECK:       [[LOCAL_THIS:%.+]] = alloca [[S1:%struct.*]]*
+  // CHECK:       [[LOCAL_B:%.+]] = alloca i[[SZ]]
+  // CHECK:       [[LOCAL_VLA1:%.+]] = alloca i[[SZ]]
+  // CHECK:       [[LOCAL_VLA2:%.+]] = alloca i[[SZ]]
+  // CHECK:       [[LOCAL_C:%.+]] = alloca i16*
+  // CHECK-DAG:   store [[S1]]* [[ARG_THIS:%.+]], [[S1]]** [[LOCAL_THIS]]
+  // CHECK-DAG:   store i[[SZ]] [[ARG_B:%.+]], i[[SZ]]* [[LOCAL_B]]
+  // CHECK-DAG:   store i[[SZ]] [[ARG_VLA1:%.+]], i[[SZ]]* [[LOCAL_VLA1]]
+  // CHECK-DAG:   store i[[SZ]] [[ARG_VLA2:%.+]], i[[SZ]]* [[LOCAL_VLA2]]
+  // CHECK-DAG:   store i16* [[ARG_C:%.+]], i16** [[LOCAL_C]]
+  // Store captures in the context.
+  // CHECK-DAG:   [[REF_THIS:%.+]] = load [[S1]]*, [[S1]]** [[LOCAL_THIS]],
+  // CHECK-64-DAG:[[REF_B:%.+]] = bitcast i[[SZ]]* [[LOCAL_B]] to i32*
+  // CHECK-DAG:   [[VAL_VLA1:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA1]],
+  // CHECK-DAG:   [[VAL_VLA2:%.+]] = load i[[SZ]], i[[SZ]]* [[LOCAL_VLA2]],
+  // CHECK-DAG:   [[REF_C:%.+]] = load i16*, i16** [[LOCAL_C]],
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T6]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  // Use captures.
+  // CHECK-DAG:   getelementptr inbounds [[S1]], [[S1]]* [[REF_THIS]], i32 0, i32 0
+  // CHECK-64-DAG:load i32, i32* [[REF_B]]
+  // CHECK-32-DAG:load i32, i32* [[LOCAL_B]]
+  // CHECK-DAG:   getelementptr inbounds i16, i16* [[REF_C]], i[[SZ]] %{{.+}}
+  // CHECK: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+
+
+  // CHECK: define {{.*}}void [[T7:@__omp_offloading_.+template.+]]_worker()
+  // CHECK: br label {{%?}}[[AWAIT_WORK:.+]]
+  //
+  // CHECK: [[AWAIT_WORK]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: [[WORK:%.+]] = load i64, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: [[SHOULD_EXIT:%.+]] = icmp eq i64 [[WORK]], 0
+  // CHECK-NEXT: br i1 [[SHOULD_EXIT]], label {{%?}}[[EXIT:.+]], label {{%?}}[[SEL_WORKERS:.+]]
+  //
+  // CHECK: [[SEL_WORKERS]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[NT:%.+]] = load i32, i32 addrspace(3)* [[OMP_NT]]
+  // CHECK-NEXT: [[IS_ACTIVE:%.+]] = icmp slt i32 [[TID]], [[NT]]
+  // CHECK-NEXT: br i1 [[IS_ACTIVE]], label {{%?}}[[EXEC_PARALLEL:.+]], label {{%?}}[[BAR_PARALLEL:.+]]
+  //
+  // CHECK: [[EXEC_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[TERM_PARALLEL:.+]]
+  //
+  // CHECK: [[TERM_PARALLEL]]
+  // CHECK-NEXT: br label {{%?}}[[BAR_PARALLEL]]
+  //
+  // CHECK: [[BAR_PARALLEL]]
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[AWAIT_WORK]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+
+  // CHECK: define {{.*}}void [[T7]](
+  // Create local storage for each capture.
+  // CHECK:  [[LOCAL_A:%.+]] = alloca i[[SZ]]
+  // CHECK:  [[LOCAL_AA:%.+]] = alloca i[[SZ]]
+  // CHECK:  [[LOCAL_B:%.+]] = alloca [10 x i32]*
+  // CHECK-DAG:  store i[[SZ]] [[ARG_A:%.+]], i[[SZ]]* [[LOCAL_A]]
+  // CHECK-DAG:  store i[[SZ]] [[ARG_AA:%.+]], i[[SZ]]* [[LOCAL_AA]]
+  // CHECK-DAG:   store [10 x i32]* [[ARG_B:%.+]], [10 x i32]** [[LOCAL_B]]
+  // Store captures in the context.
+  // CHECK-64-DAG:[[REF_A:%.+]] = bitcast i[[SZ]]* [[LOCAL_A]] to i32*
+  // CHECK-DAG:   [[REF_AA:%.+]] = bitcast i[[SZ]]* [[LOCAL_AA]] to i16*
+  // CHECK-DAG:   [[REF_B:%.+]] = load [10 x i32]*, [10 x i32]** [[LOCAL_B]],
+  //
+  // CHECK: [[NTID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
+  // CHECK-NEXT: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize()
+  // CHECK-NEXT: [[A:%.+]] = sub i32 [[WS]], 1
+  // CHECK-NEXT: [[B:%.+]] = sub i32 [[NTID]], 1
+  // CHECK-NEXT: [[C:%.+]] = xor i32 [[A]], -1
+  // CHECK-NEXT: [[MID:%.+]] = and i32 [[B]], [[C]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: [[EXCESS:%.+]] = icmp ugt i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[EXCESS]], label {{%?}}[[EXIT:.+]], label {{%?}}[[CHECK_WORKER:.+]]
+  //
+  // CHECK: [[CHECK_WORKER]]
+  // CHECK-NEXT: [[IS_WORKER:%.+]] = icmp ult i32 [[TID]], [[MID]]
+  // CHECK-NEXT: br i1 [[IS_WORKER]], label {{%?}}[[WORKER:.+]], label {{%?}}[[MASTER:.+]]
+  //
+  // CHECK: [[WORKER]]
+  // CHECK-NEXT: call void [[T7]]_worker()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[MASTER]]
+  // CHECK-NEXT: [[TID:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
+  // CHECK-NEXT: call void @__kmpc_kernel_init(i32 0, i32 [[TID]])
+  //
+  // CHECK-64-DAG: load i32, i32* [[REF_A]]
+  // CHECK-32-DAG: load i32, i32* [[LOCAL_A]]
+  // CHECK-DAG:    load i16, i16* [[REF_AA]]
+  // CHECK-DAG:    getelementptr inbounds [10 x i32], [10 x i32]* [[REF_B]], i[[SZ]] 0, i[[SZ]] 2
+  //
+  // CHECK: br label {{%?}}[[TERM:.+]]
+  //
+  // CHECK: [[TERM]]
+  // CHECK-NEXT: store i64 0, i64 addrspace(3)* [[OMP_WID]],
+  // CHECK-NEXT: call void @llvm.nvvm.barrier0()
+  // CHECK-NEXT: br label {{%?}}[[EXIT]]
+  //
+  // CHECK: [[EXIT]]
+  // CHECK-NEXT: ret void
+#endif
Index: lib/CodeGen/CGOpenMPRuntimeNVPTX.h
===================================================================
--- lib/CodeGen/CGOpenMPRuntimeNVPTX.h
+++ lib/CodeGen/CGOpenMPRuntimeNVPTX.h
@@ -16,13 +16,176 @@
 #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIMENVPTX_H
 
 #include "CGOpenMPRuntime.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtOpenMP.h"
+#include "llvm/IR/CallSite.h"
 
 namespace clang {
 namespace CodeGen {
 
 class CGOpenMPRuntimeNVPTX : public CGOpenMPRuntime {
 public:
   explicit CGOpenMPRuntimeNVPTX(CodeGenModule &CGM);
+
+private:
+  /// \brief Creates offloading entry for the provided entry ID \a ID,
+  /// address \a Addr and size \a Size.
+  void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
+                          uint64_t Size) override;
+
+  enum OpenMPRTLFunctionNVPTX {
+    /// \brief Call to void __kmpc_kernel_init(kmp_int32 omp_handle,
+    /// kmp_int32 thread_limit);
+    OMPRTL_NVPTX__kmpc_kernel_init = OMPRTL_last + 1,
+  };
+
+  /// \brief Returns specified OpenMP runtime function for the current OpenMP
+  /// implementation.
+  /// \param Function OpenMP runtime function.
+  /// \return Specified function.
+  llvm::Constant *createRuntimeFunction(unsigned Function) override;
+
+  /// \brief Emit outlined function for 'target' directive on the NVPTX
+  /// device.
+  /// \param D Directive to emit.
+  /// \param ParentName Name of the function that encloses the target region.
+  /// \param OutlinedFn Outlined function value to be defined by this call.
+  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+  /// \param IsOffloadEntry True if the outlined function is an offload entry.
+  /// An outlined function may not be an entry if, e.g., the if clause always
+  /// evaluates to false.
+  void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
+                                  StringRef ParentName,
+                                  llvm::Function *&OutlinedFn,
+                                  llvm::Constant *&OutlinedFnID,
+                                  bool IsOffloadEntry) override;
+
+private:
+  ///
+  /// NVPTX calls
+  ///
+
+  /// \brief Get the GPU warp size.
+  llvm::Value *GetNVPTXWarpSize(CodeGenFunction &CGF) {
+    CGBuilderTy &Bld = CGF.Builder;
+    return Bld.CreateCall(
+        llvm::Intrinsic::getDeclaration(
+            &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
+        {}, "nvptx_warp_size");
+  }
+
+  /// \brief Get the id of the current thread on the GPU.
+  llvm::Value *GetNVPTXThreadID(CodeGenFunction &CGF) {
+    CGBuilderTy &Bld = CGF.Builder;
+    return Bld.CreateCall(
+        llvm::Intrinsic::getDeclaration(
+            &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
+        {}, "nvptx_tid");
+  }
+
+  /// \brief Get the maximum number of threads in a block of the GPU.
+  llvm::Value *GetNVPTXNumThreads(CodeGenFunction &CGF) {
+    CGBuilderTy &Bld = CGF.Builder;
+    return Bld.CreateCall(
+        llvm::Intrinsic::getDeclaration(
+            &CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
+        {}, "nvptx_num_threads");
+  }
+
+  /// \brief Emit a barrier to synchronize all threads in a block.
+  void GetNVPTXCTABarrier(CodeGenFunction &CGF) {
+    CGBuilderTy &Bld = CGF.Builder;
+    Bld.CreateCall(llvm::Intrinsic::getDeclaration(
+                       &CGM.getModule(), llvm::Intrinsic::nvvm_barrier0),
+                   {});
+  }
+
+  /// \brief Synchronize all GPU threads in a block.
+  void SyncCTAThreads(CodeGenFunction &CGF) { GetNVPTXCTABarrier(CGF); }
+
+  ///
+  /// OMP calls
+  ///
+
+  /// \brief Get the thread id of the OMP master thread.
+  /// The master thread id is the first thread (lane) of the last warp in the
+  /// GPU block.  Warp size is assumed to be some power of 2.
+  /// Thread id is 0 indexed.
+  /// E.g.: If NumThreads is 33, master id is 32.
+  ///      If NumThreads is 64, master id is 32.
+  ///      If NumThreads is 1024, master id is 992.
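+  /// In closed form: MasterID = (NumThreads - 1) & ~(WarpSize - 1).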
+  llvm::Value *GetMasterThreadID(CodeGenFunction &CGF) {
+    CGBuilderTy &Bld = CGF.Builder;
+    llvm::Value *NumThreads = GetNVPTXNumThreads(CGF);
+
+    // We assume that the warp size is a power of 2.
+    llvm::Value *Mask = Bld.CreateSub(GetNVPTXWarpSize(CGF), Bld.getInt32(1));
+
+    return Bld.CreateAnd(Bld.CreateSub(NumThreads, Bld.getInt32(1)),
+                         Bld.CreateNot(Mask), "master_tid");
+  }
+
+private:
+  // NVPTX Address space
+  enum ADDRESS_SPACE { GLOBAL_ADDRESS_SPACE = 1, SHARED_ADDRESS_SPACE = 3 };
+
+  // Master-worker control state.
+  // Number of requested OMP threads in parallel region.
+  llvm::GlobalVariable *ActiveWorkers;
+  // Outlined function for the workers to execute.
+  llvm::GlobalVariable *WorkID;
+
+  class EntryFunctionState {
+  public:
+    llvm::BasicBlock *ExitBB;
+
+    EntryFunctionState() : ExitBB(nullptr) {}
+  };
+
+  class WorkerFunctionState {
+  public:
+    llvm::Function *WorkerFn;
+    const CGFunctionInfo *CGFI;
+
+    WorkerFunctionState(CodeGenModule &CGM) : WorkerFn(nullptr), CGFI(nullptr) {
+      createWorkerFunction(CGM);
+    }
+
+  private:
+    void createWorkerFunction(CodeGenModule &CGM) {
+      auto &Ctx = CGM.getContext();
+
+      // Create a worker function with no arguments.
+      FunctionType::ExtInfo EI;
+      CGFI = &CGM.getTypes().arrangeFreeFunctionDeclaration(
+          Ctx.VoidTy, {}, EI, /*isVariadic=*/false);
+
+      WorkerFn =
+          llvm::Function::Create(CGM.getTypes().GetFunctionType(*CGFI),
+                                 llvm::GlobalValue::InternalLinkage,
+                                 /* placeholder */ "_worker", &CGM.getModule());
+      CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
+      WorkerFn->setLinkage(llvm::GlobalValue::InternalLinkage);
+      WorkerFn->addFnAttr(llvm::Attribute::NoInline);
+    }
+  };
+
+  /// \brief Initialize master-worker control state.
+  void initializeEnvironment();
+
+  /// \brief Emit the worker function for the current target region.
+  void emitWorkerFunction(WorkerFunctionState &WST);
+
+  /// \brief Helper for worker function. Emit body of worker loop.
+  void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
+
+  /// \brief Helper for target entry function. Guide the master and worker
+  /// threads to their respective locations.
+  void emitEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
+                       WorkerFunctionState &WST);
+
+  /// \brief Signal termination of OMP execution.
+  void emitEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
 };
 
 } // CodeGen namespace.
Index: lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
===================================================================
--- lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
+++ lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp
@@ -18,4 +18,242 @@
 using namespace CodeGen;
 
 CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
-    : CGOpenMPRuntime(CGM) {}
+    : CGOpenMPRuntime(CGM), ActiveWorkers(nullptr), WorkID(nullptr) {
+  if (!CGM.getLangOpts().OpenMPIsDevice)
+    llvm_unreachable("OpenMP NVPTX can only handle device code.");
+
+  // Called once per module during initialization.
+  initializeEnvironment();
+}
+
+void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
+                                              llvm::Constant *Addr,
+                                              uint64_t Size) {
+  auto *F = dyn_cast<llvm::Function>(Addr);
+  if (!F)
+    return;
+  llvm::Module *M = F->getParent();
+  llvm::LLVMContext &Ctx = M->getContext();
+
+  // Get "nvvm.annotations" metadata node
+  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
+
+  llvm::Metadata *MDVals[] = {
+      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, "kernel"),
+      llvm::ConstantAsMetadata::get(
+          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
+  // Append metadata to nvvm.annotations
+  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
+}
+
+/// \brief Returns specified OpenMP runtime function for the current OpenMP
+/// implementation.
+/// \param Function OpenMP runtime function.
+/// \return Specified function.
+llvm::Constant *CGOpenMPRuntimeNVPTX::createRuntimeFunction(unsigned Function) {
+  llvm::Constant *RTLFn = nullptr;
+  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
+  case OMPRTL_NVPTX__kmpc_kernel_init: {
+    // Build void __kmpc_kernel_init(kmp_int32 omp_handle,
+    // kmp_int32 thread_limit);
+    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int32Ty};
+    llvm::FunctionType *FnTy =
+        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
+    break;
+  }
+  }
+
+  // If we do not have a specialized version of this function, attempt to get
+  // a default one.
+  if (!RTLFn)
+    RTLFn = CGOpenMPRuntime::createRuntimeFunction(Function);
+
+  return RTLFn;
+}
+
+void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
+    const OMPExecutableDirective &D, StringRef ParentName,
+    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+    bool IsOffloadEntry) {
+  if (!IsOffloadEntry) // Nothing to do.
+    return;
+
+  assert(!ParentName.empty() && "Invalid target region parent name!");
+
+  const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+
+  EntryFunctionState EST;
+  WorkerFunctionState WST(CGM);
+
+  // Emit target region as a standalone region.
+  auto &&CodeGen = [&EST, &WST, &CS, this](CodeGenFunction &CGF) {
+    emitEntryHeader(CGF, EST, WST);
+    CGF.EmitStmt(CS.getCapturedStmt());
+    emitEntryFooter(CGF, EST);
+  };
+  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
+                                   IsOffloadEntry, CodeGen);
+
+  // Create the worker function
+  emitWorkerFunction(WST);
+
+  // Now change the name of the worker function to correspond to this target
+  // region's entry function.
+  WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
+}
+
+void CGOpenMPRuntimeNVPTX::initializeEnvironment() {
+  //
+  // Initialize master-worker control state in shared memory.
+  //
+
+  auto &DL = CGM.getDataLayout();
+  ActiveWorkers = new llvm::GlobalVariable(
+      CGM.getModule(), CGM.Int32Ty, /*isConstant=*/false,
+      llvm::GlobalValue::CommonLinkage,
+      llvm::Constant::getNullValue(CGM.Int32Ty), "__omp_num_threads", 0,
+      llvm::GlobalVariable::NotThreadLocal, SHARED_ADDRESS_SPACE, false);
+  ActiveWorkers->setAlignment(DL.getPrefTypeAlignment(CGM.Int32Ty));
+
+  WorkID = new llvm::GlobalVariable(
+      CGM.getModule(), CGM.Int64Ty, /*isConstant=*/false,
+      llvm::GlobalValue::CommonLinkage,
+      llvm::Constant::getNullValue(CGM.Int64Ty), "__tgt_work_id", 0,
+      llvm::GlobalVariable::NotThreadLocal, SHARED_ADDRESS_SPACE, false);
+  WorkID->setAlignment(DL.getPrefTypeAlignment(CGM.Int64Ty));
+}
+
+void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
+  auto &Ctx = CGM.getContext();
+
+  CodeGenFunction CGF(CGM, true);
+  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
+  emitWorkerLoop(CGF, WST);
+  CGF.FinishFunction();
+}
+
+void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
+                                          WorkerFunctionState &WST) {
+  //
+  // The workers enter this loop and wait for parallel work from the master.
+  // When the master encounters a parallel region, it sets up the work and
+  // variable arguments and wakes up the workers.  Each worker first checks
+  // whether it is required for the parallel region, i.e., whether its thread
+  // id is within the number of requested parallel threads.  The activated
+  // workers load the variable arguments and execute the parallel work.
+  //
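+  // In outline, the loop below is the following state machine (one line per
+  // basic block emitted by this function):
+  //
+  //   .await.work:         barrier; WorkID == 0 ? .sleepy.hollow
+  //                                              : .select.workers
+  //   .select.workers:     tid < ActiveWorkers ? .execute.parallel
+  //                                            : .barrier.parallel
+  //   .execute.parallel:   (parallel work, TODO) -> .terminate.parallel
+  //   .terminate.parallel: -> .barrier.parallel
+  //   .barrier.parallel:   barrier -> .await.work
+  //   .sleepy.hollow:      exit
+  //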
+
+  CGBuilderTy &Bld = CGF.Builder;
+
+  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
+  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
+  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
+  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
+  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
+  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".sleepy.hollow");
+
+  CGF.EmitBranch(AwaitBB);
+
+  // Workers wait for work from master.
+  CGF.EmitBlock(AwaitBB);
+  // Wait for parallel work
+  SyncCTAThreads(CGF);
+  // On the termination condition (WorkID == 0), exit the loop.
+  llvm::Value *ShouldTerminate = Bld.CreateICmpEQ(
+      Bld.CreateAlignedLoad(WorkID, WorkID->getAlignment()),
+      llvm::Constant::getNullValue(WorkID->getType()->getElementType()),
+      "should_terminate");
+  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
+
+  // Activate requested workers.
+  CGF.EmitBlock(SelectWorkersBB);
+  llvm::Value *ThreadID = GetNVPTXThreadID(CGF);
+  llvm::Value *ActiveThread = Bld.CreateICmpSLT(
+      ThreadID,
+      Bld.CreateAlignedLoad(ActiveWorkers, ActiveWorkers->getAlignment()),
+      "active_thread");
+  Bld.CreateCondBr(ActiveThread, ExecuteBB, BarrierBB);
+
+  // Signal start of parallel region.
+  CGF.EmitBlock(ExecuteBB);
+  // TODO: Add parallel work.
+
+  // Signal end of parallel region.
+  CGF.EmitBlock(TerminateBB);
+  CGF.EmitBranch(BarrierBB);
+
+  // All active and inactive workers wait at a barrier after parallel region.
+  CGF.EmitBlock(BarrierBB);
+  // Barrier after parallel region.
+  SyncCTAThreads(CGF);
+  CGF.EmitBranch(AwaitBB);
+
+  // Exit target region.
+  CGF.EmitBlock(ExitBB);
+}
+
+// Set up CUDA threads for the master-worker OpenMP scheme.
+void CGOpenMPRuntimeNVPTX::emitEntryHeader(CodeGenFunction &CGF,
+                                           EntryFunctionState &EST,
+                                           WorkerFunctionState &WST) {
+  CGBuilderTy &Bld = CGF.Builder;
+
+  // Get the master thread id.
+  llvm::Value *MasterID = GetMasterThreadID(CGF);
+  // Current thread's identifier.
+  llvm::Value *ThreadID = GetNVPTXThreadID(CGF);
+
+  // Set up basic blocks in the entry function.
+  llvm::BasicBlock *WorkerCheckBB = CGF.createBasicBlock(".check.for.worker");
+  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
+  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
+  EST.ExitBB = CGF.createBasicBlock(".sleepy.hollow");
+
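+  // Thread dispatch (a summary of the branches emitted below):
+  //   tid >  MasterID -> .sleepy.hollow (excess threads in the master warp)
+  //   tid <  MasterID -> .worker (enter the worker loop, then exit)
+  //   tid == MasterID -> .master (initialize the runtime, run serial code)
+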
+  // The head (master thread) marches on while the companion threads in its
+  // warp go to sleep.
+  llvm::Value *ShouldDie =
+      Bld.CreateICmpUGT(ThreadID, MasterID, "excess_in_master_warp");
+  Bld.CreateCondBr(ShouldDie, EST.ExitBB, WorkerCheckBB);
+
+  // Select worker threads...
+  CGF.EmitBlock(WorkerCheckBB);
+  llvm::Value *IsWorker = Bld.CreateICmpULT(ThreadID, MasterID, "is_worker");
+  Bld.CreateCondBr(IsWorker, WorkerBB, MasterBB);
+
+  // ... and send to worker loop, awaiting parallel invocation.
+  CGF.EmitBlock(WorkerBB);
+  CGF.EmitCallOrInvoke(WST.WorkerFn, {});
+  CGF.EmitBranch(EST.ExitBB);
+
+  // Only master thread executes subsequent serial code.
+  CGF.EmitBlock(MasterBB);
+
+  // First action in sequential region:
+  // Initialize the state of the OpenMP runtime library on the GPU.
+  llvm::Value *Args[] = {Bld.getInt32(/*OmpHandle=*/0), GetNVPTXThreadID(CGF)};
+  CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init),
+                      Args);
+}
+
+void CGOpenMPRuntimeNVPTX::emitEntryFooter(CodeGenFunction &CGF,
+                                           EntryFunctionState &EST) {
+  CGBuilderTy &Bld = CGF.Builder;
+  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
+  CGF.EmitBranch(TerminateBB);
+
+  CGF.EmitBlock(TerminateBB);
+  // Signal termination condition.
+  Bld.CreateAlignedStore(
+      llvm::Constant::getNullValue(WorkID->getType()->getElementType()), WorkID,
+      WorkID->getAlignment());
+  // Barrier to terminate worker threads.
+  SyncCTAThreads(CGF);
+  // Master thread jumps to exit point.
+  CGF.EmitBranch(EST.ExitBB);
+
+  CGF.EmitBlock(EST.ExitBB);
+}
Index: lib/CodeGen/CGOpenMPRuntime.h
===================================================================
--- lib/CodeGen/CGOpenMPRuntime.h
+++ lib/CodeGen/CGOpenMPRuntime.h
@@ -47,7 +47,136 @@
 typedef llvm::function_ref<void(CodeGenFunction &)> RegionCodeGenTy;
 
 class CGOpenMPRuntime {
+protected:
   CodeGenModule &CGM;
+
+  enum OpenMPRTLFunction {
+    /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
+    /// kmpc_micro microtask, ...);
+    OMPRTL__kmpc_fork_call,
+    /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
+    /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
+    OMPRTL__kmpc_threadprivate_cached,
+    /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
+    /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+    OMPRTL__kmpc_threadprivate_register,
+    // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
+    OMPRTL__kmpc_global_thread_num,
+    // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *crit);
+    OMPRTL__kmpc_critical,
+    // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
+    // global_tid, kmp_critical_name *crit, uintptr_t hint);
+    OMPRTL__kmpc_critical_with_hint,
+    // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *crit);
+    OMPRTL__kmpc_end_critical,
+    // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+    // global_tid);
+    OMPRTL__kmpc_cancel_barrier,
+    // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_barrier,
+    // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_for_static_fini,
+    // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+    // global_tid);
+    OMPRTL__kmpc_serialized_parallel,
+    // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+    // global_tid);
+    OMPRTL__kmpc_end_serialized_parallel,
+    // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+    // kmp_int32 num_threads);
+    OMPRTL__kmpc_push_num_threads,
+    // Call to void __kmpc_flush(ident_t *loc);
+    OMPRTL__kmpc_flush,
+    // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_master,
+    // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_master,
+    // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
+    // int end_part);
+    OMPRTL__kmpc_omp_taskyield,
+    // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_single,
+    // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_single,
+    // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
+    // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
+    // kmp_routine_entry_t *task_entry);
+    OMPRTL__kmpc_omp_task_alloc,
+    // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
+    // new_task);
+    OMPRTL__kmpc_omp_task,
+    // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
+    // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
+    // kmp_int32 didit);
+    OMPRTL__kmpc_copyprivate,
+    // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
+    // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
+    // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
+    OMPRTL__kmpc_reduce,
+    // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
+    // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
+    // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
+    // *lck);
+    OMPRTL__kmpc_reduce_nowait,
+    // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *lck);
+    OMPRTL__kmpc_end_reduce,
+    // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
+    // kmp_critical_name *lck);
+    OMPRTL__kmpc_end_reduce_nowait,
+    // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t * new_task);
+    OMPRTL__kmpc_omp_task_begin_if0,
+    // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
+    // kmp_task_t * new_task);
+    OMPRTL__kmpc_omp_task_complete_if0,
+    // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_ordered,
+    // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_ordered,
+    // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
+    // global_tid);
+    OMPRTL__kmpc_omp_taskwait,
+    // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_taskgroup,
+    // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
+    OMPRTL__kmpc_end_taskgroup,
+    // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
+    // int proc_bind);
+    OMPRTL__kmpc_push_proc_bind,
+    // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
+    // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
+    // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
+    OMPRTL__kmpc_omp_task_with_deps,
+    // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
+    // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
+    // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
+    OMPRTL__kmpc_omp_wait_deps,
+    // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
+    // global_tid, kmp_int32 cncl_kind);
+    OMPRTL__kmpc_cancellationpoint,
+    // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
+    // kmp_int32 cncl_kind);
+    OMPRTL__kmpc_cancel,
+
+    //
+    // Offloading related calls
+    //
+    // Call to int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
+    // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
+    // *arg_types);
+    OMPRTL__tgt_target,
+    // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
+    OMPRTL__tgt_register_lib,
+    // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
+    OMPRTL__tgt_unregister_lib,
+
+    OMPRTL_last = OMPRTL__tgt_unregister_lib,
+  };
+
+private:
   /// \brief Default const ident_t object used for initialization of all other
   /// ident_t objects.
   llvm::Constant *DefaultOpenMPPSource;
@@ -251,11 +380,13 @@
   /// compilation unit. The function that does the registration is returned.
   llvm::Function *createOffloadingBinaryDescriptorRegistration();
 
+protected:
   /// \brief Creates offloading entry for the provided entry ID \a ID,
   /// address \a Addr and size \a Size.
-  void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
-                          uint64_t Size);
+  virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
+                                  uint64_t Size);
 
+private:
   /// \brief Creates all the offload entries in the current compilation unit
   /// along with the associated metadata.
   void createOffloadEntriesAndInfoMetadata();
@@ -294,11 +425,29 @@
   /// \brief Returns pointer to kmpc_micro type.
   llvm::Type *getKmpc_MicroPointerTy();
 
+protected:
   /// \brief Returns specified OpenMP runtime function.
   /// \param Function OpenMP runtime function.
   /// \return Specified function.
-  llvm::Constant *createRuntimeFunction(unsigned Function);
+  virtual llvm::Constant *createRuntimeFunction(unsigned Function);
 
+  /// \brief Helper to emit outlined function for 'target' directive.
+  /// \param D Directive to emit.
+  /// \param ParentName Name of the function that encloses the target region.
+  /// \param OutlinedFn Outlined function value to be defined by this call.
+  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
+  /// \param IsOffloadEntry True if the outlined function is an offload entry.
+  /// \param CodeGen Codegen lambda specific to an accelerator device.
+  /// An outlined function may not be an entry if, e.g., the if clause always
+  /// evaluates to false.
+  void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
+                                        StringRef ParentName,
+                                        llvm::Function *&OutlinedFn,
+                                        llvm::Constant *&OutlinedFnID,
+                                        bool IsOffloadEntry,
+                                        const RegionCodeGenTy &CodeGen);
+
+private:
   /// \brief Returns __kmpc_for_static_init_* runtime function for the specified
   /// size \a IVSize and sign \a IVSigned.
   llvm::Constant *createForStaticInitFunction(unsigned IVSize, bool IVSigned);
Index: lib/CodeGen/CGOpenMPRuntime.cpp
===================================================================
--- lib/CodeGen/CGOpenMPRuntime.cpp
+++ lib/CodeGen/CGOpenMPRuntime.cpp
@@ -363,130 +363,6 @@
   OMP_sch_default = OMP_sch_static,
 };
 
-enum OpenMPRTLFunction {
-  /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
-  /// kmpc_micro microtask, ...);
-  OMPRTL__kmpc_fork_call,
-  /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
-  /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
-  OMPRTL__kmpc_threadprivate_cached,
-  /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
-  /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
-  OMPRTL__kmpc_threadprivate_register,
-  // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
-  OMPRTL__kmpc_global_thread_num,
-  // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
-  // kmp_critical_name *crit);
-  OMPRTL__kmpc_critical,
-  // Call to void __kmpc_critical_with_hint(ident_t *loc, kmp_int32
-  // global_tid, kmp_critical_name *crit, uintptr_t hint);
-  OMPRTL__kmpc_critical_with_hint,
-  // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
-  // kmp_critical_name *crit);
-  OMPRTL__kmpc_end_critical,
-  // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
-  // global_tid);
-  OMPRTL__kmpc_cancel_barrier,
-  // Call to void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_barrier,
-  // Call to void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_for_static_fini,
-  // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
-  // global_tid);
-  OMPRTL__kmpc_serialized_parallel,
-  // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
-  // global_tid);
-  OMPRTL__kmpc_end_serialized_parallel,
-  // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
-  // kmp_int32 num_threads);
-  OMPRTL__kmpc_push_num_threads,
-  // Call to void __kmpc_flush(ident_t *loc);
-  OMPRTL__kmpc_flush,
-  // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
-  OMPRTL__kmpc_master,
-  // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
-  OMPRTL__kmpc_end_master,
-  // Call to kmp_int32 __kmpc_omp_taskyield(ident_t *, kmp_int32 global_tid,
-  // int end_part);
-  OMPRTL__kmpc_omp_taskyield,
-  // Call to kmp_int32 __kmpc_single(ident_t *, kmp_int32 global_tid);
-  OMPRTL__kmpc_single,
-  // Call to void __kmpc_end_single(ident_t *, kmp_int32 global_tid);
-  OMPRTL__kmpc_end_single,
-  // Call to kmp_task_t * __kmpc_omp_task_alloc(ident_t *, kmp_int32 gtid,
-  // kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
-  // kmp_routine_entry_t *task_entry);
-  OMPRTL__kmpc_omp_task_alloc,
-  // Call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, kmp_task_t *
-  // new_task);
-  OMPRTL__kmpc_omp_task,
-  // Call to void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
-  // size_t cpy_size, void *cpy_data, void(*cpy_func)(void *, void *),
-  // kmp_int32 didit);
-  OMPRTL__kmpc_copyprivate,
-  // Call to kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid,
-  // kmp_int32 num_vars, size_t reduce_size, void *reduce_data, void
-  // (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name *lck);
-  OMPRTL__kmpc_reduce,
-  // Call to kmp_int32 __kmpc_reduce_nowait(ident_t *loc, kmp_int32
-  // global_tid, kmp_int32 num_vars, size_t reduce_size, void *reduce_data,
-  // void (*reduce_func)(void *lhs_data, void *rhs_data), kmp_critical_name
-  // *lck);
-  OMPRTL__kmpc_reduce_nowait,
-  // Call to void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
-  // kmp_critical_name *lck);
-  OMPRTL__kmpc_end_reduce,
-  // Call to void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
-  // kmp_critical_name *lck);
-  OMPRTL__kmpc_end_reduce_nowait,
-  // Call to void __kmpc_omp_task_begin_if0(ident_t *, kmp_int32 gtid,
-  // kmp_task_t * new_task);
-  OMPRTL__kmpc_omp_task_begin_if0,
-  // Call to void __kmpc_omp_task_complete_if0(ident_t *, kmp_int32 gtid,
-  // kmp_task_t * new_task);
-  OMPRTL__kmpc_omp_task_complete_if0,
-  // Call to void __kmpc_ordered(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_ordered,
-  // Call to void __kmpc_end_ordered(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_end_ordered,
-  // Call to kmp_int32 __kmpc_omp_taskwait(ident_t *loc, kmp_int32
-  // global_tid);
-  OMPRTL__kmpc_omp_taskwait,
-  // Call to void __kmpc_taskgroup(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_taskgroup,
-  // Call to void __kmpc_end_taskgroup(ident_t *loc, kmp_int32 global_tid);
-  OMPRTL__kmpc_end_taskgroup,
-  // Call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
-  // int proc_bind);
-  OMPRTL__kmpc_push_proc_bind,
-  // Call to kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32
-  // gtid, kmp_task_t * new_task, kmp_int32 ndeps, kmp_depend_info_t
-  // *dep_list, kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
-  OMPRTL__kmpc_omp_task_with_deps,
-  // Call to void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32
-  // gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list, kmp_int32
-  // ndeps_noalias, kmp_depend_info_t *noalias_dep_list);
-  OMPRTL__kmpc_omp_wait_deps,
-  // Call to kmp_int32 __kmpc_cancellationpoint(ident_t *loc, kmp_int32
-  // global_tid, kmp_int32 cncl_kind);
-  OMPRTL__kmpc_cancellationpoint,
-  // Call to kmp_int32 __kmpc_cancel(ident_t *loc, kmp_int32 global_tid,
-  // kmp_int32 cncl_kind);
-  OMPRTL__kmpc_cancel,
-
-  //
-  // Offloading related calls
-  //
-  // Call to int32_t __tgt_target(int32_t device_id, void *host_ptr, int32_t
-  // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
-  // *arg_types);
-  OMPRTL__tgt_target,
-  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
-  OMPRTL__tgt_register_lib,
-  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
-  OMPRTL__tgt_unregister_lib,
-};
-
 } // anonymous namespace
 
 LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
@@ -3902,42 +3778,34 @@
   LineNum = PLoc.getLine();
 }
 
-void CGOpenMPRuntime::emitTargetOutlinedFunction(
+void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
     const OMPExecutableDirective &D, StringRef ParentName,
     llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
-    bool IsOffloadEntry) {
-  assert(!ParentName.empty() && "Invalid target region parent name!");
-
-  const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
-
-  // Emit target region as a standalone region.
-  auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
-    CGF.EmitStmt(CS.getCapturedStmt());
-  };
-
-  // Create a unique name for the entry function using the source location
-  // information of the current target region. The name will be something like:
+    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
+  // The name of the entry function is something like:
   //
   // __omp_offloading_DD_FFFF_PP_lBB
   //
   // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
-  // mangled name of the function that encloses the target region and BB is the
+  // mangled name of the function that encloses the target region, and BB is the
   // line number of the target region.
 
   unsigned DeviceID;
   unsigned FileID;
   unsigned Line;
+  SmallString<256> OutlinedFnName;
   getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
                            Line);
-  SmallString<64> EntryFnName;
   {
-    llvm::raw_svector_ostream OS(EntryFnName);
+    llvm::raw_svector_ostream OS(OutlinedFnName);
     OS << "__omp_offloading" << llvm::format("_%x", DeviceID)
        << llvm::format("_%x_", FileID) << ParentName << "_l" << Line;
   }
 
+  const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+
   CodeGenFunction CGF(CGM, true);
-  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
+  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, OutlinedFnName);
   CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
 
   OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);
@@ -3972,6 +3840,23 @@
       DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID);
 }
 
+void CGOpenMPRuntime::emitTargetOutlinedFunction(
+    const OMPExecutableDirective &D, StringRef ParentName,
+    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
+    bool IsOffloadEntry) {
+  assert(!ParentName.empty() && "Invalid target region parent name!");
+
+  const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
+
+  // Emit target region as a standalone region.
+  auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
+    CGF.EmitStmt(CS.getCapturedStmt());
+  };
+
+  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
+                                   IsOffloadEntry, CodeGen);
+}
+
 void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
                                      const OMPExecutableDirective &D,
                                      llvm::Value *OutlinedFn,