erichkeane created this revision.
erichkeane added reviewers: magabari, zvi, craig.topper, DavidKreitzer, hsaito, 
nlopes, reames, MatzeB, eli.friedman, rengolin, hfinkel.
Herald added subscribers: kbarton, nemanjai.

See: https://reviews.llvm.org/D41944
These are the Clang CodeGen test changes required for the
no-overflow ('nof') flag on integer division: every 'sdiv' and
'udiv' pattern in the affected tests gains the new flag.

See the RFC discussion here:
https://groups.google.com/forum/#!msg/llvm-dev/eFtnCwpMMhs/eAHQj8rJCAAJ;context-place=searchin/llvm-dev/magabari%7Csort:date
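
For reference, a minimal sketch of what the updated CHECK patterns
expect. The semantics are those proposed in D41944 (a division marked
'nof' is assumed not to overflow); the value names below are
illustrative, not taken from the patch:

  ; Previously Clang emitted plain division:
  ;   %r = sdiv i32 %a, %b
  ; With the no-overflow flag it instead emits:
  %r = sdiv nof i32 %a, %b
  ; 'nof' composes with existing flags such as 'exact':
  %q = udiv exact nof i64 %x, %y

Because FileCheck matches the emitted IR textually, the old patterns
(e.g. "sdiv i32") no longer match once the flag is emitted, so every
division CHECK line in these tests is updated to include it.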


Repository:
  rC Clang

https://reviews.llvm.org/D41952

Files:
  test/CodeGen/atomic_ops.c
  test/CodeGen/builtins-ppc-altivec.c
  test/CodeGen/builtins-ppc-vsx.c
  test/CodeGen/compound-type.c
  test/CodeGen/exceptions-seh.c
  test/CodeGen/ext-vector.c
  test/CodeGen/sanitize-trap.c
  test/CodeGen/ubsan-pass-object-size.c
  test/CodeGen/vla.c
  test/CodeGen/zvector.c
  test/CodeGenCXX/vla.cpp
  test/OpenMP/atomic_capture_codegen.cpp
  test/OpenMP/atomic_update_codegen.cpp
  test/OpenMP/declare_reduction_codegen.cpp
  test/OpenMP/for_lastprivate_codegen.cpp
  test/OpenMP/for_linear_codegen.cpp
  test/OpenMP/for_reduction_codegen.cpp
  test/OpenMP/for_reduction_codegen_UDR.cpp
  test/OpenMP/for_simd_codegen.cpp
  test/OpenMP/loops_explicit_clauses_codegen.cpp
  test/OpenMP/nvptx_teams_reduction_codegen.cpp
  test/OpenMP/parallel_firstprivate_codegen.cpp
  test/OpenMP/parallel_for_simd_codegen.cpp
  test/OpenMP/parallel_private_codegen.cpp
  test/OpenMP/parallel_reduction_codegen.cpp
  test/OpenMP/simd_codegen.cpp
  test/OpenMP/single_codegen.cpp
  test/OpenMP/taskgroup_task_reduction_codegen.cpp
  test/OpenMP/taskloop_reduction_codegen.cpp
  test/OpenMP/taskloop_simd_reduction_codegen.cpp
  test/OpenMP/teams_private_codegen.cpp

Index: test/CodeGen/ext-vector.c
===================================================================
--- test/CodeGen/ext-vector.c
+++ test/CodeGen/ext-vector.c
@@ -134,7 +134,7 @@
   // CHECK: add <4 x i32>
   // CHECK: sub <4 x i32>
   // CHECK: mul <4 x i32>
-  // CHECK: sdiv <4 x i32>
+  // CHECK: sdiv nof <4 x i32>
   // CHECK: srem <4 x i32>
   a = a + b;
   a = a - b;
@@ -145,7 +145,7 @@
   // CHECK: add <4 x i32>
   // CHECK: sub <4 x i32>
   // CHECK: mul <4 x i32>
-  // CHECK: sdiv <4 x i32>
+  // CHECK: sdiv nof <4 x i32>
   // CHECK: srem <4 x i32>
   a = a + c;
   a = a - c;
@@ -156,7 +156,7 @@
   // CHECK: add <4 x i32>
   // CHECK: sub <4 x i32>
   // CHECK: mul <4 x i32>
-  // CHECK: sdiv <4 x i32>
+  // CHECK: sdiv nof <4 x i32>
   // CHECK: srem <4 x i32>
   a += b;
   a -= b;
@@ -167,7 +167,7 @@
   // CHECK: add <4 x i32>
   // CHECK: sub <4 x i32>
   // CHECK: mul <4 x i32>
-  // CHECK: sdiv <4 x i32>
+  // CHECK: sdiv nof <4 x i32>
   // CHECK: srem <4 x i32>
   a += c;
   a -= c;
@@ -254,12 +254,12 @@
   uint4 b = *bp;
   int4 d;
   
-  // CHECK: udiv <4 x i32>
+  // CHECK: udiv nof <4 x i32>
   // CHECK: urem <4 x i32>
   a = a / b;
   a = a % b;
 
-  // CHECK: udiv <4 x i32>
+  // CHECK: udiv nof <4 x i32>
   // CHECK: urem <4 x i32>
   a = a / c;
   a = a % c;
Index: test/CodeGen/builtins-ppc-altivec.c
===================================================================
--- test/CodeGen/builtins-ppc-altivec.c
+++ test/CodeGen/builtins-ppc-altivec.c
@@ -1244,28 +1244,28 @@
 
   /* vec_div */
   res_vsc = vec_div(vsc, vsc);
-// CHECK: sdiv <16 x i8>
-// CHECK-LE: sdiv <16 x i8>
+// CHECK: sdiv nof <16 x i8>
+// CHECK-LE: sdiv nof <16 x i8>
 
   res_vuc = vec_div(vuc, vuc);
-// CHECK: udiv <16 x i8>
-// CHECK-LE: udiv <16 x i8>
+// CHECK: udiv nof <16 x i8>
+// CHECK-LE: udiv nof <16 x i8>
 
   res_vs = vec_div(vs, vs);
-// CHECK: sdiv <8 x i16>
-// CHECK-LE: sdiv <8 x i16>
+// CHECK: sdiv nof <8 x i16>
+// CHECK-LE: sdiv nof <8 x i16>
 
   res_vus = vec_div(vus, vus);
-// CHECK: udiv <8 x i16>
-// CHECK-LE: udiv <8 x i16>
+// CHECK: udiv nof <8 x i16>
+// CHECK-LE: udiv nof <8 x i16>
 
   res_vi = vec_div(vi, vi);
-// CHECK: sdiv <4 x i32>
-// CHECK-LE: sdiv <4 x i32>
+// CHECK: sdiv nof <4 x i32>
+// CHECK-LE: sdiv nof <4 x i32>
 
   res_vui = vec_div(vui, vui);
-// CHECK: udiv <4 x i32>
-// CHECK-LE: udiv <4 x i32>
+// CHECK: udiv nof <4 x i32>
+// CHECK-LE: udiv nof <4 x i32>
 
   /* vec_dss */
   vec_dss(0);
Index: test/CodeGen/vla.c
===================================================================
--- test/CodeGen/vla.c
+++ test/CodeGen/vla.c
@@ -118,12 +118,12 @@
 
   // CHECK-NEXT: [[T0:%.*]] = load [6 x i8]*, [6 x i8]** [[P]], align 4
   // CHECK-NEXT: [[T1:%.*]] = load i32, i32* [[N]], align 4
-  // CHECK-NEXT: [[T2:%.*]] = udiv i32 [[T1]], 2
+  // CHECK-NEXT: [[T2:%.*]] = udiv nof i32 [[T1]], 2
   // CHECK-NEXT: [[T3:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
   // CHECK-NEXT: [[T4:%.*]] = mul nsw i32 [[T2]], [[T3]]
   // CHECK-NEXT: [[T5:%.*]] = getelementptr inbounds [6 x i8], [6 x i8]* [[T0]], i32 [[T4]]
   // CHECK-NEXT: [[T6:%.*]] = load i32, i32* [[N]], align 4
-  // CHECK-NEXT: [[T7:%.*]] = udiv i32 [[T6]], 4
+  // CHECK-NEXT: [[T7:%.*]] = udiv nof i32 [[T6]], 4
   // CHECK-NEXT: [[T8:%.*]] = sub i32 0, [[T7]]
   // CHECK-NEXT: [[T9:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
   // CHECK-NEXT: [[T10:%.*]] = mul nsw i32 [[T8]], [[T9]]
@@ -138,7 +138,7 @@
   // CHECK-NEXT: [[T4:%.*]] = sub i32 [[T2]], [[T3]]
   // CHECK-NEXT: [[T5:%.*]] = mul nuw i32 [[DIM0]], [[DIM1]]
   // CHECK-NEXT: [[T6:%.*]] = mul nuw i32 6, [[T5]]
-  // CHECK-NEXT: [[T7:%.*]] = sdiv exact i32 [[T4]], [[T6]]
+  // CHECK-NEXT: [[T7:%.*]] = sdiv exact nof i32 [[T4]], [[T6]]
   // CHECK-NEXT: ret i32 [[T7]]
   return p2 - p;
 }
Index: test/CodeGen/zvector.c
===================================================================
--- test/CodeGen/zvector.c
+++ test/CodeGen/zvector.c
@@ -877,35 +877,35 @@
 // CHECK-LABEL: define void @test_div() #0 {
 // CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
 // CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
-// CHECK:   [[DIV:%.*]] = sdiv <16 x i8> [[TMP0]], [[TMP1]]
+// CHECK:   [[DIV:%.*]] = sdiv nof <16 x i8> [[TMP0]], [[TMP1]]
 // CHECK:   store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
 // CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
 // CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
-// CHECK:   [[DIV1:%.*]] = udiv <16 x i8> [[TMP2]], [[TMP3]]
+// CHECK:   [[DIV1:%.*]] = udiv nof <16 x i8> [[TMP2]], [[TMP3]]
 // CHECK:   store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
 // CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
 // CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
-// CHECK:   [[DIV2:%.*]] = sdiv <8 x i16> [[TMP4]], [[TMP5]]
+// CHECK:   [[DIV2:%.*]] = sdiv nof <8 x i16> [[TMP4]], [[TMP5]]
 // CHECK:   store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
 // CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
 // CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
-// CHECK:   [[DIV3:%.*]] = udiv <8 x i16> [[TMP6]], [[TMP7]]
+// CHECK:   [[DIV3:%.*]] = udiv nof <8 x i16> [[TMP6]], [[TMP7]]
 // CHECK:   store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
 // CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
 // CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
-// CHECK:   [[DIV4:%.*]] = sdiv <4 x i32> [[TMP8]], [[TMP9]]
+// CHECK:   [[DIV4:%.*]] = sdiv nof <4 x i32> [[TMP8]], [[TMP9]]
 // CHECK:   store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
 // CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
 // CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
-// CHECK:   [[DIV5:%.*]] = udiv <4 x i32> [[TMP10]], [[TMP11]]
+// CHECK:   [[DIV5:%.*]] = udiv nof <4 x i32> [[TMP10]], [[TMP11]]
 // CHECK:   store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
 // CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
 // CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
-// CHECK:   [[DIV6:%.*]] = sdiv <2 x i64> [[TMP12]], [[TMP13]]
+// CHECK:   [[DIV6:%.*]] = sdiv nof <2 x i64> [[TMP12]], [[TMP13]]
 // CHECK:   store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
 // CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
 // CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
-// CHECK:   [[DIV7:%.*]] = udiv <2 x i64> [[TMP14]], [[TMP15]]
+// CHECK:   [[DIV7:%.*]] = udiv nof <2 x i64> [[TMP14]], [[TMP15]]
 // CHECK:   store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
 // CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
 // CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
@@ -932,35 +932,35 @@
 // CHECK-LABEL: define void @test_div_assign() #0 {
 // CHECK:   [[TMP0:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc2, align 8
 // CHECK:   [[TMP1:%.*]] = load volatile <16 x i8>, <16 x i8>* @sc, align 8
-// CHECK:   [[DIV:%.*]] = sdiv <16 x i8> [[TMP1]], [[TMP0]]
+// CHECK:   [[DIV:%.*]] = sdiv nof <16 x i8> [[TMP1]], [[TMP0]]
 // CHECK:   store volatile <16 x i8> [[DIV]], <16 x i8>* @sc, align 8
 // CHECK:   [[TMP2:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc2, align 8
 // CHECK:   [[TMP3:%.*]] = load volatile <16 x i8>, <16 x i8>* @uc, align 8
-// CHECK:   [[DIV1:%.*]] = udiv <16 x i8> [[TMP3]], [[TMP2]]
+// CHECK:   [[DIV1:%.*]] = udiv nof <16 x i8> [[TMP3]], [[TMP2]]
 // CHECK:   store volatile <16 x i8> [[DIV1]], <16 x i8>* @uc, align 8
 // CHECK:   [[TMP4:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss2, align 8
 // CHECK:   [[TMP5:%.*]] = load volatile <8 x i16>, <8 x i16>* @ss, align 8
-// CHECK:   [[DIV2:%.*]] = sdiv <8 x i16> [[TMP5]], [[TMP4]]
+// CHECK:   [[DIV2:%.*]] = sdiv nof <8 x i16> [[TMP5]], [[TMP4]]
 // CHECK:   store volatile <8 x i16> [[DIV2]], <8 x i16>* @ss, align 8
 // CHECK:   [[TMP6:%.*]] = load volatile <8 x i16>, <8 x i16>* @us2, align 8
 // CHECK:   [[TMP7:%.*]] = load volatile <8 x i16>, <8 x i16>* @us, align 8
-// CHECK:   [[DIV3:%.*]] = udiv <8 x i16> [[TMP7]], [[TMP6]]
+// CHECK:   [[DIV3:%.*]] = udiv nof <8 x i16> [[TMP7]], [[TMP6]]
 // CHECK:   store volatile <8 x i16> [[DIV3]], <8 x i16>* @us, align 8
 // CHECK:   [[TMP8:%.*]] = load volatile <4 x i32>, <4 x i32>* @si2, align 8
 // CHECK:   [[TMP9:%.*]] = load volatile <4 x i32>, <4 x i32>* @si, align 8
-// CHECK:   [[DIV4:%.*]] = sdiv <4 x i32> [[TMP9]], [[TMP8]]
+// CHECK:   [[DIV4:%.*]] = sdiv nof <4 x i32> [[TMP9]], [[TMP8]]
 // CHECK:   store volatile <4 x i32> [[DIV4]], <4 x i32>* @si, align 8
 // CHECK:   [[TMP10:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui2, align 8
 // CHECK:   [[TMP11:%.*]] = load volatile <4 x i32>, <4 x i32>* @ui, align 8
-// CHECK:   [[DIV5:%.*]] = udiv <4 x i32> [[TMP11]], [[TMP10]]
+// CHECK:   [[DIV5:%.*]] = udiv nof <4 x i32> [[TMP11]], [[TMP10]]
 // CHECK:   store volatile <4 x i32> [[DIV5]], <4 x i32>* @ui, align 8
 // CHECK:   [[TMP12:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl2, align 8
 // CHECK:   [[TMP13:%.*]] = load volatile <2 x i64>, <2 x i64>* @sl, align 8
-// CHECK:   [[DIV6:%.*]] = sdiv <2 x i64> [[TMP13]], [[TMP12]]
+// CHECK:   [[DIV6:%.*]] = sdiv nof <2 x i64> [[TMP13]], [[TMP12]]
 // CHECK:   store volatile <2 x i64> [[DIV6]], <2 x i64>* @sl, align 8
 // CHECK:   [[TMP14:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul2, align 8
 // CHECK:   [[TMP15:%.*]] = load volatile <2 x i64>, <2 x i64>* @ul, align 8
-// CHECK:   [[DIV7:%.*]] = udiv <2 x i64> [[TMP15]], [[TMP14]]
+// CHECK:   [[DIV7:%.*]] = udiv nof <2 x i64> [[TMP15]], [[TMP14]]
 // CHECK:   store volatile <2 x i64> [[DIV7]], <2 x i64>* @ul, align 8
 // CHECK:   [[TMP16:%.*]] = load volatile <2 x double>, <2 x double>* @fd2, align 8
 // CHECK:   [[TMP17:%.*]] = load volatile <2 x double>, <2 x double>* @fd, align 8
Index: test/CodeGen/exceptions-seh.c
===================================================================
--- test/CodeGen/exceptions-seh.c
+++ test/CodeGen/exceptions-seh.c
@@ -11,7 +11,7 @@
   *myres = numerator / denominator;
 }
 // CHECK-LABEL: define void @try_body(i32 %numerator, i32 %denominator, i32* %myres)
-// CHECK: sdiv i32
+// CHECK: sdiv nof i32
 // CHECK: store i32 %{{.*}}, i32*
 // CHECK: ret void
 
Index: test/CodeGen/ubsan-pass-object-size.c
===================================================================
--- test/CodeGen/ubsan-pass-object-size.c
+++ test/CodeGen/ubsan-pass-object-size.c
@@ -7,7 +7,7 @@
   // CHECK: [[SIZE_ALLOCA:%.*]] = alloca i64, align 8
   // CHECK: store i64 %{{.*}}, i64* [[SIZE_ALLOCA]], align 8
   // CHECK: [[LOAD_SIZE:%.*]] = load i64, i64* [[SIZE_ALLOCA]], align 8, !nosanitize
-  // CHECK-NEXT: [[SCALED_SIZE:%.*]] = udiv i64 [[LOAD_SIZE]], 4, !nosanitize
+  // CHECK-NEXT: [[SCALED_SIZE:%.*]] = udiv nof i64 [[LOAD_SIZE]], 4, !nosanitize
   // CHECK-NEXT: [[SEXT_N:%.*]] = sext i32 %{{.*}} to i64, !nosanitize
   // CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[SEXT_N]], [[SCALED_SIZE]], !nosanitize
   // CHECK-NEXT: br i1 [[ICMP]], {{.*}} !nosanitize
Index: test/CodeGen/atomic_ops.c
===================================================================
--- test/CodeGen/atomic_ops.c
+++ test/CodeGen/atomic_ops.c
@@ -12,10 +12,10 @@
   // CHECK: mul nsw i32
   // CHECK: {{(cmpxchg i32*|i1 @__atomic_compare_exchange\(i32 4,)}}
   i /= 2;
-  // CHECK: sdiv i32
+  // CHECK: sdiv nof i32
   // CHECK: {{(cmpxchg i32*|i1 @__atomic_compare_exchange\(i32 4, )}}
   j /= x;
-  // CHECK: sdiv i32
+  // CHECK: sdiv nof i32
   // CHECK: {{(cmpxchg i16*|i1 @__atomic_compare_exchange\(i32 2, )}}
 
 }
Index: test/CodeGen/sanitize-trap.c
===================================================================
--- test/CodeGen/sanitize-trap.c
+++ test/CodeGen/sanitize-trap.c
@@ -25,6 +25,6 @@
   // CHECK-NEXT: __ubsan_handle_divrem_overflow
 
   // CHECK: {{^|:}}[[L3]]
-  // CHECK-NEXT: sdiv i32 %[[N]], %[[D]]
+  // CHECK-NEXT: sdiv nof i32 %[[N]], %[[D]]
   return x / y;
 }
Index: test/CodeGen/builtins-ppc-vsx.c
===================================================================
--- test/CodeGen/builtins-ppc-vsx.c
+++ test/CodeGen/builtins-ppc-vsx.c
@@ -208,12 +208,12 @@
 
   /* vec_div */
   res_vsll = vec_div(vsll, vsll);
-// CHECK: sdiv <2 x i64>
-// CHECK-LE: sdiv <2 x i64>
+// CHECK: sdiv nof <2 x i64>
+// CHECK-LE: sdiv nof <2 x i64>
 
   res_vull = vec_div(vull, vull);
-// CHECK: udiv <2 x i64>
-// CHECK-LE: udiv <2 x i64>
+// CHECK: udiv nof <2 x i64>
+// CHECK-LE: udiv nof <2 x i64>
 
   res_vf = vec_div(vf, vf);
 // CHECK: fdiv <4 x float>
Index: test/CodeGen/compound-type.c
===================================================================
--- test/CodeGen/compound-type.c
+++ test/CodeGen/compound-type.c
@@ -1,5 +1,5 @@
 // RUN: %clang_cc1 < %s -emit-llvm -triple i686-pc-linux-gnu > %t
-// RUN: grep "div i32" %t
+// RUN: grep "div nof i32" %t
 // RUN: grep "shl i32" %t
 
 unsigned char a,b;
Index: test/OpenMP/simd_codegen.cpp
===================================================================
--- test/OpenMP/simd_codegen.cpp
+++ test/OpenMP/simd_codegen.cpp
@@ -272,7 +272,7 @@
 // CHECK: [[T1_BODY]]:
 // Loop counters i and j updates:
 // CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]{{.*}}!llvm.mem.parallel_loop_access ![[T1_ID]]
-// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
+// CHECK-NEXT: [[I_1:%.+]] = sdiv nof i64 [[IV1]], 4
 // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
 // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
 // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
@@ -332,7 +332,7 @@
 // CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
 // CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
 // CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
-// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
+// CHECK-NEXT: [[DIFF4:%.+]] = sdiv nof i32 [[DIFF3]], 1
 // CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
 // CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
 // CHECK: store i32 0, i32* [[IT_OMP_IV:%[^,]+]]
@@ -389,18 +389,18 @@
 // Start of body: calculate i from index:
 // CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
 // Calculation of the loop counters values.
-// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
+// CHECK: [[CALC_I_1:%.+]] = udiv nof i32 [[IV1]], 60
 // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
 // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
-// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
+// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv nof i32 [[IV1_2]], 20
 // CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
 // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
 // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
 // CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]{{.+}}!llvm.mem.parallel_loop_access ![[COLL1_LOOP_ID]]
-// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
+// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv nof i32 [[IV1_3]], 5
 // CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
 // CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
 // CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
Index: test/OpenMP/parallel_for_simd_codegen.cpp
===================================================================
--- test/OpenMP/parallel_for_simd_codegen.cpp
+++ test/OpenMP/parallel_for_simd_codegen.cpp
@@ -422,7 +422,7 @@
 // CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
 // CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
 // CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
-// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
+// CHECK-NEXT: [[DIFF4:%.+]] = sdiv nof i32 [[DIFF3]], 1
 // CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
 // CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
   #pragma omp parallel for simd
@@ -509,18 +509,18 @@
 // Start of body: calculate i from index:
 // CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]
 // Calculation of the loop counters values.
-// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
+// CHECK: [[CALC_I_1:%.+]] = udiv nof i32 [[IV1]], 60
 // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
 // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
-// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
+// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv nof i32 [[IV1_2]], 20
 // CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
 // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
 // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
 // CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]
-// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
+// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv nof i32 [[IV1_3]], 5
 // CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
 // CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
 // CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
@@ -649,7 +649,7 @@
 // CHECK: [[T1_BODY]]:
 // Loop counters i and j updates:
 // CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
-// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
+// CHECK-NEXT: [[I_1:%.+]] = sdiv nof i64 [[IV1]], 4
 // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
 // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
 // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
Index: test/OpenMP/loops_explicit_clauses_codegen.cpp
===================================================================
--- test/OpenMP/loops_explicit_clauses_codegen.cpp
+++ test/OpenMP/loops_explicit_clauses_codegen.cpp
@@ -42,16 +42,16 @@
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
 // CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
-// CHECK: sdiv i32
+// CHECK: sdiv nof i32
 // CHECK: store i32 %{{.+}}, i32* @k,
 #pragma omp simd linear(k : 2)
   for (k = 0; k < argc; k++)
     bar();
 // CHECK: @{{.+}}foo
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
 // CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
-// CHECK: sdiv i32
+// CHECK: sdiv nof i32
 // CHECK: store i32 %{{.+}}, i32* @k,
   foo();
 #pragma omp simd lastprivate(k) collapse(2)
@@ -63,7 +63,7 @@
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
 // CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
-// CHECK: sdiv i32
+// CHECK: sdiv nof i32
 // CHECK: store i32 %{{.+}}, i32* @k,
 #pragma omp simd
   for (k = 0; k < argc; k++)
@@ -73,7 +73,7 @@
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
 // CHECK: i32 @{{.+}}bar{{.+}}!llvm.mem.parallel_loop_access
 // CHECK-NOT: @k{{.+}}!llvm.mem.parallel_loop_access
-// CHECK: sdiv i32
+// CHECK: sdiv nof i32
 // CHECK: store i32 %{{.+}}, i32* @k,
 #pragma omp simd collapse(2)
   for (int i = 0; i < 2; ++i)
Index: test/OpenMP/for_reduction_codegen_UDR.cpp
===================================================================
--- test/OpenMP/for_reduction_codegen_UDR.cpp
+++ test/OpenMP/for_reduction_codegen_UDR.cpp
@@ -325,7 +325,7 @@
 // CHECK: [[UB_CAST:%.+]] = ptrtoint i32* [[UB1_UP:%.+]] to i64
 // CHECK: [[LB_CAST:%.+]] = ptrtoint i32* [[LB1_0:%.+]] to i64
 // CHECK: [[DIFF:%.+]] = sub i64 [[UB_CAST]], [[LB_CAST]]
-// CHECK: [[SIZE_1:%.+]] = sdiv exact i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK: [[SIZE_1:%.+]] = sdiv exact nof i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
 // CHECK: [[ARR_SIZE:%.+]] = add nuw i64 [[SIZE_1]], 1
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
@@ -517,7 +517,7 @@
 
 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
 
-// CHECK: [[ARR_SIZE:%.+]] = udiv exact i64
+// CHECK: [[ARR_SIZE:%.+]] = udiv exact nof i64
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
 
@@ -708,16 +708,16 @@
 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64
 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
-// CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
+// CHECK: [[DIF:%.+]] = sdiv exact nof i64 [[BYTE_DIF]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[VAR2_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
 // CHECK: [[LD:%.+]] = load [[S_FLOAT_TY]]**, [[S_FLOAT_TY]]*** [[VAR2_ORIG]],
 // CHECK: [[ORIG_START:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[LD]],
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
 // CHECK: [[PSEUDO_VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV]], i64 [[OFFSET]]
 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
 // CHECK: store [[S_FLOAT_TY]]* [[PSEUDO_VAR2_PRIV]], [[S_FLOAT_TY]]** [[REF]]
@@ -739,7 +739,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
 // CHECK: [[VVAR2_PRIV_PTR:%.+]] = bitcast [5 x [[S_FLOAT_TY]]]* [[VVAR2_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[VVAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VVAR2_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: ret void
@@ -766,7 +766,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint ([[S_FLOAT_TY]]* getelementptr ([[S_FLOAT_TY]], [[S_FLOAT_TY]]* null, i32 1) to i64)
 // CHECK: [[VAR3_PRIV_PTR:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [4 x [[S_FLOAT_TY]]]*
@@ -838,7 +838,7 @@
 // CHECK: call void @_Z5init2R6BaseS1RKS_(
 
 // For the min reduction operation, the initial value of the private variable is the largest representable value.
-// CHECK: sdiv i32 432, %
+// CHECK: sdiv nof i32 432, %
 
 // CHECK: [[GTID_REF:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[GTID_ADDR_ADDR]]
 // CHECK: [[GTID:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[GTID_REF]]
@@ -991,7 +991,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_INT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_INT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint ([[S_INT_TY]]* getelementptr ([[S_INT_TY]], [[S_INT_TY]]* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint ([[S_INT_TY]]* getelementptr ([[S_INT_TY]], [[S_INT_TY]]* null, i32 1) to i64)
 // CHECK: [[ARR_PRIV_PTR:%.+]] = bitcast [40 x [[S_INT_TY]]]* [[ARR_PRIV]] to [[S_INT_TY]]*
 // CHECK: [[PSEUDO_ARR_PRIV:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[ARR_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: [[ARR_PRIV:%.+]] = bitcast [[S_INT_TY]]* [[PSEUDO_ARR_PRIV]] to [42 x [[S_INT_TY]]]*
Index: test/OpenMP/taskloop_simd_reduction_codegen.cpp
===================================================================
--- test/OpenMP/taskloop_simd_reduction_codegen.cpp
+++ test/OpenMP/taskloop_simd_reduction_codegen.cpp
@@ -89,7 +89,7 @@
 // CHECK-DAG:    [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG:    [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG:    [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG:    [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG:    [[TMP35:%.*]] = sdiv exact nof i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
 // CHECK-DAG:    [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
@@ -121,7 +121,7 @@
 // CHECK-DAG:    store i8* [[TMP53]], i8** [[TMP52:%[^,]+]],
 // CHECK-DAG:    [[TMP52]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8:%.+]], i32 0, i32 0
 // CHECK-DAG:    [[TMP54:%.*]] = mul nuw i64 [[TMP2]], 4
-// CHECK-DAG:    [[TMP55:%.*]] = udiv exact i64 [[TMP54]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG:    [[TMP55:%.*]] = udiv exact nof i64 [[TMP54]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    store i64 [[TMP54]], i64* [[TMP56:%[^,]+]],
 // CHECK-DAG:    [[TMP56]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8]], i32 0, i32 1
 // CHECK-DAG:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8]], i32 0, i32 2
@@ -144,7 +144,7 @@
 // CHECK:    [[SUB:%.*]] = sub nsw i32 [[TMP64]], 0
 // CHECK:    [[SUB10:%.*]] = sub nsw i32 [[SUB]], 1
 // CHECK:    [[ADD11:%.*]] = add nsw i32 [[SUB10]], 1
-// CHECK:    [[DIV:%.*]] = sdiv i32 [[ADD11]], 1
+// CHECK:    [[DIV:%.*]] = sdiv nof i32 [[ADD11]], 1
 // CHECK:    [[SUB12:%.*]] = sub nsw i32 [[DIV]], 1
 // CHECK:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]],
 // CHECK:    [[TMP65:%.*]] = call i8* @__kmpc_omp_task_alloc(%ident_t* %{{.+}}, i32 [[TMP0]], i32 1, i64 888, i64 72, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @{{.+}} to i32 (i32, i8*)*))
Index: test/OpenMP/parallel_private_codegen.cpp
===================================================================
--- test/OpenMP/parallel_private_codegen.cpp
+++ test/OpenMP/parallel_private_codegen.cpp
@@ -140,7 +140,7 @@
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
     // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
     // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
     // LAMBDA-NEXT: ret void
 
@@ -240,7 +240,7 @@
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // BLOCKS-NEXT: ret void
 #else
@@ -314,7 +314,7 @@
 // CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // CHECK-NEXT: ret void
 
Index: test/OpenMP/atomic_update_codegen.cpp
===================================================================
--- test/OpenMP/atomic_update_codegen.cpp
+++ test/OpenMP/atomic_update_codegen.cpp
@@ -169,7 +169,7 @@
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
-// CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = sdiv nof i64 [[EXPECTED]], [[EXPR]]
 // CHECK: store i64 [[DESIRED]], i64* [[TEMP:%.+]],
 // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP]],
 // CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
Index: test/OpenMP/single_codegen.cpp
===================================================================
--- test/OpenMP/single_codegen.cpp
+++ test/OpenMP/single_codegen.cpp
@@ -303,7 +303,7 @@
 // CHECK-NEXT: getelementptr inbounds [[CAP_TY]], [[CAP_TY]]* [[CAP]], i32 0, i32 3
 // CHECK-NEXT: load i32*, i32** %
 // CHECK-NEXT: load i32, i32* %
-// CHECK-NEXT: sdiv i32 %{{.+}}, 1
+// CHECK-NEXT: sdiv nof i32 %{{.+}}, 1
 // CHECK-NEXT: store i32 %
 // CHECK-NEXT: getelementptr inbounds [[CAP_TY]], [[CAP_TY]]* [[CAP]], i32 0, i32 1
 // CHECK-NEXT: load i32*, i32** %
@@ -360,7 +360,7 @@
 // CHECK-NOT: getelementptr inbounds
 // CHECK: load i32*, i32** %
 // CHECK-NEXT: load i32, i32* %
-// CHECK-NEXT: sdiv i32 %{{.+}}, 1
+// CHECK-NEXT: sdiv nof i32 %{{.+}}, 1
 // CHECK-NEXT: store i32 %
 // CHECK-NEXT: call void @__kmpc_end_single([[IDENT_T_TY]]* @{{.+}}, i32 %{{.+}})
 // CHECK-NEXT: store i32 1, i32* [[DID_IT]],
Index: test/OpenMP/parallel_reduction_codegen.cpp
===================================================================
--- test/OpenMP/parallel_reduction_codegen.cpp
+++ test/OpenMP/parallel_reduction_codegen.cpp
@@ -163,7 +163,7 @@
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
     // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
     // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
     // LAMBDA: call i32 @__kmpc_reduce_nowait(
     // LAMBDA: ret void
@@ -307,7 +307,7 @@
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // BLOCKS: call i32 @__kmpc_reduce_nowait(
 // BLOCKS: ret void
@@ -663,7 +663,7 @@
 // CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // CHECK: call i32 @__kmpc_reduce_nowait(
 // CHECK: ret void
Index: test/OpenMP/declare_reduction_codegen.cpp
===================================================================
--- test/OpenMP/declare_reduction_codegen.cpp
+++ test/OpenMP/declare_reduction_codegen.cpp
@@ -201,12 +201,12 @@
 // CHECK-LOAD-NEXT: }
 
 // CHECK: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
-// CHECK: [[DIV:%.+]] = sdiv i32
+// CHECK: [[DIV:%.+]] = sdiv nof i32
 // CHECK-NEXT: store i32 [[DIV]], i32*
 // CHECK-NEXT: ret void
 // CHECK-NEXT: }
 // CHECK-LOAD: define internal {{.*}}void @{{[^(]+}}(i32* noalias, i32* noalias)
-// CHECK-LOAD: [[DIV:%.+]] = sdiv i32
+// CHECK-LOAD: [[DIV:%.+]] = sdiv nof i32
 // CHECK-LOAD-NEXT: store i32 [[DIV]], i32*
 // CHECK-LOAD-NEXT: ret void
 // CHECK-LOAD-NEXT: }
Index: test/OpenMP/for_simd_codegen.cpp
===================================================================
--- test/OpenMP/for_simd_codegen.cpp
+++ test/OpenMP/for_simd_codegen.cpp
@@ -402,7 +402,7 @@
 // CHECK: [[T1_BODY]]:
 // Loop counters i and j updates:
 // CHECK: [[IV1:%.+]] = load i64, i64* [[T1_OMP_IV]]
-// CHECK-NEXT: [[I_1:%.+]] = sdiv i64 [[IV1]], 4
+// CHECK-NEXT: [[I_1:%.+]] = sdiv nof i64 [[IV1]], 4
 // CHECK-NEXT: [[I_1_MUL1:%.+]] = mul nsw i64 [[I_1]], 1
 // CHECK-NEXT: [[I_1_ADD0:%.+]] = add nsw i64 0, [[I_1_MUL1]]
 // CHECK-NEXT: [[I_2:%.+]] = trunc i64 [[I_1_ADD0]] to i32
@@ -464,7 +464,7 @@
 // CHECK: [[DIFF1:%.+]] = invoke {{.*}}i32 @{{.*}}IterDouble{{.*}}
 // CHECK: [[DIFF2:%.+]] = sub nsw i32 [[DIFF1]], 1
 // CHECK-NEXT: [[DIFF3:%.+]] = add nsw i32 [[DIFF2]], 1
-// CHECK-NEXT: [[DIFF4:%.+]] = sdiv i32 [[DIFF3]], 1
+// CHECK-NEXT: [[DIFF4:%.+]] = sdiv nof i32 [[DIFF3]], 1
 // CHECK-NEXT: [[DIFF5:%.+]] = sub nsw i32 [[DIFF4]], 1
 // CHECK-NEXT: store i32 [[DIFF5]], i32* [[OMP_LAST_IT:%[^,]+]]{{.+}}
   #pragma omp for simd
@@ -552,18 +552,18 @@
 // Start of body: calculate i from index:
 // CHECK: [[IV1:%.+]] = load i32, i32* [[OMP_IV]]
 // Calculation of the loop counters values.
-// CHECK: [[CALC_I_1:%.+]] = udiv i32 [[IV1]], 60
+// CHECK: [[CALC_I_1:%.+]] = udiv nof i32 [[IV1]], 60
 // CHECK-NEXT: [[CALC_I_1_MUL1:%.+]] = mul i32 [[CALC_I_1]], 1
 // CHECK-NEXT: [[CALC_I_2:%.+]] = add i32 1, [[CALC_I_1_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_I_2]], i32* [[LC_I:.+]]
 // CHECK: [[IV1_2:%.+]] = load i32, i32* [[OMP_IV]]
-// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv i32 [[IV1_2]], 20
+// CHECK-NEXT: [[CALC_J_1:%.+]] = udiv nof i32 [[IV1_2]], 20
 // CHECK-NEXT: [[CALC_J_2:%.+]] = urem i32 [[CALC_J_1]], 3
 // CHECK-NEXT: [[CALC_J_2_MUL1:%.+]] = mul i32 [[CALC_J_2]], 1
 // CHECK-NEXT: [[CALC_J_3:%.+]] = add i32 2, [[CALC_J_2_MUL1]]
 // CHECK-NEXT: store i32 [[CALC_J_3]], i32* [[LC_J:.+]]
 // CHECK: [[IV1_3:%.+]] = load i32, i32* [[OMP_IV]]
-// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv i32 [[IV1_3]], 5
+// CHECK-NEXT: [[CALC_K_1:%.+]] = udiv nof i32 [[IV1_3]], 5
 // CHECK-NEXT: [[CALC_K_2:%.+]] = urem i32 [[CALC_K_1]], 4
 // CHECK-NEXT: [[CALC_K_2_MUL1:%.+]] = mul i32 [[CALC_K_2]], 1
 // CHECK-NEXT: [[CALC_K_3:%.+]] = add i32 3, [[CALC_K_2_MUL1]]
Index: test/OpenMP/for_lastprivate_codegen.cpp
===================================================================
--- test/OpenMP/for_lastprivate_codegen.cpp
+++ test/OpenMP/for_lastprivate_codegen.cpp
@@ -236,7 +236,7 @@
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
     // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
     // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
     // LAMBDA: call void @__kmpc_for_static_fini(
     // LAMBDA: br i1
@@ -431,7 +431,7 @@
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // BLOCKS: call void @__kmpc_for_static_fini(
 // BLOCKS: br i1
@@ -700,7 +700,7 @@
 // CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // CHECK: call void @__kmpc_for_static_fini(
 // CHECK: br i1
Index: test/OpenMP/taskloop_reduction_codegen.cpp
===================================================================
--- test/OpenMP/taskloop_reduction_codegen.cpp
+++ test/OpenMP/taskloop_reduction_codegen.cpp
@@ -89,7 +89,7 @@
 // CHECK-DAG:    [[TMP32:%.*]] = ptrtoint %struct.S* [[ARRAYIDX6]] to i64
 // CHECK-DAG:    [[TMP33:%.*]] = ptrtoint %struct.S* [[ARRAYIDX5]] to i64
 // CHECK-DAG:    [[TMP34:%.*]] = sub i64 [[TMP32]], [[TMP33]]
-// CHECK-DAG:    [[TMP35:%.*]] = sdiv exact i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG:    [[TMP35:%.*]] = sdiv exact nof i64 [[TMP34]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    [[TMP36:%.*]] = add nuw i64 [[TMP35]], 1
 // CHECK-DAG:    [[TMP37:%.*]] = mul nuw i64 [[TMP36]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    store i64 [[TMP37]], i64* [[TMP38:%[^,]+]],
@@ -121,7 +121,7 @@
 // CHECK-DAG:    store i8* [[TMP53]], i8** [[TMP52:%[^,]+]],
 // CHECK-DAG:    [[TMP52]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8:%.+]], i32 0, i32 0
 // CHECK-DAG:    [[TMP54:%.*]] = mul nuw i64 [[TMP2]], 4
-// CHECK-DAG:    [[TMP55:%.*]] = udiv exact i64 [[TMP54]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK-DAG:    [[TMP55:%.*]] = udiv exact nof i64 [[TMP54]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK-DAG:    store i64 [[TMP54]], i64* [[TMP56:%[^,]+]],
 // CHECK-DAG:    [[TMP56]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8]], i32 0, i32 1
 // CHECK-DAG:    [[TMP57:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_RED_INPUT_T]], %struct.kmp_task_red_input_t* [[DOTRD_INPUT_GEP_8]], i32 0, i32 2
@@ -144,7 +144,7 @@
 // CHECK:    [[SUB:%.*]] = sub nsw i32 [[TMP64]], 0
 // CHECK:    [[SUB10:%.*]] = sub nsw i32 [[SUB]], 1
 // CHECK:    [[ADD11:%.*]] = add nsw i32 [[SUB10]], 1
-// CHECK:    [[DIV:%.*]] = sdiv i32 [[ADD11]], 1
+// CHECK:    [[DIV:%.*]] = sdiv nof i32 [[ADD11]], 1
 // CHECK:    [[SUB12:%.*]] = sub nsw i32 [[DIV]], 1
 // CHECK:    store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]],
 // CHECK:    [[TMP65:%.*]] = call i8* @__kmpc_omp_task_alloc(%ident_t* %{{.+}}, i32 [[TMP0]], i32 1, i64 888, i64 72, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @[[TASK:.+]] to i32 (i32, i8*)*))
Index: test/OpenMP/teams_private_codegen.cpp
===================================================================
--- test/OpenMP/teams_private_codegen.cpp
+++ test/OpenMP/teams_private_codegen.cpp
@@ -266,7 +266,7 @@
 // CHECK: store i{{[0-9]+}} [[B_DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK: [[C_REF_VAL:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[C_REF]]
 // CHECK: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_REF_VAL]]
-// CHECK: [[C_DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK: [[C_DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK: store i{{[0-9]+}} [[C_DIV]], i{{[0-9]+}}* [[C_REF_VAL]],
 // CHECK: ret
 
Index: test/OpenMP/nvptx_teams_reduction_codegen.cpp
===================================================================
--- test/OpenMP/nvptx_teams_reduction_codegen.cpp
+++ test/OpenMP/nvptx_teams_reduction_codegen.cpp
@@ -597,7 +597,7 @@
   // CHECK: [[OF:%.+]] = mul i[[SZ]] [[NUM_TEAMS]], 1
   // CHECK: [[POS1:%.+]] = add i[[SZ]] [[SCRATCHPAD]], [[OF]]
   // CHECK: [[POS2:%.+]] = sub i[[SZ]] [[POS1]], 1
-  // CHECK: [[POS3:%.+]] = sdiv i[[SZ]] [[POS2]], 256
+  // CHECK: [[POS3:%.+]] = sdiv nof i[[SZ]] [[POS2]], 256
   // CHECK: [[POS4:%.+]] = add i[[SZ]] [[POS3]], 1
   // CHECK: [[SCRATCHPAD_NEXT:%.+]] = mul i[[SZ]] [[POS4]], 256
   //
@@ -643,7 +643,7 @@
   // CHECK: [[OF:%.+]] = mul i[[SZ]] [[NUM_TEAMS]], 1
   // CHECK: [[POS1:%.+]] = add i[[SZ]] [[SCRATCHPAD]], [[OF]]
   // CHECK: [[POS2:%.+]] = sub i[[SZ]] [[POS1]], 1
-  // CHECK: [[POS3:%.+]] = sdiv i[[SZ]] [[POS2]], 256
+  // CHECK: [[POS3:%.+]] = sdiv nof i[[SZ]] [[POS2]], 256
   // CHECK: [[POS4:%.+]] = add i[[SZ]] [[POS3]], 1
   // CHECK: [[SCRATCHPAD_NEXT:%.+]] = mul i[[SZ]] [[POS4]], 256
   //
@@ -1024,7 +1024,7 @@
   // CHECK: [[OF:%.+]] = mul i[[SZ]] [[NUM_TEAMS]], 4
   // CHECK: [[POS1:%.+]] = add i[[SZ]] [[SCRATCHPAD]], [[OF]]
   // CHECK: [[POS2:%.+]] = sub i[[SZ]] [[POS1]], 1
-  // CHECK: [[POS3:%.+]] = sdiv i[[SZ]] [[POS2]], 256
+  // CHECK: [[POS3:%.+]] = sdiv nof i[[SZ]] [[POS2]], 256
   // CHECK: [[POS4:%.+]] = add i[[SZ]] [[POS3]], 1
   // CHECK: [[SCRATCHPAD_NEXT:%.+]] = mul i[[SZ]] [[POS4]], 256
   //
@@ -1072,7 +1072,7 @@
   // CHECK: [[OF:%.+]] = mul i[[SZ]] [[NUM_TEAMS]], 4
   // CHECK: [[POS1:%.+]] = add i[[SZ]] [[SCRATCHPAD]], [[OF]]
   // CHECK: [[POS2:%.+]] = sub i[[SZ]] [[POS1]], 1
-  // CHECK: [[POS3:%.+]] = sdiv i[[SZ]] [[POS2]], 256
+  // CHECK: [[POS3:%.+]] = sdiv nof i[[SZ]] [[POS2]], 256
   // CHECK: [[POS4:%.+]] = add i[[SZ]] [[POS3]], 1
   // CHECK: [[SCRATCHPAD_NEXT:%.+]] = mul i[[SZ]] [[POS4]], 256
   //
Index: test/OpenMP/for_linear_codegen.cpp
===================================================================
--- test/OpenMP/for_linear_codegen.cpp
+++ test/OpenMP/for_linear_codegen.cpp
@@ -180,7 +180,7 @@
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
     // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
     // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
     // LAMBDA: call void @__kmpc_for_static_fini(
     // LAMBDA: br i1
@@ -333,7 +333,7 @@
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // BLOCKS: call void @__kmpc_for_static_fini(
 // BLOCKS: br i1
@@ -447,7 +447,7 @@
 // CHECK-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // CHECK: call void @__kmpc_for_static_fini(
 // CHECK: br i1
Index: test/OpenMP/for_reduction_codegen.cpp
===================================================================
--- test/OpenMP/for_reduction_codegen.cpp
+++ test/OpenMP/for_reduction_codegen.cpp
@@ -550,7 +550,7 @@
 // CHECK: [[UB_CAST:%.+]] = ptrtoint i32* [[UB1_UP:%.+]] to i64
 // CHECK: [[LB_CAST:%.+]] = ptrtoint i32* [[LB1_0:%.+]] to i64
 // CHECK: [[DIFF:%.+]] = sub i64 [[UB_CAST]], [[LB_CAST]]
-// CHECK: [[SIZE_1:%.+]] = sdiv exact i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK: [[SIZE_1:%.+]] = sdiv exact nof i64 [[DIFF]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
 // CHECK: [[ARR_SIZE:%.+]] = add nuw i64 [[SIZE_1]], 1
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
@@ -747,7 +747,7 @@
 
 // CHECK: store i{{[0-9]+}}* [[GTID_ADDR]], i{{[0-9]+}}** [[GTID_ADDR_ADDR:%.+]],
 
-// CHECK: [[ARR_SIZE:%.+]] = udiv exact i64
+// CHECK: [[ARR_SIZE:%.+]] = udiv exact nof i64
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[ARR_PRIV:%.+]] = alloca i32, i64 [[ARR_SIZE]],
 
@@ -950,7 +950,7 @@
 // CHECK: [[START:%.+]] = ptrtoint i32* [[ARR_ORIG]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint i32* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
 // CHECK: [[ARR_PRIV_PTR:%.+]] = bitcast [1 x [2 x i32]]* [[ARR_PRIV]] to i32*
 // CHECK: [[ARR_PRIV:%.+]] = getelementptr i32, i32* [[ARR_PRIV_PTR]], i64 [[OFFSET]]
 
@@ -969,16 +969,16 @@
 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64
 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
-// CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[DIF:%.+]] = sdiv exact nof i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[VAR2_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
 // CHECK: [[LD:%.+]] = load [[S_FLOAT_TY]]**, [[S_FLOAT_TY]]*** [[VAR2_ORIG]],
 // CHECK: [[ORIG_START:%.+]] = load [[S_FLOAT_TY]]*, [[S_FLOAT_TY]]** [[LD]],
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[PSEUDO_VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV]], i64 [[OFFSET]]
 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
 // CHECK: store [[S_FLOAT_TY]]* [[PSEUDO_VAR2_PRIV]], [[S_FLOAT_TY]]** [[REF]]
@@ -1006,7 +1006,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[VAR2_PRIV_PTR:%.+]] = bitcast [1 x [6 x [[S_FLOAT_TY]]]]* [[VAR2_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
@@ -1035,7 +1035,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[VAR2_PRIV_PTR:%.+]] = bitcast [1 x [6 x [[S_FLOAT_TY]]]]* [[VAR2_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
@@ -1064,7 +1064,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[PSEUDO_VAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR2_PRIV]], i64 [[OFFSET]]
 // CHECK: store [[S_FLOAT_TY]]** [[REF:.+]], [[S_FLOAT_TY]]*** %
 // CHECK: store [[S_FLOAT_TY]]* [[PSEUDO_VAR2_PRIV]], [[S_FLOAT_TY]]** [[REF]]
@@ -1086,7 +1086,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[VVAR2_PRIV_PTR:%.+]] = bitcast [5 x [[S_FLOAT_TY]]]* [[VVAR2_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[VVAR2_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VVAR2_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: ret void
@@ -1113,7 +1113,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[VAR3_PRIV_PTR:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [4 x [[S_FLOAT_TY]]]*
@@ -1144,7 +1144,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[VAR3_PRIV_PTR:%.+]] = bitcast [2 x [[S_FLOAT_TY]]]* [[VAR3_PRIV]] to [[S_FLOAT_TY]]*
 // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [4 x [[S_FLOAT_TY]]]*
@@ -1167,16 +1167,16 @@
 // CHECK: [[LAST:%.+]] = ptrtoint [[S_FLOAT_TY]]* %{{.+}} to i64
 // CHECK: [[FIRST:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW:%.+]] to i64
 // CHECK: [[BYTE_DIF:%.+]] = sub i64 [[LAST]], [[FIRST]]
-// CHECK: [[DIF:%.+]] = sdiv exact i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[DIF:%.+]] = sdiv exact nof i64 [[BYTE_DIF]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[SIZE:%.+]] = add nuw i64 [[DIF]], 1
 // CHECK: call i8* @llvm.stacksave()
 // CHECK: [[VAR3_PRIV:%.+]] = alloca [[S_FLOAT_TY]], i64 [[SIZE]],
 // CHECK: [[VAR3_ORIG:%.+]] = load [4 x [[S_FLOAT_TY]]]*, [4 x [[S_FLOAT_TY]]]** [[VAR3_ORIG_ADDR]],
 // CHECK: [[ORIG_START:%.+]] = bitcast [4 x [[S_FLOAT_TY]]]* [[VAR3_ORIG]] to [[S_FLOAT_TY]]*
 // CHECK: [[START:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_FLOAT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (float* getelementptr (float, float* null, i32 1) to i64)
 // CHECK: [[PSEUDO_VAR3_PRIV:%.+]] = getelementptr [[S_FLOAT_TY]], [[S_FLOAT_TY]]* [[VAR3_PRIV]], i64 [[OFFSET]]
 // CHECK: [[VAR3_PRIV:%.+]] = bitcast [[S_FLOAT_TY]]* [[PSEUDO_VAR3_PRIV]] to [4 x [[S_FLOAT_TY]]]*
 
@@ -1462,7 +1462,7 @@
 // CHECK: [[START:%.+]] = ptrtoint [[S_INT_TY]]* [[ORIG_START]] to i64
 // CHECK: [[LOW_BOUND:%.+]] = ptrtoint [[S_INT_TY]]* [[LOW]] to i64
 // CHECK: [[OFFSET_BYTES:%.+]] = sub i64 [[START]], [[LOW_BOUND]]
-// CHECK: [[OFFSET:%.+]] = sdiv exact i64 [[OFFSET_BYTES]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
+// CHECK: [[OFFSET:%.+]] = sdiv exact nof i64 [[OFFSET_BYTES]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64)
 // CHECK: [[ARR_PRIV_PTR:%.+]] = bitcast [40 x [[S_INT_TY]]]* [[ARR_PRIV]] to [[S_INT_TY]]*
 // CHECK: [[PSEUDO_ARR_PRIV:%.+]] = getelementptr [[S_INT_TY]], [[S_INT_TY]]* [[ARR_PRIV_PTR]], i64 [[OFFSET]]
 // CHECK: [[ARR_PRIV:%.+]] = bitcast [[S_INT_TY]]* [[PSEUDO_ARR_PRIV]] to [42 x [[S_INT_TY]]]*
Index: test/OpenMP/parallel_firstprivate_codegen.cpp
===================================================================
--- test/OpenMP/parallel_firstprivate_codegen.cpp
+++ test/OpenMP/parallel_firstprivate_codegen.cpp
@@ -184,7 +184,7 @@
     // LAMBDA-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
     // LAMBDA-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
     // LAMBDA-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+    // LAMBDA-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
     // LAMBDA-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
     // LAMBDA-NEXT: ret void
 
@@ -307,7 +307,7 @@
 // BLOCKS-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // BLOCKS-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // BLOCKS-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// BLOCKS-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// BLOCKS-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // BLOCKS-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // BLOCKS-NEXT: ret void
 #else
@@ -436,7 +436,7 @@
 // CHECK-32-NEXT: store i{{[0-9]+}} [[DEC]], i{{[0-9]+}}* [[B_PRIV]],
 // CHECK-NEXT: [[C_PRIV:%.+]] = load i{{[0-9]+}}*, i{{[0-9]+}}** [[REFC]],
 // CHECK-NEXT: [[C_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[C_PRIV]],
-// CHECK-NEXT: [[DIV:%.+]] = sdiv i{{[0-9]+}} [[C_VAL]], 1
+// CHECK-NEXT: [[DIV:%.+]] = sdiv nof i{{[0-9]+}} [[C_VAL]], 1
 // CHECK-NEXT: store i{{[0-9]+}} [[DIV]], i{{[0-9]+}}* [[C_PRIV]],
 // CHECK-NEXT: [[E_PRIV:%.+]] = load [4 x i{{[0-9]+}}]*, [4 x i{{[0-9]+}}]** [[REFE]],
 // CHECK-NEXT: [[E_PRIV_2:%.+]] = getelementptr inbounds [4 x i{{[0-9]+}}], [4 x i{{[0-9]+}}]* [[E_PRIV]], i{{[0-9]+}} 0, i{{[0-9]+}} 2
Index: test/OpenMP/atomic_capture_codegen.cpp
===================================================================
--- test/OpenMP/atomic_capture_codegen.cpp
+++ test/OpenMP/atomic_capture_codegen.cpp
@@ -177,7 +177,7 @@
 // CHECK: br label %[[CONT:.+]]
 // CHECK: [[CONT]]
 // CHECK: [[EXPECTED:%.+]] = phi i64 [ [[X]], %{{.+}} ], [ [[OLD_X:%.+]], %[[CONT]] ]
-// CHECK: [[DESIRED:%.+]] = sdiv i64 [[EXPECTED]], [[EXPR]]
+// CHECK: [[DESIRED:%.+]] = sdiv nof i64 [[EXPECTED]], [[EXPR]]
 // CHECK: store i64 [[DESIRED]], i64* [[TEMP:%.+]],
 // CHECK: [[DESIRED:%.+]] = load i64, i64* [[TEMP]],
 // CHECK: [[RES:%.+]] = cmpxchg i64* [[X_ADDR]], i64 [[EXPECTED]], i64 [[DESIRED]] monotonic monotonic
Index: test/OpenMP/taskgroup_task_reduction_codegen.cpp
===================================================================
--- test/OpenMP/taskgroup_task_reduction_codegen.cpp
+++ test/OpenMP/taskgroup_task_reduction_codegen.cpp
@@ -120,7 +120,7 @@
 // CHECK-DAG:   [[TMP38]] = getelementptr inbounds [[T2]], [[T2]]* [[GEPVLA:%[^,]+]], i32 0, i32 0
 // CHECK-DAG:   [[GEPVLA]] = getelementptr inbounds [2 x [[T2]]], [2 x [[T2]]]* [[RD_IN2]], i64 0, i64
 // CHECK-DAG:   [[TMP40:%.+]] = mul nuw i64 [[VLA_SIZE]], 2
-// CHECK-DAG:   [[TMP41:%.+]] = udiv exact i64 [[TMP40]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
+// CHECK-DAG:   [[TMP41:%.+]] = udiv exact nof i64 [[TMP40]], ptrtoint (i16* getelementptr (i16, i16* null, i32 1) to i64)
 // CHECK-DAG:   [[TMP42:%.+]] = getelementptr inbounds [[T2]], [[T2]]* [[GEPVLA]], i32 0, i32 1
 // CHECK-DAG:   store i64 [[TMP40]], i64* [[TMP42]],
 // CHECK-DAG:   [[TMP43:%.+]] = getelementptr inbounds [[T2]], [[T2]]* [[GEPVLA]], i32 0, i32 2
Index: test/CodeGenCXX/vla.cpp
===================================================================
--- test/CodeGenCXX/vla.cpp
+++ test/CodeGenCXX/vla.cpp
@@ -84,7 +84,7 @@
   
   
   //CHECK: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_NUM_ELEMENTS_PRE]]
-  //CHECK-NEXT: [[VLA_NUM_ELEMENTS_POST:%.*]] = udiv i64 [[VLA_SIZEOF]], 4
+  //CHECK-NEXT: [[VLA_NUM_ELEMENTS_POST:%.*]] = udiv nof i64 [[VLA_SIZEOF]], 4
   //CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds i32, i32* {{%.*}}, i64 [[VLA_NUM_ELEMENTS_POST]]
   //X64-NEXT: store i32* [[VLA_END_PTR]], i32** %__end
   //AMD-NEXT: store i32* [[VLA_END_PTR]], i32** [[END]]
@@ -116,7 +116,7 @@
   //CHECK: [[VLA_DIM1_X_DIM2:%.*]] = mul nuw i64 [[VLA_DIM1_PRE]], [[VLA_DIM2_PRE]]
   //CHECK-NEXT: [[VLA_SIZEOF:%.*]] = mul nuw i64 4, [[VLA_DIM1_X_DIM2]]
   //CHECK-NEXT: [[VLA_SIZEOF_DIM2:%.*]] = mul nuw i64 4, [[VLA_DIM2_PRE]]
-  //CHECK-NEXT: [[VLA_NUM_ELEMENTS:%.*]] = udiv i64 [[VLA_SIZEOF]], [[VLA_SIZEOF_DIM2]]
+  //CHECK-NEXT: [[VLA_NUM_ELEMENTS:%.*]] = udiv nof i64 [[VLA_SIZEOF]], [[VLA_SIZEOF_DIM2]]
   //CHECK-NEXT: [[VLA_END_INDEX:%.*]] = mul nsw i64 [[VLA_NUM_ELEMENTS]], [[VLA_DIM2_PRE]]
   //CHECK-NEXT: [[VLA_END_PTR:%.*]] = getelementptr inbounds i32, i32* {{%.*}}, i64 [[VLA_END_INDEX]]
   //X64-NEXT: store i32* [[VLA_END_PTR]], i32** %__end