DavidEGrayson updated this revision to Diff 36117.
DavidEGrayson added a comment.

I changed the patch in two ways: it now returns ExprError() if the third 
argument is wrong (for consistency with the other argument-checking code), and 
I removed an outdated comment from EncompassingIntegerType.

Thanks for reviewing this, John!  I think it is finally ready to be committed.  
Please let me know if there is anything else I need to do before it can land.

This patch is still based on r248284, which is from 8 days ago.  Hopefully it 
still applies cleanly.
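
In case it helps with a final sanity check, here is a small usage sketch (my 
own example, not part of the patch; the names and values are just for 
illustration) of the mixed-type case that the docs and the 
test_add_overflow_uint_int_int test exercise:

  #include <stdio.h>

  void demo(void) {
    unsigned x = 4000000000u;  /* does not fit in a signed int */
    int y = 1;
    int narrow;
    unsigned wide;

    /* The operands are widened to a signed 33-bit encompassing type,
       added there, and the sum is then checked against the result type. */
    if (__builtin_add_overflow(x, y, &narrow))
      printf("overflow into int\n");   /* taken: 4000000001 > INT_MAX */

    if (!__builtin_add_overflow(x, y, &wide))
      printf("wide = %u\n", wide);     /* prints 4000000001 */
  }

The same i33 addition is emitted for both calls; only the truncation check 
against the result type differs, which is what the sadd.with.overflow.i33 lines 
in builtins-overflow.c verify.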


http://reviews.llvm.org/D12793

Files:
  docs/LanguageExtensions.rst
  include/clang/Basic/Builtins.def
  include/clang/Basic/DiagnosticSemaKinds.td
  lib/CodeGen/CGBuiltin.cpp
  lib/Sema/SemaChecking.cpp
  test/CodeGen/builtins-overflow-error.c
  test/CodeGen/builtins-overflow.c

Index: test/CodeGen/builtins-overflow.c
===================================================================
--- test/CodeGen/builtins-overflow.c
+++ test/CodeGen/builtins-overflow.c
@@ -11,7 +11,159 @@
 extern int IntErrorCode;
 extern long LongErrorCode;
 extern long long LongLongErrorCode;
+void overflowed(void);
 
+unsigned test_add_overflow_uint_uint_uint(unsigned x, unsigned y) {
+  // CHECK: @test_add_overflow_uint_uint_uint
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  unsigned r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+int test_add_overflow_int_int_int(int x, int y) {
+  // CHECK: @test_add_overflow_int_int_int
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  int r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+unsigned test_sub_overflow_uint_uint_uint(unsigned x, unsigned y) {
+  // CHECK: @test_sub_overflow_uint_uint_uint
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  unsigned r;
+  if (__builtin_sub_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+int test_sub_overflow_int_int_int(int x, int y) {
+  // CHECK: @test_sub_overflow_int_int_int
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  int r;
+  if (__builtin_sub_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+unsigned test_mul_overflow_uint_uint_uint(unsigned x, unsigned y) {
+  // CHECK: @test_mul_overflow_uint_uint_uint
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  unsigned r;
+  if (__builtin_mul_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+int test_mul_overflow_int_int_int(int x, int y) {
+  // CHECK: @test_mul_overflow_int_int_int
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  int r;
+  if (__builtin_mul_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+int test_add_overflow_uint_int_int(unsigned x, int y) {
+  // CHECK: @test_add_overflow_uint_int_int
+  // CHECK: [[XE:%.+]] = zext i32 %{{.+}} to i33
+  // CHECK: [[YE:%.+]] = sext i32 %{{.+}} to i33
+  // CHECK: [[S:%.+]] = call { i33, i1 } @llvm.sadd.with.overflow.i33(i33 [[XE]], i33 [[YE]])
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i33, i1 } [[S]], 0
+  // CHECK-DAG: [[C1:%.+]] = extractvalue { i33, i1 } [[S]], 1
+  // CHECK: [[QT:%.+]] = trunc i33 [[Q]] to i32
+  // CHECK: [[QTE:%.+]] = sext i32 [[QT]] to i33
+  // CHECK: [[C2:%.+]] = icmp ne i33 [[Q]], [[QTE]]
+  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
+  // CHECK: store i32 [[QT]], i32* %r
+  // CHECK: br i1 [[C3]]
+  int r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+_Bool test_add_overflow_uint_uint_bool(unsigned x, unsigned y) {
+  // CHECK: @test_add_overflow_uint_uint_bool
+  // CHECK-NOT: ext
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C1:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: [[QT:%.+]] = trunc i32 [[Q]] to i1
+  // CHECK: [[QTE:%.+]] = zext i1 [[QT]] to i32
+  // CHECK: [[C2:%.+]] = icmp ne i32 [[Q]], [[QTE]]
+  // CHECK: [[C3:%.+]] = or i1 [[C1]], [[C2]]
+  // CHECK: [[QT2:%.+]] = zext i1 [[QT]] to i8
+  // CHECK: store i8 [[QT2]], i8* %r
+  // CHECK: br i1 [[C3]]
+  _Bool r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+unsigned test_add_overflow_bool_bool_uint(_Bool x, _Bool y) {
+  // CHECK: @test_add_overflow_bool_bool_uint
+  // CHECK: [[XE:%.+]] = zext i1 %{{.+}} to i32
+  // CHECK: [[YE:%.+]] = zext i1 %{{.+}} to i32
+  // CHECK: [[S:%.+]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[XE]], i32 [[YE]])
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i32, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i32, i1 } [[S]], 1
+  // CHECK: store i32 [[Q]], i32* %r
+  // CHECK: br i1 [[C]]
+  unsigned r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
+_Bool test_add_overflow_bool_bool_bool(_Bool x, _Bool y) {
+  // CHECK: @test_add_overflow_bool_bool_bool
+  // CHECK: [[S:%.+]] = call { i1, i1 } @llvm.uadd.with.overflow.i1(i1 %{{.+}}, i1 %{{.+}})
+  // CHECK-DAG: [[Q:%.+]] = extractvalue { i1, i1 } [[S]], 0
+  // CHECK-DAG: [[C:%.+]] = extractvalue { i1, i1 } [[S]], 1
+  // CHECK: [[QT2:%.+]] = zext i1 [[Q]] to i8
+  // CHECK: store i8 [[QT2]], i8* %r
+  // CHECK: br i1 [[C]]
+  _Bool r;
+  if (__builtin_add_overflow(x, y, &r))
+    overflowed();
+  return r;
+}
+
 unsigned test_uadd_overflow(unsigned x, unsigned y) {
 // CHECK: @test_uadd_overflow
 // CHECK: %{{.+}} = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %{{.+}})
Index: test/CodeGen/builtins-overflow-error.c
===================================================================
--- test/CodeGen/builtins-overflow-error.c
+++ test/CodeGen/builtins-overflow-error.c
@@ -0,0 +1,17 @@
+// RUN: %clang_cc1 -Wall -Werror -fsyntax-only -verify %s
+
+void test(void) {
+  unsigned r;
+  const char * c;
+  float f;
+  const unsigned q;
+
+  __builtin_add_overflow();  // expected-error {{too few arguments to function call, expected 3, have 0}}
+  __builtin_add_overflow(1, 1, 1, 1);  // expected-error {{too many arguments to function call, expected 3, have 4}}
+
+  __builtin_add_overflow(c, 1, &r);  // expected-error {{operand argument to overflow builtin must be an integer ('const char *' invalid)}}
+  __builtin_add_overflow(1, c, &r);  // expected-error {{operand argument to overflow builtin must be an integer ('const char *' invalid)}}
+  __builtin_add_overflow(1, 1, 3);  // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('int' invalid)}}
+  __builtin_add_overflow(1, 1, &f);  // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('float *' invalid)}}
+  __builtin_add_overflow(1, 1, &q);  // expected-error {{result argument to overflow builtin must be a pointer to a non-const integer ('const unsigned int *' invalid)}}
+}
Index: lib/Sema/SemaChecking.cpp
===================================================================
--- lib/Sema/SemaChecking.cpp
+++ lib/Sema/SemaChecking.cpp
@@ -112,6 +112,37 @@
   return false;
 }
 
+static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall) {
+  if (checkArgCount(S, TheCall, 3))
+    return true;
+
+  // First two arguments should be integers.
+  for (unsigned I = 0; I < 2; ++I) {
+    Expr *Arg = TheCall->getArg(I);
+    QualType Ty = Arg->getType();
+    if (!Ty->isIntegerType()) {
+      S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_int)
+          << Ty << Arg->getSourceRange();
+      return true;
+    }
+  }
+
+  // Third argument should be a pointer to a non-const integer.
+  {
+    Expr *Arg = TheCall->getArg(2);
+    QualType Ty = Arg->getType();
+    const auto *PtrTy = Ty->getAs<PointerType>();
+    if (!(PtrTy && PtrTy->getPointeeType()->isIntegerType() &&
+          !PtrTy->getPointeeType().isConstQualified())) {
+      S.Diag(Arg->getLocStart(), diag::err_overflow_builtin_must_be_ptr_int)
+          << Ty << Arg->getSourceRange();
+      return true;
+    }
+  }
+
+  return false;
+}
+
 static void SemaBuiltinMemChkCall(Sema &S, FunctionDecl *FDecl,
 		                  CallExpr *TheCall, unsigned SizeIdx,
                                   unsigned DstSizeIdx) {
@@ -457,6 +488,12 @@
     if (SemaBuiltinAddressof(*this, TheCall))
       return ExprError();
     break;
+  case Builtin::BI__builtin_add_overflow:
+  case Builtin::BI__builtin_sub_overflow:
+  case Builtin::BI__builtin_mul_overflow:
+    if (SemaBuiltinOverflow(*this, TheCall))
+      return ExprError();
+    break;
   case Builtin::BI__builtin_operator_new:
   case Builtin::BI__builtin_operator_delete:
     if (!getLangOpts().CPlusPlus) {
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -278,6 +278,50 @@
   return CGF.Builder.CreateExtractValue(Tmp, 0);
 }
 
+namespace {
+  struct WidthAndSignedness {
+    unsigned Width;
+    bool Signed;
+  };
+}
+
+static WidthAndSignedness
+getIntegerWidthAndSignedness(const clang::ASTContext &context,
+                             const clang::QualType Type) {
+  assert(Type->isIntegerType() && "Given type is not an integer.");
+  unsigned Width = Type->isBooleanType() ? 1 : context.getTypeInfo(Type).Width;
+  bool Signed = Type->isSignedIntegerType();
+  return {Width, Signed};
+}
+
+// Given one or more integer types, this function produces an integer type that
+// encompasses them: any value in one of the given types could be expressed in
+// the encompassing type.
+static struct WidthAndSignedness
+EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
+  assert(Types.size() > 0 && "Empty list of types.");
+
+  // If any of the given types is signed, we must return a signed type.
+  bool Signed = false;
+  for (const auto &Type : Types) {
+    Signed |= Type.Signed;
+  }
+
+  // The encompassing type must have a width greater than or equal to the width
+  // of the specified types.  Additionally, if the encompassing type is signed,
+  // its width must be strictly greater than the width of any unsigned types
+  // given.
+  unsigned Width = 0;
+  for (const auto &Type : Types) {
+    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
+    if (Width < MinWidth) {
+      Width = MinWidth;
+    }
+  }
+
+  return {Width, Signed};
+}
+
 Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
   llvm::Type *DestType = Int8PtrTy;
   if (ArgValue->getType() != DestType)
@@ -1544,6 +1588,86 @@
     Builder.CreateStore(CarryOut, CarryOutPtr);
     return RValue::get(Sum2);
   }
+
+  case Builtin::BI__builtin_add_overflow:
+  case Builtin::BI__builtin_sub_overflow:
+  case Builtin::BI__builtin_mul_overflow: {
+    const clang::Expr *LeftArg = E->getArg(0);
+    const clang::Expr *RightArg = E->getArg(1);
+    const clang::Expr *ResultArg = E->getArg(2);
+
+    clang::QualType ResultQTy =
+        ResultArg->getType()->castAs<PointerType>()->getPointeeType();
+
+    WidthAndSignedness LeftInfo =
+        getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
+    WidthAndSignedness RightInfo =
+        getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
+    WidthAndSignedness ResultInfo =
+        getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
+    WidthAndSignedness EncompassingInfo =
+        EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
+
+    llvm::Type *EncompassingLLVMTy =
+        llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
+
+    llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
+
+    llvm::Intrinsic::ID IntrinsicId;
+    switch (BuiltinID) {
+    default:
+      llvm_unreachable("Unknown overflow builtin id.");
+    case Builtin::BI__builtin_add_overflow:
+      IntrinsicId = EncompassingInfo.Signed
+                        ? llvm::Intrinsic::sadd_with_overflow
+                        : llvm::Intrinsic::uadd_with_overflow;
+      break;
+    case Builtin::BI__builtin_sub_overflow:
+      IntrinsicId = EncompassingInfo.Signed
+                        ? llvm::Intrinsic::ssub_with_overflow
+                        : llvm::Intrinsic::usub_with_overflow;
+      break;
+    case Builtin::BI__builtin_mul_overflow:
+      IntrinsicId = EncompassingInfo.Signed
+                        ? llvm::Intrinsic::smul_with_overflow
+                        : llvm::Intrinsic::umul_with_overflow;
+      break;
+    }
+
+    llvm::Value *Left = EmitScalarExpr(LeftArg);
+    llvm::Value *Right = EmitScalarExpr(RightArg);
+    Address ResultPtr = EmitPointerWithAlignment(ResultArg);
+
+    // Extend each operand to the encompassing type.
+    Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
+    Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
+
+    // Perform the operation on the extended values.
+    llvm::Value *Overflow, *Result;
+    Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
+
+    if (EncompassingInfo.Width > ResultInfo.Width) {
+      // The encompassing type is wider than the result type, so we need to
+      // truncate it.
+      llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
+
+      // To see if the truncation caused an overflow, we will extend
+      // the result and then compare it to the original result.
+      llvm::Value *ResultTruncExt = Builder.CreateIntCast(
+          ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
+      llvm::Value *TruncationOverflow =
+          Builder.CreateICmpNE(Result, ResultTruncExt);
+
+      Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
+      Result = ResultTrunc;
+    }
+
+    // Finally, store the result using the pointer.
+    Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr);
+
+    return RValue::get(Overflow);
+  }
+
   case Builtin::BI__builtin_uadd_overflow:
   case Builtin::BI__builtin_uaddl_overflow:
   case Builtin::BI__builtin_uaddll_overflow:
@@ -1573,7 +1697,7 @@
     // Decide which of the overflow intrinsics we are lowering to:
     llvm::Intrinsic::ID IntrinsicId;
     switch (BuiltinID) {
-    default: llvm_unreachable("Unknown security overflow builtin id.");
+    default: llvm_unreachable("Unknown overflow builtin id.");
     case Builtin::BI__builtin_uadd_overflow:
     case Builtin::BI__builtin_uaddl_overflow:
     case Builtin::BI__builtin_uaddll_overflow:
Index: include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- include/clang/Basic/DiagnosticSemaKinds.td
+++ include/clang/Basic/DiagnosticSemaKinds.td
@@ -6207,6 +6207,12 @@
   "memory order argument to atomic operation is invalid">,
   InGroup<DiagGroup<"atomic-memory-ordering">>;
 
+def err_overflow_builtin_must_be_int : Error<
+  "operand argument to overflow builtin must be an integer (%0 invalid)">;
+def err_overflow_builtin_must_be_ptr_int : Error<
+  "result argument to overflow builtin must be a pointer "
+  "to a non-const integer (%0 invalid)">;
+
 def err_atomic_load_store_uses_lib : Error<
   "atomic %select{load|store}0 requires runtime support that is not "
   "available for this target">;
Index: include/clang/Basic/Builtins.def
===================================================================
--- include/clang/Basic/Builtins.def
+++ include/clang/Basic/Builtins.def
@@ -1217,6 +1217,9 @@
 BUILTIN(__builtin_subcll, "ULLiULLiCULLiCULLiCULLi*", "n")
 
 // Checked Arithmetic Builtins for Security.
+BUILTIN(__builtin_add_overflow, "v.", "nt")
+BUILTIN(__builtin_sub_overflow, "v.", "nt")
+BUILTIN(__builtin_mul_overflow, "v.", "nt")
 BUILTIN(__builtin_uadd_overflow, "bUiCUiCUi*", "n")
 BUILTIN(__builtin_uaddl_overflow, "bULiCULiCULi*", "n")
 BUILTIN(__builtin_uaddll_overflow, "bULLiCULLiCULLi*", "n")
Index: docs/LanguageExtensions.rst
===================================================================
--- docs/LanguageExtensions.rst
+++ docs/LanguageExtensions.rst
@@ -1679,7 +1679,7 @@
   errorcode_t security_critical_application(...) {
     unsigned x, y, result;
     ...
-    if (__builtin_umul_overflow(x, y, &result))
+    if (__builtin_mul_overflow(x, y, &result))
       return kErrorCodeHackers;
     ...
     use_multiply(result);
@@ -1686,10 +1686,13 @@
     ...
   }
 
-A complete enumeration of the builtins are:
+Clang provides the following checked arithmetic builtins:
 
 .. code-block:: c
 
+  bool __builtin_add_overflow   (type1 x, type2 y, type3 *sum);
+  bool __builtin_sub_overflow   (type1 x, type2 y, type3 *diff);
+  bool __builtin_mul_overflow   (type1 x, type2 y, type3 *prod);
   bool __builtin_uadd_overflow  (unsigned x, unsigned y, unsigned *sum);
   bool __builtin_uaddl_overflow (unsigned long x, unsigned long y, unsigned long *sum);
   bool __builtin_uaddll_overflow(unsigned long long x, unsigned long long y, unsigned long long *sum);
@@ -1709,7 +1712,17 @@
   bool __builtin_smull_overflow (long x, long y, long *prod);
   bool __builtin_smulll_overflow(long long x, long long y, long long *prod);
 
+Each builtin performs the specified mathematical operation on the
+first two arguments and stores the result in the third argument.  If
+possible, the result will be equal to the mathematically-correct result
+and the builtin will return 0.  Otherwise, the builtin will return
+1 and the result will be equal to the unique value that is equivalent
+to the mathematically-correct result modulo two raised to the *k*-th
+power, where *k* is the number of bits in the result type.  The
+behavior of these builtins is well-defined for all argument values.
 
+The first three builtins can operate on any integer type (including booleans).
+
 .. _langext-__c11_atomic:
 
 __c11_atomic builtins
