erik.pilkington updated this revision to Diff 204609.
erik.pilkington marked 46 inline comments as done.
erik.pilkington added a comment.

Address review comments.
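For reference, the intended usage (a minimal sketch mirroring the bit_cast wrapper added in the new constexpr-builtin-bit-cast.cpp test; the float/unsigned pair below is only an illustrative example and assumes 32-bit float and unsigned):

  template <class To, class From>
  constexpr To bit_cast(const From &from) {
    static_assert(sizeof(To) == sizeof(From));
    return __builtin_bit_cast(To, from);
  }

  // Evaluated at compile time; 1.0f has the IEEE-754 bit pattern 0x3f800000.
  static_assert(bit_cast<unsigned>(1.0f) == 0x3f800000);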


CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D62825/new/

https://reviews.llvm.org/D62825

Files:
  clang/include/clang-c/Index.h
  clang/include/clang/AST/ExprCXX.h
  clang/include/clang/AST/RecursiveASTVisitor.h
  clang/include/clang/Basic/DiagnosticASTKinds.td
  clang/include/clang/Basic/DiagnosticSemaKinds.td
  clang/include/clang/Basic/StmtNodes.td
  clang/include/clang/Basic/TokenKinds.def
  clang/include/clang/Parse/Parser.h
  clang/include/clang/Sema/Sema.h
  clang/lib/AST/Expr.cpp
  clang/lib/AST/ExprClassification.cpp
  clang/lib/AST/ExprConstant.cpp
  clang/lib/AST/ItaniumMangle.cpp
  clang/lib/AST/StmtPrinter.cpp
  clang/lib/AST/StmtProfile.cpp
  clang/lib/CodeGen/CGExpr.cpp
  clang/lib/CodeGen/CGExprAgg.cpp
  clang/lib/CodeGen/CGExprComplex.cpp
  clang/lib/CodeGen/CGExprScalar.cpp
  clang/lib/Lex/PPMacroExpansion.cpp
  clang/lib/Parse/ParseExpr.cpp
  clang/lib/Parse/ParseExprCXX.cpp
  clang/lib/Sema/SemaCast.cpp
  clang/lib/Sema/SemaExceptionSpec.cpp
  clang/lib/Sema/TreeTransform.h
  clang/lib/Serialization/ASTReaderStmt.cpp
  clang/lib/Serialization/ASTWriterStmt.cpp
  clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
  clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp
  clang/test/CodeGenCXX/builtin-bit-cast.cpp
  clang/test/SemaCXX/builtin-bit-cast.cpp
  clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
  clang/tools/libclang/CIndex.cpp
  clang/tools/libclang/CXCursor.cpp
  llvm/include/llvm/ADT/APInt.h
  llvm/lib/ExecutionEngine/ExecutionEngine.cpp
  llvm/lib/Support/APInt.cpp

Index: llvm/lib/Support/APInt.cpp
===================================================================
--- llvm/lib/Support/APInt.cpp
+++ llvm/lib/Support/APInt.cpp
@@ -2929,3 +2929,56 @@
   LLVM_DEBUG(dbgs() << __func__ << ": solution (wrap): " << X << '\n');
   return X;
 }
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+void llvm::StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
+                            unsigned StoreBytes) {
+  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
+  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
+
+  if (sys::IsLittleEndianHost) {
+    // Little-endian host - the source is ordered from LSB to MSB.  Order the
+    // destination from LSB to MSB: Do a straight copy.
+    memcpy(Dst, Src, StoreBytes);
+  } else {
+    // Big-endian host - the source is an array of 64 bit words ordered from
+    // LSW to MSW.  Each word is ordered from MSB to LSB.  Order the destination
+    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
+    while (StoreBytes > sizeof(uint64_t)) {
+      StoreBytes -= sizeof(uint64_t);
+      // May not be aligned so use memcpy.
+      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
+      Src += sizeof(uint64_t);
+    }
+
+    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
+  }
+}
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+void llvm::LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
+  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
+  uint8_t *Dst = reinterpret_cast<uint8_t *>(
+                   const_cast<uint64_t *>(IntVal.getRawData()));
+
+  if (sys::IsLittleEndianHost)
+    // Little-endian host - the destination must be ordered from LSB to MSB.
+    // The source is ordered from LSB to MSB: Do a straight copy.
+    memcpy(Dst, Src, LoadBytes);
+  else {
+    // Big-endian - the destination is an array of 64 bit words ordered from
+    // LSW to MSW.  Each word must be ordered from MSB to LSB.  The source is
+    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
+    // a word.
+    while (LoadBytes > sizeof(uint64_t)) {
+      LoadBytes -= sizeof(uint64_t);
+      // May not be aligned so use memcpy.
+      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
+      Dst += sizeof(uint64_t);
+    }
+
+    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
+  }
+}
Index: llvm/lib/ExecutionEngine/ExecutionEngine.cpp
===================================================================
--- llvm/lib/ExecutionEngine/ExecutionEngine.cpp
+++ llvm/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -1019,32 +1019,6 @@
   return Result;
 }
 
-/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
-/// with the integer held in IntVal.
-static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
-                             unsigned StoreBytes) {
-  assert((IntVal.getBitWidth()+7)/8 >= StoreBytes && "Integer too small!");
-  const uint8_t *Src = (const uint8_t *)IntVal.getRawData();
-
-  if (sys::IsLittleEndianHost) {
-    // Little-endian host - the source is ordered from LSB to MSB.  Order the
-    // destination from LSB to MSB: Do a straight copy.
-    memcpy(Dst, Src, StoreBytes);
-  } else {
-    // Big-endian host - the source is an array of 64 bit words ordered from
-    // LSW to MSW.  Each word is ordered from MSB to LSB.  Order the destination
-    // from MSB to LSB: Reverse the word order, but not the bytes in a word.
-    while (StoreBytes > sizeof(uint64_t)) {
-      StoreBytes -= sizeof(uint64_t);
-      // May not be aligned so use memcpy.
-      memcpy(Dst + StoreBytes, Src, sizeof(uint64_t));
-      Src += sizeof(uint64_t);
-    }
-
-    memcpy(Dst, Src + sizeof(uint64_t) - StoreBytes, StoreBytes);
-  }
-}
-
 void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
                                          GenericValue *Ptr, Type *Ty) {
   const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
@@ -1092,33 +1066,6 @@
     std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
 }
 
-/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
-/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
-static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
-  assert((IntVal.getBitWidth()+7)/8 >= LoadBytes && "Integer too small!");
-  uint8_t *Dst = reinterpret_cast<uint8_t *>(
-                   const_cast<uint64_t *>(IntVal.getRawData()));
-
-  if (sys::IsLittleEndianHost)
-    // Little-endian host - the destination must be ordered from LSB to MSB.
-    // The source is ordered from LSB to MSB: Do a straight copy.
-    memcpy(Dst, Src, LoadBytes);
-  else {
-    // Big-endian - the destination is an array of 64 bit words ordered from
-    // LSW to MSW.  Each word must be ordered from MSB to LSB.  The source is
-    // ordered from MSB to LSB: Reverse the word order, but not the bytes in
-    // a word.
-    while (LoadBytes > sizeof(uint64_t)) {
-      LoadBytes -= sizeof(uint64_t);
-      // May not be aligned so use memcpy.
-      memcpy(Dst, Src + LoadBytes, sizeof(uint64_t));
-      Dst += sizeof(uint64_t);
-    }
-
-    memcpy(Dst + sizeof(uint64_t) - LoadBytes, Src, LoadBytes);
-  }
-}
-
 /// FIXME: document
 ///
 void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
Index: llvm/include/llvm/ADT/APInt.h
===================================================================
--- llvm/include/llvm/ADT/APInt.h
+++ llvm/include/llvm/ADT/APInt.h
@@ -2212,6 +2212,15 @@
 // See friend declaration above. This additional declaration is required in
 // order to compile LLVM with IBM xlC compiler.
 hash_code hash_value(const APInt &Arg);
-} // End of llvm namespace
+
+/// StoreIntToMemory - Fills the StoreBytes bytes of memory starting from Dst
+/// with the integer held in IntVal.
+void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst, unsigned StoreBytes);
+
+/// LoadIntFromMemory - Loads the integer stored in the LoadBytes bytes starting
+/// from Src into IntVal, which is assumed to be wide enough and to hold zero.
+void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes);
+
+} // namespace llvm
 
 #endif
Index: clang/tools/libclang/CXCursor.cpp
===================================================================
--- clang/tools/libclang/CXCursor.cpp
+++ clang/tools/libclang/CXCursor.cpp
@@ -716,6 +716,8 @@
   case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
     K = CXCursor_OMPTargetTeamsDistributeSimdDirective;
     break;
+  case Stmt::BuiltinBitCastExprClass:
+    K = CXCursor_BuiltinBitCastExpr;
   }
 
   CXCursor C = { K, 0, { Parent, S, TU } };
Index: clang/tools/libclang/CIndex.cpp
===================================================================
--- clang/tools/libclang/CIndex.cpp
+++ clang/tools/libclang/CIndex.cpp
@@ -5190,6 +5190,8 @@
       return cxstring::createRef("CallExpr");
   case CXCursor_ObjCMessageExpr:
       return cxstring::createRef("ObjCMessageExpr");
+  case CXCursor_BuiltinBitCastExpr:
+    return cxstring::createRef("BuiltinBitCastExpr");
   case CXCursor_UnexposedStmt:
       return cxstring::createRef("UnexposedStmt");
   case CXCursor_DeclStmt:
Index: clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/SemaCXX/constexpr-builtin-bit-cast.cpp
@@ -0,0 +1,275 @@
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple aarch64_be-linux-gnu %s
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#  define LITTLE_END 1
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#  define LITTLE_END 0
+#else
+#  error "huh?"
+#endif
+
+template <class T, class V> struct is_same {
+  static constexpr bool value = false;
+};
+template <class T> struct is_same<T, T> {
+  static constexpr bool value = true;
+};
+
+static_assert(sizeof(int) == 4);
+static_assert(sizeof(long long) == 8);
+
+template <class To, class From>
+constexpr To bit_cast(const From &from) {
+  static_assert(sizeof(To) == sizeof(From));
+  static_assert(!is_same<To, From>::value);
+  return __builtin_bit_cast(To, from);
+}
+
+template <class Intermediate, class Init>
+constexpr int round_trip(const Init &init) {
+  return bit_cast<Init>(bit_cast<Intermediate>(init)) == init;
+}
+
+void test_int() {
+  static_assert(round_trip<unsigned>((int)-1));
+  static_assert(round_trip<unsigned>((int)0x12345678));
+  static_assert(round_trip<unsigned>((int)0x87654321));
+  static_assert(round_trip<unsigned>((int)0x0C05FEFE));
+}
+
+void test_array() {
+  constexpr unsigned char input[] = {0xCA, 0xFE, 0xBA, 0xBE};
+  constexpr unsigned expected = LITTLE_END ? 0xBEBAFECA : 0xCAFEBABE;
+  static_assert(bit_cast<unsigned>(input) == expected);
+}
+
+void test_record() {
+  struct int_splicer {
+    unsigned x;
+    unsigned y;
+
+    constexpr bool operator==(const int_splicer &other) const {
+      return other.x == x && other.y == y;
+    }
+  };
+
+  constexpr int_splicer splice{0x0C05FEFE, 0xCAFEBABE};
+
+  static_assert(bit_cast<unsigned long long>(splice) == (LITTLE_END
+                ? 0xCAFEBABE0C05FEFE
+                : 0x0C05FEFECAFEBABE));
+
+  static_assert(bit_cast<int_splicer>(0xCAFEBABE0C05FEFE).x == (LITTLE_END
+                ? 0x0C05FEFE
+                : 0xCAFEBABE));
+
+  static_assert(round_trip<unsigned long long>(splice));
+  static_assert(round_trip<long long>(splice));
+
+  struct base2 {
+  };
+
+  struct base3 {
+    unsigned z;
+  };
+
+  struct bases : int_splicer, base2, base3 {
+    unsigned doublez;
+  };
+
+  struct tuple4 {
+    unsigned x, y, z, doublez;
+
+    constexpr bool operator==(tuple4 const &other) const {
+      return x == other.x && y == other.y &&
+             z == other.z && doublez == other.doublez;
+    }
+  };
+  constexpr bases b = {{1, 2}, {}, {3}, 4};
+  constexpr tuple4 t4 = bit_cast<tuple4>(b);
+  static_assert(t4 == tuple4{1, 2, 3, 4});
+  static_assert(round_trip<tuple4>(b));
+}
+
+void test_partially_initialized() {
+  struct pad {
+    char x;
+    int y;
+  };
+
+  struct no_pad {
+    char x;
+    char p1, p2, p3; // expected-note {{subobject declared here}}
+    int y;
+  };
+
+  static_assert(sizeof(pad) == sizeof(no_pad));
+
+  constexpr pad pir{4, 4};
+  constexpr int piw = bit_cast<no_pad>(pir).x;
+  static_assert(piw == 4);
+
+  // expected-error@+2 {{constexpr variable 'bad' must be initialized by a constant expression}}
+  // expected-note@+1 {{subobject of type 'char' is not initialized}}
+  constexpr no_pad bad = bit_cast<no_pad>(pir);
+
+  constexpr pad fine = bit_cast<pad>(no_pad{1, 2, 3, 4, 5});
+  static_assert(fine.x == 1 && fine.y == 5);
+}
+
+void no_bitfields() {
+  // FIXME: Support bit-fields in the constexpr evaluator.
+  struct S {
+    unsigned char x : 8;
+  };
+
+  struct G {
+    unsigned char x : 8;
+  };
+
+  constexpr S s{0};
+  // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
+  // expected-note@+1 {{constexpr bit_cast involving bit-field is not yet supported}}
+  constexpr G g = __builtin_bit_cast(G, s);
+}
+
+void array_members() {
+  struct S {
+    int ar[3];
+
+    constexpr bool operator==(const S &rhs) {
+      return ar[0] == rhs.ar[0] && ar[1] == rhs.ar[1] && ar[2] == rhs.ar[2];
+    }
+  };
+
+  struct G {
+    int a, b, c;
+
+    constexpr bool operator==(const G &rhs) {
+      return a == rhs.a && b == rhs.b && c == rhs.c;
+    }
+  };
+
+  constexpr S s{{1, 2, 3}};
+  constexpr G g = bit_cast<G>(s);
+  static_assert(g.a == 1 && g.b == 2 && g.c == 3);
+
+  static_assert(round_trip<G>(s));
+  static_assert(round_trip<S>(g));
+}
+
+void bad_types() {
+  union X {
+    int x;
+  };
+
+  struct G {
+    int g;
+  };
+  // expected-error@+2 {{constexpr variable 'g' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a union type 'X'}}
+  constexpr G g = __builtin_bit_cast(G, X{0});
+  // expected-error@+2 {{constexpr variable 'x' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a union type 'X'}}
+  constexpr X x = __builtin_bit_cast(X, G{0});
+
+  struct has_pointer {
+    // expected-note@+1 2 {{invalid type 'int *' is a member of 'has_pointer'}}
+    int *ptr;
+  };
+
+  // expected-error@+2 {{constexpr variable 'ptr' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr unsigned long ptr = __builtin_bit_cast(unsigned long, has_pointer{0});
+  // expected-error@+2 {{constexpr variable 'hptr' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr has_pointer hptr = __builtin_bit_cast(has_pointer, 0ul);
+}
+
+void backtrace() {
+  struct A {
+    // expected-note@+1 {{invalid type 'int *' is a member of 'A'}}
+    int *ptr;
+  };
+
+  struct B {
+    // expected-note@+1 {{invalid type 'A [10]' is a member of 'B'}}
+    A as[10];
+  };
+
+  // expected-note@+1 {{invalid type 'B' is a base of 'C'}}
+  struct C : B {
+  };
+
+  struct E {
+    unsigned long ar[10];
+  };
+
+  // expected-error@+2 {{constexpr variable 'e' must be initialized by a constant expression}}
+  // expected-note@+1 {{cannot constexpr evaluate a bit_cast with a pointer type 'int *'}}
+  constexpr E e = __builtin_bit_cast(E, C{});
+}
+
+void test_array_fill() {
+  constexpr unsigned char a[4] = {1, 2};
+  constexpr unsigned int i = bit_cast<unsigned int>(a);
+  static_assert(i == (LITTLE_END ? 0x00000201 : 0x01020000), "");
+}
+
+typedef decltype(nullptr) nullptr_t;
+
+constexpr int test_from_nullptr() {
+  unsigned long ul = __builtin_bit_cast(unsigned long, nullptr);
+  (void)ul;
+
+  // expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}}
+  auto read = ul;
+
+  return 0;
+}
+
+// expected-error@+2 {{constexpr variable 'tfn' must be initialized by a constant expression}}
+// expected-note@+1 {{in call to 'test_from_nullptr()'}}
+constexpr int tfn = test_from_nullptr();
+
+constexpr int test_to_nullptr() {
+  nullptr_t npt = __builtin_bit_cast(nullptr_t, 0ul);
+  void *ptr = npt;
+  return 0;
+}
+
+constexpr int ttn = test_to_nullptr();
+
+constexpr int test_indeterminate(bool read_indet) {
+  struct pad {
+    char a;
+    int b;
+  };
+
+  struct no_pad {
+    char a, p1, p2, p3;
+    int b;
+  };
+
+  pad p{1, 2};
+  no_pad np = bit_cast<no_pad>(p);
+
+  int tmp = np.a + np.b;
+
+  char& indet_ref = np.p1;
+
+  if (read_indet) {
+    // expected-note@+1 {{read of uninitialized object is not allowed in a constant expression}}
+    tmp = indet_ref;
+  }
+
+  indet_ref = 0;
+
+  return 0;
+}
+
+constexpr int run_test_indeterminate = test_indeterminate(false);
+// expected-error@+2 {{constexpr variable 'run_test_indeterminate2' must be initialized by a constant expression}}
+// expected-note@+1 {{in call to 'test_indeterminate(true)'}}
+constexpr int run_test_indeterminate2 = test_indeterminate(true);
Index: clang/test/SemaCXX/builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/SemaCXX/builtin-bit-cast.cpp
@@ -0,0 +1,35 @@
+// RUN: %clang_cc1 -verify -std=c++2a -fsyntax-only -triple x86_64-apple-macosx10.14.0 %s
+
+#if !__has_builtin(__builtin_bit_cast)
+#error
+#endif
+
+template <class T, T v>
+T instantiate() {
+  return __builtin_bit_cast(T, v);
+}
+
+int x = instantiate<int, 32>();
+
+struct secret_ctor {
+  char member;
+
+private: secret_ctor() = default;
+};
+
+void test1() {
+  secret_ctor c = __builtin_bit_cast(secret_ctor, (char)0);
+}
+
+void test2() {
+  constexpr int i = 0;
+  // expected-error@+1{{__builtin_bit_cast source size does not equal destination size (4 and 1)}}
+  constexpr char c = __builtin_bit_cast(char, i);
+}
+
+struct not_trivially_copyable {
+  virtual void foo() {}
+};
+
+// expected-error@+1{{__builtin_bit_cast source type must be trivially copyable}}
+constexpr unsigned long ul = __builtin_bit_cast(unsigned long, not_trivially_copyable{});
Index: clang/test/CodeGenCXX/builtin-bit-cast.cpp
===================================================================
--- /dev/null
+++ clang/test/CodeGenCXX/builtin-bit-cast.cpp
@@ -0,0 +1,112 @@
+// RUN: %clang_cc1 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
+
+void test_scalar() {
+  // CHECK-LABEL: define void @_Z11test_scalarv
+  __builtin_bit_cast(float, 42);
+
+  // CHECK: [[TEMP:%.*]] = alloca i32, align 4
+  // CHECK-NEXT: store i32 42, i32* [[TEMP]], align 4
+  // CHECK-NEXT: [[CASTED:%.*]] = bitcast i32* [[TEMP]] to float*
+  // CHECK-NEXT: load float, float* [[CASTED]], align 4
+}
+
+struct two_ints {
+  int x;
+  int y;
+};
+
+void test_aggregate_to_scalar() {
+  // CHECK-LABEL: define void @_Z24test_aggregate_to_scalarv
+  __builtin_bit_cast(unsigned long, two_ints{11, 12});
+
+  // CHECK: [[TEMP:%.*]] = alloca %struct.two_ints, align 8
+  // CHECK: store i32
+  // CHECK: store i32
+  // CHECK-NEXT: [[CASTED_PTR:%.*]] = bitcast %struct.two_ints* [[TEMP]] to i64*
+  // CHECK-NEXT: load i64, i64* [[CASTED_PTR]], align 8
+}
+
+void test_aggregate_to_scalar_lv() {
+  // CHECK-LABEL: define void @_Z27test_aggregate_to_scalar_lvv
+  two_ints lv{11, 12};
+  __builtin_bit_cast(unsigned long, lv);
+
+  // CHECK: [[LV:%.*]] = alloca %struct.two_ints, align 4
+  // CHECK: [[TEMP:%.*]] = alloca %struct.two_ints, align 8
+  // CHECK: [[CAST_TEMP:%.*]] = bitcast %struct.two_ints* [[TEMP]] to i8*
+  // CHECK: [[CAST_LV:%.*]] = bitcast %struct.two_ints* [[LV]] to i8*
+  // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[CAST_TEMP]], i8* align 4 [[CAST_LV]]
+  // CHECK: [[TEMP_I64:%.*]] = bitcast %struct.two_ints* [[TEMP]] to i64*
+  // CHECK: load i64, i64* [[TEMP_I64]], align 8
+}
+
+struct two_floats {
+  float x;
+  float y;
+};
+
+void test_aggregate_record() {
+  // CHECK-LABEL: define void @_Z21test_aggregate_recordv
+  two_floats tf = __builtin_bit_cast(two_floats, two_ints{1, 2});
+
+  // CHECK: [[TF:%.*]] = alloca %struct.two_floats, align 4
+  // CHECK-NEXT: [[TFTMP:%.*]] = bitcast %struct.two_floats* [[TF]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TFTMP]],
+}
+
+void test_aggregate_array() {
+  // CHECK-LABEL: define void @_Z20test_aggregate_arrayv
+  two_floats tf = __builtin_bit_cast(two_floats, (int [2]) {42, 32});
+
+  // CHECK: [[TF:%.*]] = alloca %struct.two_floats, align 4
+  // CHECK-NEXT: [[TFTMP:%.*]] = bitcast %struct.two_floats* [[TF]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TFTMP]],
+}
+
+void test_scalar_to_aggregate() {
+  // CHECK-LABEL: define void @_Z24test_scalar_to_aggregatev
+  two_ints ti = __builtin_bit_cast(two_ints, 24ul);
+
+  // CHECK: [[TI:%.*]] = alloca %struct.two_ints, align 4
+  // CHECK-NEXT: [[TITMP:%.*]] = bitcast %struct.two_ints* [[TI]] to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TITMP]]
+}
+
+void test_complex() {
+  // CHECK-LABEL: define void @_Z12test_complexv
+  using complex_t = _Complex unsigned;
+  __builtin_bit_cast(unsigned long, complex_t(43));
+
+  // CHECK: [[TEMP:%.*]] = alloca { i32, i32 }, align 8
+  // ... store complex 43 into temp ...
+  // CHECK: [[CASTED:%.*]] = bitcast { i32, i32 }* [[TEMP]] to i64*
+  // CHECK-NEXT: load i64, i64* [[CASTED]]
+}
+
+void test_to_complex() {
+  // CHECK-LABEL: define void @_Z15test_to_complexv
+
+  using complex_t = _Complex unsigned;
+  auto ul = __builtin_bit_cast(complex_t, 32ul);
+
+  // CHECK: [[UL:%.*]] = alloca { i32, i32 }
+  // CHECK-NEXT: [[TEMP:%.*]] = alloca i64, align 8
+  // CHECK-NEXT: store i64 32, i64* [[TEMP]], align 8
+  // CHECK-NEXT: [[CASTED:%.*]] = bitcast i64* [[TEMP]] to { i32, i32 }*
+  // CHECK: load
+  // CHECK: load
+}
+
+void test_array(int (&ary)[2]) {
+  // CHECK-LABEL: define void @_Z10test_arrayRA2_i
+  __builtin_bit_cast(unsigned long, ary);
+
+  // CHECK: [[ARY_PTR:%.*]] = alloca [2 x i32]*, align 8
+  // CHECK-NEXT: [[TEMP:%.*]] = alloca [2 x i32], align 8
+  // store ary into ARY_PTR
+  // CHECK: [[MEMCPY_DEST:%.*]] = bitcast [2 x i32]* [[TEMP]] to i8*
+  // CHECK-NEXT: [[MEMCPY_SRC:%.*]] = bitcast [2 x i32]* {{.*}} to i8*
+  // CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[MEMCPY_DEST]], i8* align 4 [[MEMCPY_SRC]]
+  // CHECK-NEXT: [[CAST:%.*]] = bitcast [2 x i32]* [[TEMP]] to i64*
+  // CHECK-NEXT: load i64, i64* [[CAST]]
+}
Index: clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp
===================================================================
--- /dev/null
+++ clang/test/CodeGenCXX/builtin-bit-cast-no-tbaa.cpp
@@ -0,0 +1,19 @@
+// RUN: %clang_cc1 -O3 -std=c++2a -S -emit-llvm -o - -disable-llvm-passes -triple x86_64-apple-macos10.14 %s | FileCheck %s
+
+void test_scalar() {
+  // CHECK-LABEL: define void @_Z11test_scalarv
+  __builtin_bit_cast(float, 42);
+
+  // CHECK: load float, float* {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA:.*]]
+}
+
+void test_scalar2() {
+  // CHECK-LABEL: define void @_Z12test_scalar2v
+  struct S {int m;};
+  __builtin_bit_cast(int, S{42});
+
+  // CHECK: load i32, i32* {{.*}}, align 4, !tbaa ![[MAY_ALIAS_TBAA]]
+}
+
+// CHECK: ![[CHAR_TBAA:.*]] = !{!"omnipotent char", {{.*}}, i64 0}
+// CHECK: ![[MAY_ALIAS_TBAA]] = !{![[CHAR_TBAA]], ![[CHAR_TBAA]], i64 0}
Index: clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
===================================================================
--- clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1694,6 +1694,7 @@
     case Stmt::CXXReinterpretCastExprClass:
     case Stmt::CXXConstCastExprClass:
     case Stmt::CXXFunctionalCastExprClass:
+    case Stmt::BuiltinBitCastExprClass:
     case Stmt::ObjCBridgedCastExprClass: {
       Bldr.takeNodes(Pred);
       const auto *C = cast<CastExpr>(S);
Index: clang/lib/Serialization/ASTWriterStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTWriterStmt.cpp
+++ clang/lib/Serialization/ASTWriterStmt.cpp
@@ -1438,6 +1438,12 @@
   Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
 }
 
+void ASTStmtWriter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+  VisitExplicitCastExpr(E);
+  Record.AddSourceLocation(E->getBeginLoc());
+  Record.AddSourceLocation(E->getEndLoc());
+}
+
 void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
   VisitCallExpr(E);
   Record.AddSourceLocation(E->UDSuffixLoc);
Index: clang/lib/Serialization/ASTReaderStmt.cpp
===================================================================
--- clang/lib/Serialization/ASTReaderStmt.cpp
+++ clang/lib/Serialization/ASTReaderStmt.cpp
@@ -1495,6 +1495,12 @@
   E->setRParenLoc(ReadSourceLocation());
 }
 
+void ASTStmtReader::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *E) {
+  VisitExplicitCastExpr(E);
+  E->KWLoc = ReadSourceLocation();
+  E->RParenLoc = ReadSourceLocation();
+}
+
 void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
   VisitCallExpr(E);
   E->UDSuffixLoc = ReadSourceLocation();
Index: clang/lib/Sema/TreeTransform.h
===================================================================
--- clang/lib/Sema/TreeTransform.h
+++ clang/lib/Sema/TreeTransform.h
@@ -2652,6 +2652,16 @@
                                                ListInitialization);
   }
 
+  /// Build a new C++ __builtin_bit_cast expression.
+  ///
+  /// By default, performs semantic analysis to build the new expression.
+  /// Subclasses may override this routine to provide different behavior.
+  ExprResult RebuildBuiltinBitCastExpr(SourceLocation KWLoc,
+                                       TypeSourceInfo *TSI, Expr *Sub,
+                                       SourceLocation RParenLoc) {
+    return getSema().BuildBuiltinBitCastExpr(KWLoc, TSI, Sub, RParenLoc);
+  }
+
   /// Build a new C++ typeid(type) expression.
   ///
   /// By default, performs semantic analysis to build the new expression.
@@ -10247,6 +10257,22 @@
       E->getAngleBrackets().getEnd(), SubExpr.get(), E->getRParenLoc());
 }
 
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBuiltinBitCastExpr(BuiltinBitCastExpr *BCE) {
+  TypeSourceInfo *TSI =
+      getDerived().TransformType(BCE->getTypeInfoAsWritten());
+  if (!TSI)
+    return ExprError();
+
+  ExprResult Sub = getDerived().TransformExpr(BCE->getSubExpr());
+  if (Sub.isInvalid())
+    return ExprError();
+
+  return getDerived().RebuildBuiltinBitCastExpr(BCE->getBeginLoc(), TSI,
+                                                Sub.get(), BCE->getEndLoc());
+}
+
 template<typename Derived>
 ExprResult
 TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
Index: clang/lib/Sema/SemaExceptionSpec.cpp
===================================================================
--- clang/lib/Sema/SemaExceptionSpec.cpp
+++ clang/lib/Sema/SemaExceptionSpec.cpp
@@ -1201,6 +1201,7 @@
   case Expr::CoyieldExprClass:
   case Expr::CXXConstCastExprClass:
   case Expr::CXXReinterpretCastExprClass:
+  case Expr::BuiltinBitCastExprClass:
   case Expr::CXXStdInitializerListExprClass:
   case Expr::DesignatedInitExprClass:
   case Expr::DesignatedInitUpdateExprClass:
Index: clang/lib/Sema/SemaCast.cpp
===================================================================
--- clang/lib/Sema/SemaCast.cpp
+++ clang/lib/Sema/SemaCast.cpp
@@ -87,6 +87,7 @@
     void CheckDynamicCast();
     void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
     void CheckCStyleCast();
+    void CheckBuiltinBitCast();
 
     void updatePartOfExplicitCastFlags(CastExpr *CE) {
       // Walk down from the CE to the OrigSrcExpr, and mark all immediate
@@ -331,6 +332,38 @@
   }
 }
 
+ExprResult Sema::ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &D,
+                                         ExprResult Operand,
+                                         SourceLocation RParenLoc) {
+  assert(!D.isInvalidType());
+
+  TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, Operand.get()->getType());
+  if (D.isInvalidType())
+    return ExprError();
+
+  return BuildBuiltinBitCastExpr(KWLoc, TInfo, Operand.get(), RParenLoc);
+}
+
+ExprResult Sema::BuildBuiltinBitCastExpr(SourceLocation KWLoc,
+                                         TypeSourceInfo *TSI, Expr *Operand,
+                                         SourceLocation RParenLoc) {
+  CastOperation Op(*this, TSI->getType(), Operand);
+  Op.OpRange = SourceRange(KWLoc, RParenLoc);
+  TypeLoc TL = TSI->getTypeLoc();
+  Op.DestRange = SourceRange(TL.getBeginLoc(), TL.getEndLoc());
+
+  if (!Operand->isTypeDependent() && !TSI->getType()->isDependentType()) {
+    Op.CheckBuiltinBitCast();
+    if (Op.SrcExpr.isInvalid())
+      return ExprError();
+  }
+
+  BuiltinBitCastExpr *BCE =
+      new (Context) BuiltinBitCastExpr(Op.ResultType, Op.ValueKind, Op.Kind,
+                                       Op.SrcExpr.get(), TSI, KWLoc, RParenLoc);
+  return Op.complete(BCE);
+}
+
 /// Try to diagnose a failed overloaded cast.  Returns true if
 /// diagnostics were emitted.
 static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
@@ -953,7 +986,7 @@
 
   unsigned msg = diag::err_bad_cxx_cast_generic;
   TryCastResult tcr =
-    TryReinterpretCast(Self, SrcExpr, DestType,
+      TryReinterpretCast(Self, SrcExpr, DestType,
                        /*CStyle*/false, OpRange, msg, Kind);
   if (tcr != TC_Success && msg != 0) {
     if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
@@ -2764,6 +2797,41 @@
     checkCastAlign();
 }
 
+void CastOperation::CheckBuiltinBitCast() {
+  SrcExpr = Self.DefaultLvalueConversion(SrcExpr.get());
+
+  QualType SrcType = SrcExpr.get()->getType();
+  CharUnits DestSize = Self.Context.getTypeSizeInChars(DestType);
+  CharUnits SourceSize = Self.Context.getTypeSizeInChars(SrcType);
+  if (DestSize != SourceSize) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_type_size_mismatch)
+        << (int)SourceSize.getQuantity() << (int)DestSize.getQuantity();
+    SrcExpr = ExprError();
+    return;
+  }
+
+  if (!DestType.isTriviallyCopyableType(Self.Context)) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+        << 1;
+    SrcExpr = ExprError();
+    return;
+  }
+
+  if (!SrcType.isTriviallyCopyableType(Self.Context)) {
+    Self.Diag(OpRange.getBegin(), diag::err_bit_cast_non_trivially_copyable)
+        << 0;
+    SrcExpr = ExprError();
+    return;
+  }
+
+  if (Self.Context.hasSameUnqualifiedType(DestType, SrcType)) {
+    Kind = CK_NoOp;
+    return;
+  }
+
+  Kind = CK_BitCast;
+}
+
 /// DiagnoseCastQual - Warn whenever casts discards a qualifiers, be it either
 /// const, volatile or both.
 static void DiagnoseCastQual(Sema &Self, const ExprResult &SrcExpr,
Index: clang/lib/Parse/ParseExprCXX.cpp
===================================================================
--- clang/lib/Parse/ParseExprCXX.cpp
+++ clang/lib/Parse/ParseExprCXX.cpp
@@ -3487,3 +3487,37 @@
   ConsumeAnyToken();
   return Result;
 }
+
+/// Parse a __builtin_bit_cast(T, E).
+ExprResult Parser::ParseBuiltinBitCast() {
+  SourceLocation KWLoc = ConsumeToken();
+
+  BalancedDelimiterTracker T(*this, tok::l_paren);
+  if (T.expectAndConsume(diag::err_expected_lparen_after, "__builtin_bit_cast"))
+    return ExprError();
+
+  // Parse the common declaration-specifiers piece.
+  DeclSpec DS(AttrFactory);
+  ParseSpecifierQualifierList(DS);
+
+  // Parse the abstract-declarator, if present.
+  Declarator DeclaratorInfo(DS, DeclaratorContext::TypeNameContext);
+  ParseDeclarator(DeclaratorInfo);
+
+  if (ExpectAndConsume(tok::comma)) {
+    Diag(Tok.getLocation(), diag::err_expected) << tok::comma;
+    SkipUntil(tok::r_paren, StopAtSemi);
+    return ExprError();
+  }
+
+  ExprResult Operand = ParseExpression();
+
+  if (T.consumeClose())
+    return ExprError();
+
+  if (Operand.isInvalid() || DeclaratorInfo.isInvalidType())
+    return ExprError();
+
+  return Actions.ActOnBuiltinBitCastExpr(KWLoc, DeclaratorInfo, Operand,
+                                         T.getCloseLocation());
+}
Index: clang/lib/Parse/ParseExpr.cpp
===================================================================
--- clang/lib/Parse/ParseExpr.cpp
+++ clang/lib/Parse/ParseExpr.cpp
@@ -1223,6 +1223,9 @@
   case tok::kw_static_cast:
     Res = ParseCXXCasts();
     break;
+  case tok::kw___builtin_bit_cast:
+    Res = ParseBuiltinBitCast();
+    break;
   case tok::kw_typeid:
     Res = ParseCXXTypeid();
     break;
Index: clang/lib/Lex/PPMacroExpansion.cpp
===================================================================
--- clang/lib/Lex/PPMacroExpansion.cpp
+++ clang/lib/Lex/PPMacroExpansion.cpp
@@ -1624,6 +1624,7 @@
                       .Case("__builtin_FILE", true)
                       .Case("__builtin_FUNCTION", true)
                       .Case("__builtin_COLUMN", true)
+                      .Case("__builtin_bit_cast", true)
                       .Default(false);
         }
       });
Index: clang/lib/CodeGen/CGExprScalar.cpp
===================================================================
--- clang/lib/CodeGen/CGExprScalar.cpp
+++ clang/lib/CodeGen/CGExprScalar.cpp
@@ -2037,7 +2037,26 @@
   case CK_BlockPointerToObjCPointerCast:
   case CK_AnyPointerToBlockPointerCast:
   case CK_BitCast: {
-    Value *Src = Visit(const_cast<Expr*>(E));
+    if (isa<BuiltinBitCastExpr>(CE)) {
+      // Emit the subexpression to memory and do a pointer cast. We can't just
+      // emit a bitcast instruction here because bitcast doesn't support casting
+      // a non-scalar type to a scalar type, nor does it support casting between
+      // pointers and non-pointers.
+      ASTContext &Ctx = CGF.getContext();
+      // Use the max align of the two types for the temporary alloca.
+      CharUnits Align = std::max(Ctx.getTypeAlignInChars(E->getType()),
+                                 Ctx.getTypeAlignInChars(DestTy));
+      Address Temp = CGF.CreateTempAlloca(CGF.ConvertType(E->getType()), Align,
+                                          "bit_cast.temp");
+      CGF.EmitAnyExprToMem(E, Temp, E->getType().getQualifiers(),
+                           /*isInit=*/false);
+      Temp = Builder.CreateElementBitCast(Temp, CGF.ConvertType(DestTy));
+      LValue TempLV = CGF.MakeAddrLValue(Temp, DestTy);
+      TempLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+      return EmitLoadOfLValue(TempLV, CE->getExprLoc());
+    }
+
+    Value *Src = Visit(const_cast<Expr *>(E));
     llvm::Type *SrcTy = Src->getType();
     llvm::Type *DstTy = ConvertType(DestTy);
     if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
Index: clang/lib/CodeGen/CGExprComplex.cpp
===================================================================
--- clang/lib/CodeGen/CGExprComplex.cpp
+++ clang/lib/CodeGen/CGExprComplex.cpp
@@ -464,7 +464,21 @@
     return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
   }
 
-  case CK_BitCast:
+  case CK_BitCast: {
+    ASTContext &Ctx = CGF.getContext();
+    // Use the max align of the two types for the temporary alloca.
+    CharUnits Align = std::max(Ctx.getTypeAlignInChars(Op->getType()),
+                               Ctx.getTypeAlignInChars(DestTy));
+    Address Temp = CGF.CreateTempAlloca(CGF.ConvertType(Op->getType()), Align,
+                                        "bit_cast.temp");
+    CGF.EmitAnyExprToMem(Op, Temp, Op->getType().getQualifiers(),
+                         /*isInit=*/false);
+    Temp = Builder.CreateElementBitCast(Temp, CGF.ConvertType(DestTy));
+    LValue TempLV = CGF.MakeAddrLValue(Temp, DestTy);
+    TempLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+    return EmitLoadOfLValue(TempLV, Op->getExprLoc());
+  }
+
   case CK_BaseToDerived:
   case CK_DerivedToBase:
   case CK_UncheckedDerivedToBase:
Index: clang/lib/CodeGen/CGExprAgg.cpp
===================================================================
--- clang/lib/CodeGen/CGExprAgg.cpp
+++ clang/lib/CodeGen/CGExprAgg.cpp
@@ -80,6 +80,8 @@
   void EmitFinalDestCopy(QualType type, const LValue &src,
                          ExprValueKind SrcValueKind = EVK_NonRValue);
   void EmitFinalDestCopy(QualType type, RValue src);
+  void EmitTrivialFinalDestCopy(QualType Ty, const LValue &Source);
+  void EmitTrivialFinalDestCopy(QualType Ty, RValue Source);
   void EmitCopy(QualType type, const AggValueSlot &dest,
                 const AggValueSlot &src);
 
@@ -342,11 +344,20 @@
     }
   }
 
-  AggValueSlot srcAgg =
-    AggValueSlot::forLValue(src, AggValueSlot::IsDestructed,
-                            needsGC(type), AggValueSlot::IsAliased,
-                            AggValueSlot::MayOverlap);
-  EmitCopy(type, Dest, srcAgg);
+  EmitTrivialFinalDestCopy(type, src);
+}
+
+void AggExprEmitter::EmitTrivialFinalDestCopy(QualType Ty, RValue Source) {
+  LValue SourceLValue = CGF.MakeAddrLValue(Source.getAggregateAddress(), Ty);
+  EmitTrivialFinalDestCopy(Ty, SourceLValue);
+}
+
+void AggExprEmitter::EmitTrivialFinalDestCopy(QualType Ty,
+                                              const LValue &Source) {
+  AggValueSlot srcAgg = AggValueSlot::forLValue(
+      Source, AggValueSlot::IsDestructed, needsGC(Ty),
+      AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+  EmitCopy(Ty, Dest, srcAgg);
 }
 
 /// Perform a copy from the source into the destination.
@@ -807,11 +818,29 @@
     Visit(E->getSubExpr());
     break;
 
+  case CK_BitCast: {
+    if (Dest.isIgnored()) {
+      CGF.EmitAnyExpr(E->getSubExpr(), AggValueSlot::ignored(),
+                      /*IgnoreResult=*/true);
+      break;
+    }
+
+    QualType SourceType = E->getSubExpr()->getType();
+    QualType DestType = E->getType();
+    Address Temp = CGF.CreateIRTemp(SourceType, "bit_cast.temp");
+    CGF.EmitAnyExprToMem(E->getSubExpr(), Temp, SourceType.getQualifiers(),
+                         /*IsInit=*/false);
+    Address Casted =
+        Builder.CreateElementBitCast(Temp, CGF.ConvertType(DestType));
+    LValue CastedLV = CGF.MakeAddrLValue(Casted, DestType);
+    CastedLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
+    return EmitTrivialFinalDestCopy(DestType, CastedLV);
+  }
+
   case CK_LValueBitCast:
     llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
 
   case CK_Dependent:
-  case CK_BitCast:
   case CK_ArrayToPointerDecay:
   case CK_FunctionToPointerDecay:
   case CK_NullToPointer:
Index: clang/lib/CodeGen/CGExpr.cpp
===================================================================
--- clang/lib/CodeGen/CGExpr.cpp
+++ clang/lib/CodeGen/CGExpr.cpp
@@ -225,10 +225,11 @@
                                        bool IsInit) {
   // FIXME: This function should take an LValue as an argument.
   switch (getEvaluationKind(E->getType())) {
-  case TEK_Complex:
-    EmitComplexExprIntoLValue(E, MakeAddrLValue(Location, E->getType()),
-                              /*isInit*/ false);
+  case TEK_Complex: {
+    LValue LV = MakeAddrLValue(Location, E->getType());
+    EmitComplexExprIntoLValue(E, LV, /*isInit=*/false);
     return;
+  }
 
   case TEK_Aggregate: {
     EmitAggExpr(E, AggValueSlot::forAddr(Location, Quals,
Index: clang/lib/AST/StmtProfile.cpp
===================================================================
--- clang/lib/AST/StmtProfile.cpp
+++ clang/lib/AST/StmtProfile.cpp
@@ -1569,6 +1569,11 @@
   VisitCXXNamedCastExpr(S);
 }
 
+void StmtProfiler::VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *S) {
+  VisitExpr(S);
+  VisitType(S->getTypeInfoAsWritten()->getType());
+}
+
 void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
   VisitCallExpr(S);
 }
Index: clang/lib/AST/StmtPrinter.cpp
===================================================================
--- clang/lib/AST/StmtPrinter.cpp
+++ clang/lib/AST/StmtPrinter.cpp
@@ -1710,6 +1710,14 @@
   VisitCXXNamedCastExpr(Node);
 }
 
+void StmtPrinter::VisitBuiltinBitCastExpr(BuiltinBitCastExpr *Node) {
+  OS << "__builtin_bit_cast(";
+  Node->getTypeInfoAsWritten()->getType().print(OS, Policy);
+  OS << ", ";
+  PrintExpr(Node->getSubExpr());
+  OS << ")";
+}
+
 void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
   OS << "typeid(";
   if (Node->isTypeOperand()) {
Index: clang/lib/AST/ItaniumMangle.cpp
===================================================================
--- clang/lib/AST/ItaniumMangle.cpp
+++ clang/lib/AST/ItaniumMangle.cpp
@@ -3618,6 +3618,7 @@
   case Expr::AtomicExprClass:
   case Expr::SourceLocExprClass:
   case Expr::FixedPointLiteralClass:
+  case Expr::BuiltinBitCastExprClass:
   {
     if (!NullOut) {
       // As bad as this diagnostic is, it's better than crashing.
Index: clang/lib/AST/ExprConstant.cpp
===================================================================
--- clang/lib/AST/ExprConstant.cpp
+++ clang/lib/AST/ExprConstant.cpp
@@ -48,6 +48,7 @@
 #include "clang/Basic/FixedPoint.h"
 #include "clang/Basic/TargetInfo.h"
 #include "llvm/Support/SaveAndRestore.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/Support/raw_ostream.h"
 #include <cstring>
 #include <functional>
@@ -55,8 +56,10 @@
 #define DEBUG_TYPE "exprconstant"
 
 using namespace clang;
+using llvm::APInt;
 using llvm::APSInt;
 using llvm::APFloat;
+using llvm::Optional;
 
 static bool IsGlobalLValue(APValue::LValueBase B);
 
@@ -5350,6 +5353,457 @@
 //===----------------------------------------------------------------------===//
 namespace {
 
+class APBuffer {
+  // FIXME: We're going to need bit-level granularity when we support
+  // bit-fields.
+  // FIXME: It's possible under the C++ standard for 'char' to not be 8 bits, but
+  // we don't support a host or target where that is the case. Still, we should
+  // use a more generic type in case we ever do.
+  SmallVector<Optional<unsigned char>, 32> Bytes;
+
+  bool TargetIsLittleEndian;
+
+public:
+  APBuffer(CharUnits Width, bool TargetIsLittleEndian)
+      : Bytes(Width.getQuantity()),
+        TargetIsLittleEndian(TargetIsLittleEndian) {}
+
+  LLVM_NODISCARD
+  bool readObject(CharUnits Offset, CharUnits Width,
+                  SmallVectorImpl<unsigned char> &Output) const {
+    for (CharUnits I = Offset, E = Offset + Width; I != E; ++I) {
+      // If a byte of an integer is uninitialized, then the whole integer is
+      // uninitialized.
+      if (!Bytes[I.getQuantity()])
+        return false;
+      Output.push_back(*Bytes[I.getQuantity()]);
+    }
+    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+      std::reverse(Output.begin(), Output.end());
+    return true;
+  }
+
+  void writeObject(CharUnits Offset, SmallVectorImpl<unsigned char> &Input) {
+    if (llvm::sys::IsLittleEndianHost != TargetIsLittleEndian)
+      std::reverse(Input.begin(), Input.end());
+
+    size_t Index = 0;
+    for (unsigned char Byte : Input) {
+      assert(!Bytes[Offset.getQuantity() + Index] && "overwriting a byte?");
+      Bytes[Offset.getQuantity() + Index] = Byte;
+      ++Index;
+    }
+  }
+
+  size_t size() { return Bytes.size(); }
+};
+
+/// Traverse an APValue to produce an APBuffer, emulating how the current target
+/// would represent the value at runtime.
+class BitCastReader {
+  EvalInfo &Info;
+  APBuffer Buffer;
+  const CastExpr *BCE;
+
+  BitCastReader(EvalInfo &Info, CharUnits ObjectWidth, const CastExpr *BCE)
+      : Info(Info),
+        Buffer(ObjectWidth, Info.Ctx.getTargetInfo().isLittleEndian()),
+        BCE(BCE) {}
+
+  bool visit(const APValue &Val, QualType Ty) {
+    return visit(Val, Ty, CharUnits::fromQuantity(0));
+  }
+
+  // Write out Val with type Ty into Buffer starting at Offset.
+  bool visit(const APValue &Val, QualType Ty, CharUnits Offset) {
+    assert((size_t)Offset.getQuantity() <= Buffer.size());
+
+    // As a special case, nullptr_t has an indeterminate value.
+    if (Ty->isNullPtrType())
+      return true;
+
+    // Dig through Src to find the byte at SrcOffset.
+    switch (Val.getKind()) {
+    case APValue::Indeterminate:
+      Info.FFDiag(BCE->getExprLoc(), diag::note_constexpr_access_uninit)
+          << 0 << 1;
+      return false;
+
+    case APValue::None:
+      Info.FFDiag(BCE->getExprLoc(), diag::note_constexpr_access_uninit)
+          << 0 << 0;
+      return false;
+
+    case APValue::Int:
+      return visitInt(Val.getInt(), Ty, Offset);
+    case APValue::Float:
+      return visitFloat(Val.getFloat(), Ty, Offset);
+    case APValue::Array:
+      return visitArray(Val, Ty, Offset);
+    case APValue::Struct:
+      return visitRecord(Val, Ty, Offset);
+
+    case APValue::ComplexInt:
+    case APValue::ComplexFloat:
+    case APValue::Vector:
+    case APValue::FixedPoint:
+      // FIXME: We should support these.
+
+    case APValue::Union:
+    case APValue::MemberPointer:
+    case APValue::AddrLabelDiff: {
+      Info.FFDiag(BCE->getBeginLoc(),
+                  diag::note_constexpr_bit_cast_unsupported_type)
+          << Ty;
+      return false;
+    }
+
+    case APValue::LValue: {
+      LValue LVal;
+      LVal.setFrom(Info.Ctx, Val);
+      APValue RVal;
+      if (!handleLValueToRValueConversion(Info, BCE, Ty.withConst(),
+                                          LVal, RVal))
+        return false;
+      return visit(RVal, Ty, Offset);
+    }
+    }
+  }
+
+  bool visitRecord(const APValue &Val, QualType Ty, CharUnits Offset) {
+    const RecordDecl *RD = Ty->getAsRecordDecl();
+    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+    // Visit the base classes.
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+
+        if (!visitRecord(Val.getStructBase(I), BS.getType(),
+                         Layout.getBaseClassOffset(BaseDecl) + Offset))
+          return false;
+      }
+    }
+
+    // Visit the fields.
+    unsigned FieldIdx = 0;
+    for (FieldDecl *FD : RD->fields()) {
+      if (FD->isBitField()) {
+        Info.FFDiag(BCE->getBeginLoc(),
+                    diag::note_constexpr_bit_cast_unsupported_bitfield);
+        return false;
+      }
+
+      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+
+      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0 &&
+             "only bit-fields can have sub-char alignment");
+      CharUnits FieldOffset =
+          Info.Ctx.toCharUnitsFromBits(FieldOffsetBits) + Offset;
+      QualType FieldTy = FD->getType();
+      if (!visit(Val.getStructField(FieldIdx), FieldTy, FieldOffset))
+        return false;
+      ++FieldIdx;
+    }
+
+    return true;
+  }
+
+  bool visitArray(const APValue &Val, QualType Ty, CharUnits Offset) {
+    const auto *CAT =
+        dyn_cast_or_null<ConstantArrayType>(Ty->getAsArrayTypeUnsafe());
+    if (!CAT)
+      return false;
+
+    CharUnits ElemWidth = Info.Ctx.getTypeSizeInChars(CAT->getElementType());
+    unsigned NumInitializedElts = Val.getArrayInitializedElts();
+    unsigned ArraySize = Val.getArraySize();
+    // First, initialize the initialized elements.
+    for (unsigned I = 0; I != NumInitializedElts; ++I) {
+      const APValue &SubObj = Val.getArrayInitializedElt(I);
+      if (!visit(SubObj, CAT->getElementType(), Offset + I * ElemWidth))
+        return false;
+    }
+
+    // Next, initialize the rest of the array using the filler.
+    if (Val.hasArrayFiller()) {
+      const APValue &Filler = Val.getArrayFiller();
+      for (unsigned I = NumInitializedElts; I != ArraySize; ++I) {
+        if (!visit(Filler, CAT->getElementType(), Offset + I * ElemWidth))
+          return false;
+      }
+    }
+
+    return true;
+  }
+
+  bool visitInt(const APSInt &Val, QualType Ty, CharUnits Offset) {
+    CharUnits Width = Info.Ctx.getTypeSizeInChars(Ty);
+    SmallVector<unsigned char, 8> Bytes(Width.getQuantity());
+    llvm::StoreIntToMemory(Val, &*Bytes.begin(), Width.getQuantity());
+    Buffer.writeObject(Offset, Bytes);
+    return true;
+  }
+
+  bool visitFloat(const APFloat &Val, QualType Ty, CharUnits Offset) {
+    APSInt AsInt(Val.bitcastToAPInt());
+    return visitInt(AsInt, Ty, Offset);
+  }
+
+public:
+  static Optional<APBuffer> read(EvalInfo &Info, const APValue &Src,
+                                 const CastExpr *BCE) {
+    CharUnits DstSize = Info.Ctx.getTypeSizeInChars(BCE->getType());
+    BitCastReader Reader(Info, DstSize, BCE);
+    if (!Reader.visit(Src, BCE->getSubExpr()->getType()))
+      return None;
+    return Reader.Buffer;
+  }
+};
+
+/// Write an APBuffer into an APValue.
+class BitCastWriter {
+  EvalInfo &Info;
+  const APBuffer &Buffer;
+  const CastExpr *BCE;
+
+  BitCastWriter(EvalInfo &Info, const APBuffer &Buffer, const CastExpr *BCE)
+      : Info(Info), Buffer(Buffer), BCE(BCE) {}
+
+  // Emit an unsupported bit_cast type error. Sema refuses to build a bit_cast
+  // with an invalid type, so anything left is a deficiency on our part (FIXME).
+  // Ideally this will be unreachable.
+  llvm::NoneType unsupportedType(QualType Ty) {
+    Info.FFDiag(BCE->getBeginLoc(),
+                diag::note_constexpr_bit_cast_unsupported_type)
+        << Ty;
+    return None;
+  }
+
+  Optional<APValue> visit(const BuiltinType *T, CharUnits Offset) {
+    if (T->isNullPtrType()) {
+      uint64_t NullValue = Info.Ctx.getTargetNullPointerValue(QualType(T, 0));
+      return APValue((Expr *)nullptr,
+                     /*Offset=*/CharUnits::fromQuantity(NullValue),
+                     APValue::NoLValuePath{}, /*IsNullPtr=*/true);
+    }
+
+    CharUnits SizeOf = Info.Ctx.getTypeSizeInChars(T);
+    SmallVector<uint8_t, 8> Bytes;
+    if (!Buffer.readObject(Offset, SizeOf, Bytes))
+      return APValue::IndeterminateValue();
+
+    APSInt Val(SizeOf.getQuantity() * Info.Ctx.getCharWidth(), true);
+    llvm::LoadIntFromMemory(Val, &*Bytes.begin(), Bytes.size());
+
+    if (T->isIntegralOrEnumerationType()) {
+      Val.setIsSigned(T->isSignedIntegerOrEnumerationType());
+      return APValue(Val);
+    }
+
+    if (T->isRealFloatingType()) {
+      const llvm::fltSemantics &Semantics =
+          Info.Ctx.getFloatTypeSemantics(QualType(T, 0));
+      return APValue(APFloat(Semantics, Val));
+    }
+
+    return unsupportedType(QualType(T, 0));
+  }
+
+  Optional<APValue> visit(const RecordType *RTy, CharUnits Offset) {
+    const RecordDecl *RD = RTy->getAsRecordDecl();
+    const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+    unsigned NumBases = 0;
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+      NumBases = CXXRD->getNumBases();
+
+    APValue ResultVal(APValue::UninitStruct(), NumBases,
+                      std::distance(RD->field_begin(), RD->field_end()));
+
+    // Visit the base classes.
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+      for (size_t I = 0, E = CXXRD->getNumBases(); I != E; ++I) {
+        const CXXBaseSpecifier &BS = CXXRD->bases_begin()[I];
+        CXXRecordDecl *BaseDecl = BS.getType()->getAsCXXRecordDecl();
+        if (BaseDecl->isEmpty() ||
+            Info.Ctx.getASTRecordLayout(BaseDecl).getNonVirtualSize().isZero())
+          continue;
+
+        Optional<APValue> SubObj = visitType(
+            BS.getType(), Layout.getBaseClassOffset(BaseDecl) + Offset);
+        if (!SubObj)
+          return None;
+        ResultVal.getStructBase(I) = *SubObj;
+      }
+    }
+
+    // Visit the fields.
+    unsigned FieldIdx = 0;
+    for (FieldDecl *FD : RD->fields()) {
+      // FIXME: We don't currently support bit-fields. A lot of the logic for
+      // this is in CodeGen, so we need to factor it around.
+      if (FD->isBitField()) {
+        Info.FFDiag(BCE->getBeginLoc(),
+                    diag::note_constexpr_bit_cast_unsupported_bitfield);
+        return None;
+      }
+
+      uint64_t FieldOffsetBits = Layout.getFieldOffset(FieldIdx);
+      assert(FieldOffsetBits % Info.Ctx.getCharWidth() == 0);
+
+      CharUnits FieldOffset =
+          CharUnits::fromQuantity(FieldOffsetBits / Info.Ctx.getCharWidth()) +
+          Offset;
+      QualType FieldTy = FD->getType();
+      Optional<APValue> SubObj = visitType(FieldTy, FieldOffset);
+      if (!SubObj)
+        return None;
+      ResultVal.getStructField(FieldIdx) = *SubObj;
+      ++FieldIdx;
+    }
+
+    return ResultVal;
+  }
+
+  Optional<APValue> visit(const EnumType *Ty, CharUnits Offset) {
+    QualType RepresentationType = Ty->getDecl()->getIntegerType();
+    assert(!RepresentationType.isNull() &&
+           "enum forward decl should be caught by Sema");
+    return visitType(RepresentationType, Offset);
+  }
+
+  Optional<APValue> visit(const ConstantArrayType *Ty, CharUnits Offset) {
+    size_t Size = Ty->getSize().getLimitedValue();
+    CharUnits ElementWidth = Info.Ctx.getTypeSizeInChars(Ty->getElementType());
+
+    APValue ArrayValue(APValue::UninitArray(), Size, Size);
+    for (size_t I = 0; I != Size; ++I) {
+      Optional<APValue> ElementValue =
+          visitType(Ty->getElementType(), Offset + I * ElementWidth);
+      if (!ElementValue)
+        return None;
+      ArrayValue.getArrayInitializedElt(I) = std::move(*ElementValue);
+    }
+
+    return ArrayValue;
+  }
+
+  Optional<APValue> visit(const Type *Ty, CharUnits Offset) {
+    return unsupportedType(QualType(Ty, 0));
+  }
+
+  Optional<APValue> visitType(QualType Ty, CharUnits Offset) {
+    QualType Can = Ty.getCanonicalType();
+
+    switch (Can->getTypeClass()) {
+#define TYPE(Class, Base)                                                      \
+  case Type::Class:                                                            \
+    return visit(cast<Class##Type>(Can.getTypePtr()), Offset);
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)                                        \
+  case Type::Class:                                                            \
+    llvm_unreachable("non-canonical type should be impossible!");
+#define DEPENDENT_TYPE(Class, Base)                                            \
+  case Type::Class:                                                            \
+    llvm_unreachable(                                                          \
+        "dependent types aren't supported in the constant evaluator!");
+#define NON_CANONICAL_UNLESS_DEPENDENT(Class, Base)                            \
+  case Type::Class:                                                            \
+    llvm_unreachable("either dependent or not canonical!");
+#include "clang/AST/TypeNodes.def"
+    }
+  }
+
+public:
+  // Pull out a full value of type DstType.
+  static Optional<APValue> write(EvalInfo &Info, APBuffer &Buffer,
+                                 const CastExpr *BCE) {
+    BitCastWriter Writer(Info, Buffer, BCE);
+    return Writer.visitType(BCE->getType(), CharUnits::fromQuantity(0));
+  }
+};
+
+static bool checkBitCastConstexprEligibilityType(SourceLocation Loc,
+                                                 QualType Ty, EvalInfo *Info,
+                                                 const ASTContext &Ctx) {
+  Ty = Ty.getCanonicalType();
+
+  auto diag = [&](int Reason) {
+    if (Info)
+      Info->FFDiag(Loc, diag::note_constexpr_bit_cast_invalid_type)
+          << Reason << Ty;
+    return false;
+  };
+  auto note = [&](int Construct, QualType NoteTy, SourceLocation NoteLoc) {
+    if (Info)
+      Info->Note(NoteLoc, diag::note_constexpr_bit_cast_invalid_subtype)
+          << NoteTy << Construct << Ty;
+    return false;
+  };
+
+  if (Ty->isUnionType())
+    return diag(0);
+  if (Ty->isPointerType())
+    return diag(1);
+  if (Ty->isMemberPointerType())
+    return diag(2);
+  if (Ty.isVolatileQualified())
+    return diag(3);
+
+  if (RecordDecl *Record = Ty->getAsRecordDecl()) {
+    if (auto *CXXRD = dyn_cast<CXXRecordDecl>(Record)) {
+      for (CXXBaseSpecifier &BS : CXXRD->bases())
+        if (!checkBitCastConstexprEligibilityType(Loc, BS.getType(), Info, Ctx))
+          return note(1, BS.getType(), BS.getBeginLoc());
+    }
+    for (FieldDecl *FD : Record->fields()) {
+      if (!checkBitCastConstexprEligibilityType(Loc, FD->getType(), Info, Ctx))
+        return note(0, FD->getType(), FD->getBeginLoc());
+    }
+  }
+
+  if (Ty->isArrayType() && !checkBitCastConstexprEligibilityType(
+          Loc, Ctx.getBaseElementType(Ty), Info, Ctx))
+    return false;
+
+  return true;
+}
+
+static bool checkBitCastConstexprEligibility(EvalInfo *Info,
+                                             const ASTContext &Ctx,
+                                             const CastExpr *BCE) {
+  bool DestOK = checkBitCastConstexprEligibilityType(BCE->getBeginLoc(),
+                                                     BCE->getType(), Info, Ctx);
+  bool SourceOK = DestOK && checkBitCastConstexprEligibilityType(
+                                BCE->getBeginLoc(),
+                                BCE->getSubExpr()->getType(), Info, Ctx);
+  return SourceOK;
+}
+
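+/// Evaluate a __builtin_bit_cast: serialize SourceValue into a byte buffer and
+/// reassemble an APValue of the destination type from those bytes.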
+static bool handleBitCast(EvalInfo &Info, APValue &DestValue,
+                          APValue &SourceValue, const CastExpr *BCE) {
+  assert(CHAR_BIT == 8 && Info.Ctx.getTargetInfo().getCharWidth() == 8 &&
+         "no host or target supports non 8-bit chars");
+
+  if (!checkBitCastConstexprEligibility(&Info, Info.Ctx, BCE))
+    return false;
+
+  // Read out SourceValue into a char buffer.
+  Optional<APBuffer> Buffer = BitCastReader::read(Info, SourceValue, BCE);
+  if (!Buffer)
+    return false;
+
+  // Write out the buffer into a new APValue.
+  Optional<APValue> MaybeDestValue = BitCastWriter::write(Info, *Buffer, BCE);
+  if (!MaybeDestValue)
+    return false;
+
+  DestValue = std::move(*MaybeDestValue);
+  return true;
+}
+
 template <class Derived>
 class ExprEvaluatorBase
   : public ConstStmtVisitor<Derived, bool> {
@@ -5484,6 +5938,9 @@
       CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
     return static_cast<Derived*>(this)->VisitCastExpr(E);
   }
+  bool VisitBuiltinBitCastExpr(const BuiltinBitCastExpr *E) {
+    return static_cast<Derived*>(this)->VisitCastExpr(E);
+  }
 
   bool VisitBinaryOperator(const BinaryOperator *E) {
     switch (E->getOpcode()) {
@@ -5776,6 +6233,17 @@
         return false;
       return DerivedSuccess(RVal, E);
     }
+    case CK_BitCast: {
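+      // Only __builtin_bit_cast has constant-expression semantics; a plain
+      // CK_BitCast still gets the usual invalid-cast note, but we attempt to
+      // evaluate it the same way.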
+      if (!isa<BuiltinBitCastExpr>(E))
+        Info.CCEDiag(E->getBeginLoc(), diag::note_constexpr_invalid_cast);
+
+      APValue DestValue, SourceValue;
+      if (!Evaluate(SourceValue, Info, E->getSubExpr()))
+        return false;
+      if (!handleBitCast(Info, DestValue, SourceValue, E))
+        return false;
+      return DerivedSuccess(DestValue, E);
+    }
     }
 
     return Error(E);
@@ -6627,6 +7095,11 @@
     break;
 
   case CK_BitCast:
+    // A CK_BitCast from a __builtin_bit_cast has different constexpr
+    // semantics; this case is only reachable when bit-casting to nullptr_t.
+    if (isa<BuiltinBitCastExpr>(E))
+      return ExprEvaluatorBaseTy::VisitCastExpr(E);
+    LLVM_FALLTHROUGH;
   case CK_CPointerToObjCPointerCast:
   case CK_BlockPointerToObjCPointerCast:
   case CK_AnyPointerToBlockPointerCast:
@@ -8150,7 +8623,7 @@
   }
 
   bool Success(const APValue &V, const Expr *E) {
-    if (V.isLValue() || V.isAddrLabelDiff()) {
+    if (V.isLValue() || V.isAddrLabelDiff() || V.isIndeterminate()) {
       Result = V;
       return true;
     }
@@ -10556,7 +11029,6 @@
   case CK_IntegralToFixedPoint:
     llvm_unreachable("invalid cast kind for integral value");
 
-  case CK_BitCast:
   case CK_Dependent:
   case CK_LValueBitCast:
   case CK_ARCProduceObject:
@@ -10570,6 +11042,7 @@
   case CK_LValueToRValue:
   case CK_AtomicToNonAtomic:
   case CK_NoOp:
+  case CK_BitCast:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);
 
   case CK_MemberPointerToBoolean:
@@ -11128,7 +11601,6 @@
 bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
 
   switch (E->getCastKind()) {
-  case CK_BitCast:
   case CK_BaseToDerived:
   case CK_DerivedToBase:
   case CK_UncheckedDerivedToBase:
@@ -11182,6 +11654,7 @@
   case CK_LValueToRValue:
   case CK_AtomicToNonAtomic:
   case CK_NoOp:
+  case CK_BitCast:
     return ExprEvaluatorBaseTy::VisitCastExpr(E);
 
   case CK_Dependent:
@@ -12477,6 +12950,11 @@
   case Expr::ChooseExprClass: {
     return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(), Ctx);
   }
+  case Expr::BuiltinBitCastExprClass: {
+    if (!checkBitCastConstexprEligibility(nullptr, Ctx, cast<CastExpr>(E)))
+      return ICEDiag(IK_NotICE, E->getBeginLoc());
+    return CheckICE(cast<CastExpr>(E)->getSubExpr(), Ctx);
+  }
   }
 
   llvm_unreachable("Invalid StmtClass!");
Index: clang/lib/AST/ExprClassification.cpp
===================================================================
--- clang/lib/AST/ExprClassification.cpp
+++ clang/lib/AST/ExprClassification.cpp
@@ -343,6 +343,7 @@
   case Expr::CXXReinterpretCastExprClass:
   case Expr::CXXConstCastExprClass:
   case Expr::ObjCBridgedCastExprClass:
+  case Expr::BuiltinBitCastExprClass:
     // Only in C++ can casts be interesting at all.
     if (!Lang.CPlusPlus) return Cl::CL_PRValue;
     return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
Index: clang/lib/AST/Expr.cpp
===================================================================
--- clang/lib/AST/Expr.cpp
+++ clang/lib/AST/Expr.cpp
@@ -3406,7 +3406,8 @@
   case CXXStaticCastExprClass:
   case CXXReinterpretCastExprClass:
   case CXXConstCastExprClass:
-  case CXXFunctionalCastExprClass: {
+  case CXXFunctionalCastExprClass:
+  case BuiltinBitCastExprClass: {
     // While volatile reads are side-effecting in both C and C++, we treat them
     // as having possible (not definite) side-effects. This allows idiomatic
     // code to behave without warning, such as sizeof(*v) for a volatile-
Index: clang/include/clang/Sema/Sema.h
===================================================================
--- clang/include/clang/Sema/Sema.h
+++ clang/include/clang/Sema/Sema.h
@@ -5221,6 +5221,13 @@
                                SourceRange AngleBrackets,
                                SourceRange Parens);
 
+  ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
+                                     ExprResult Operand,
+                                     SourceLocation RParenLoc);
+
+  ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
+                                     Expr *Operand, SourceLocation RParenLoc);
+
   ExprResult BuildCXXTypeId(QualType TypeInfoType,
                             SourceLocation TypeidLoc,
                             TypeSourceInfo *Operand,
Index: clang/include/clang/Parse/Parser.h
===================================================================
--- clang/include/clang/Parse/Parser.h
+++ clang/include/clang/Parse/Parser.h
@@ -1780,6 +1780,9 @@
   // C++ 5.2p1: C++ Casts
   ExprResult ParseCXXCasts();
 
+  /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
+  ExprResult ParseBuiltinBitCast();
+
   //===--------------------------------------------------------------------===//
   // C++ 5.2p1: C++ Type Identification
   ExprResult ParseCXXTypeid();
Index: clang/include/clang/Basic/TokenKinds.def
===================================================================
--- clang/include/clang/Basic/TokenKinds.def
+++ clang/include/clang/Basic/TokenKinds.def
@@ -669,7 +669,7 @@
 KEYWORD(__builtin_convertvector   , KEYALL)
 ALIAS("__char16_t"   , char16_t   , KEYCXX)
 ALIAS("__char32_t"   , char32_t   , KEYCXX)
-
+KEYWORD(__builtin_bit_cast        , KEYALL)
 KEYWORD(__builtin_available       , KEYALL)
 
 // Clang-specific keywords enabled only in testing.
Index: clang/include/clang/Basic/StmtNodes.td
===================================================================
--- clang/include/clang/Basic/StmtNodes.td
+++ clang/include/clang/Basic/StmtNodes.td
@@ -192,6 +192,7 @@
 def BlockExpr : DStmt<Expr>;
 def OpaqueValueExpr : DStmt<Expr>;
 def TypoExpr : DStmt<Expr>;
+def BuiltinBitCastExpr : DStmt<ExplicitCastExpr>;
 
 // Microsoft Extensions.
 def MSPropertyRefExpr : DStmt<Expr>;
Index: clang/include/clang/Basic/DiagnosticSemaKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -9715,4 +9715,8 @@
   "%select{non-pointer|function pointer|void pointer}0 argument to "
   "'__builtin_launder' is not allowed">;
 
+def err_bit_cast_non_trivially_copyable : Error<
+  "__builtin_bit_cast %select{source|destination}0 type must be trivially copyable">;
+def err_bit_cast_type_size_mismatch : Error<
+  "__builtin_bit_cast source size does not equal destination size (%0 and %1)">;
 } // end of sema component.
Index: clang/include/clang/Basic/DiagnosticASTKinds.td
===================================================================
--- clang/include/clang/Basic/DiagnosticASTKinds.td
+++ clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -213,6 +213,15 @@
   "size to copy (%4) is not a multiple of size of element type %3 (%5)|"
   "source is not a contiguous array of at least %4 elements of type %3|"
   "destination is not a contiguous array of at least %4 elements of type %3}2">;
+def note_constexpr_bit_cast_unsupported_type : Note<
+  "constexpr bit_cast involving type %0 is not yet supported">;
+def note_constexpr_bit_cast_unsupported_bitfield : Note<
+  "constexpr bit_cast involving bit-field is not yet supported">;
+def note_constexpr_bit_cast_invalid_type : Note<
+  "cannot constexpr evaluate a bit_cast with a "
+  "%select{union|pointer|member pointer|volatile}0 type %1">;
+def note_constexpr_bit_cast_invalid_subtype : Note<
+  "invalid type %0 is a %select{member|base}1 of %2">;
 
 def warn_integer_constant_overflow : Warning<
   "overflow in expression; result is %0 with type %1">,
Index: clang/include/clang/AST/RecursiveASTVisitor.h
===================================================================
--- clang/include/clang/AST/RecursiveASTVisitor.h
+++ clang/include/clang/AST/RecursiveASTVisitor.h
@@ -2282,6 +2282,10 @@
   TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
 })
 
+DEF_TRAVERSE_STMT(BuiltinBitCastExpr, {
+  TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+})
+
 template <typename Derived>
 bool RecursiveASTVisitor<Derived>::TraverseSynOrSemInitListExpr(
     InitListExpr *S, DataRecursionQueue *Queue) {
Index: clang/include/clang/AST/ExprCXX.h
===================================================================
--- clang/include/clang/AST/ExprCXX.h
+++ clang/include/clang/AST/ExprCXX.h
@@ -4716,6 +4716,35 @@
   }
 };
 
+/// Represents a C++2a __builtin_bit_cast(T, v) expression. Used to implement
+/// std::bit_cast. These can sometimes be evaluated as part of a constant
+/// expression; otherwise they are code-generated as a simple memcpy.
+class BuiltinBitCastExpr final
+    : public ExplicitCastExpr,
+      private llvm::TrailingObjects<BuiltinBitCastExpr, CXXBaseSpecifier *> {
+  friend class ASTStmtReader;
+  friend class CastExpr;
+  friend class TrailingObjects;
+
+  SourceLocation KWLoc;
+  SourceLocation RParenLoc;
+
+public:
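+  // A bit_cast never involves a derived-to-base conversion, so the trailing
+  // CXXBaseSpecifier path is always empty (PathSize == 0 below).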
+  BuiltinBitCastExpr(QualType T, ExprValueKind VK, CastKind CK, Expr *SrcExpr,
+                     TypeSourceInfo *DstType, SourceLocation KWLoc,
+                     SourceLocation RParenLoc)
+      : ExplicitCastExpr(BuiltinBitCastExprClass, T, VK, CK, SrcExpr, 0,
+                         DstType),
+        KWLoc(KWLoc), RParenLoc(RParenLoc) {}
+
+  SourceLocation getBeginLoc() const LLVM_READONLY { return KWLoc; }
+  SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
+
+  static bool classof(const Stmt *T) {
+    return T->getStmtClass() == BuiltinBitCastExprClass;
+  }
+};
+
 } // namespace clang
 
 #endif // LLVM_CLANG_AST_EXPRCXX_H
Index: clang/include/clang-c/Index.h
===================================================================
--- clang/include/clang-c/Index.h
+++ clang/include/clang-c/Index.h
@@ -2536,7 +2536,11 @@
    */
   CXCursor_OMPTargetTeamsDistributeSimdDirective = 279,
 
-  CXCursor_LastStmt = CXCursor_OMPTargetTeamsDistributeSimdDirective,
+  /** C++2a __builtin_bit_cast expression, used to implement std::bit_cast.
+   */
+  CXCursor_BuiltinBitCastExpr = 280,
+
+  CXCursor_LastStmt = CXCursor_BuiltinBitCastExpr,
 
   /**
    * Cursor that represents the translation unit itself.
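
For reference, here is a rough sketch of how a library <bit> header could wrap
the new builtin, plus a simple constexpr use. The names and constraints below
are illustrative only and are not part of this patch:

  template <class To, class From>
  constexpr To bit_cast(const From &from) noexcept {
    // std::bit_cast additionally requires both types to be trivially copyable;
    // the builtin itself diagnoses that (see
    // err_bit_cast_non_trivially_copyable above).
    static_assert(sizeof(To) == sizeof(From),
                  "bit_cast requires equally sized types");
    return __builtin_bit_cast(To, from);
  }

  // On a typical target where float and unsigned are both 32 bits wide, this
  // is now a constant expression:
  constexpr unsigned U = __builtin_bit_cast(unsigned, 1.0f); // 0x3f800000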