mstorsjo retitled this revision from "Headers: Add iso_volatile load/store
intrinsics" to "[MS] Implement __iso_volatile loads/stores as builtins".
mstorsjo updated the summary for this revision.
mstorsjo updated this revision to Diff 72782.
mstorsjo added a comment.
Changed to implement these as builtins, as requested. I had to handle them at the
bottom of EmitBuiltinExpr (which returns RValue) rather than in
EmitARMBuiltinExpr (which returns Value*), since the Value* returned for the
stores is nullptr. A nullptr return value from EmitTargetBuiltinExpr indicates
that the builtin wasn't handled, which triggered errors about the store builtins
being unsupported.
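A quick sketch of the dispatch in question (simplified, not the exact code; the
ErrorUnsupported call below stands in for the "unsupported builtin" diagnostic
mentioned above):

  // At the bottom of EmitBuiltinExpr: a non-null Value* from the target hook
  // means "handled"; nullptr is taken to mean "not recognized", which is why
  // store builtins (whose natural result is nullptr) can't go through
  // EmitARMBuiltinExpr.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);
  ErrorUnsupported(E, "builtin function");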
https://reviews.llvm.org/D24986
Files:
include/clang/Basic/BuiltinsARM.def
lib/CodeGen/CGBuiltin.cpp
test/CodeGen/ms-volatile-arm.c
Index: test/CodeGen/ms-volatile-arm.c
===================================================================
--- /dev/null
+++ test/CodeGen/ms-volatile-arm.c
@@ -0,0 +1,13 @@
+// REQUIRES: arm-registered-target
+// RUN: %clang_cc1 -triple thumbv7-win32 -emit-llvm -fms-extensions -fms-volatile -o - < %s | FileCheck %s
+
+void test1(int volatile *p, int v) {
+ __iso_volatile_store32(p, v);
+ // CHECK-LABEL: @test1
+ // CHECK: store volatile {{.*}}, {{.*}}
+}
+int test2(const int volatile *p) {
+ return __iso_volatile_load32(p);
+ // CHECK-LABEL: @test2
+ // CHECK: load volatile {{.*}}
+}
Index: lib/CodeGen/CGBuiltin.cpp
===================================================================
--- lib/CodeGen/CGBuiltin.cpp
+++ lib/CodeGen/CGBuiltin.cpp
@@ -134,6 +134,36 @@
return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}
+static Value *EmitVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Address = CGF.EmitScalarExpr(E->getArg(0));
+ Value *Val = CGF.EmitScalarExpr(E->getArg(1));
+
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getArg(1)->getType());
+ // Call the full version of EmitStoreOfScalar directly, to override the volatile
+ // flag without actually setting volatile in the type. This avoids
+ // LValueIsSuitableForInlineAtomic picking it up and transforming it into an
+ // atomic store.
+ CGF.EmitStoreOfScalar(Val, LV.getAddress(), true, LV.getType(),
+ LV.getAlignmentSource(), LV.getTBAAInfo(), false,
+ LV.getTBAABaseType(), LV.getTBAAOffset(),
+ LV.isNontemporal());
+ return nullptr;
+}
+
+static Value *EmitVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
+ Value *Address = CGF.EmitScalarExpr(E->getArg(0));
+
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(Address, E->getType());
+ // Call the full version of EmitLoadOfScalar directly, to override the volatile
+ // flag without actually setting volatile in the type. This avoids
+ // LValueIsSuitableForInlineAtomic picking it up and transforming it into an
+ // atomic load.
+ return CGF.EmitLoadOfScalar(LV.getAddress(), true, LV.getType(),
+ E->getExprLoc(), LV.getAlignmentSource(),
+ LV.getTBAAInfo(), LV.getTBAABaseType(),
+ LV.getTBAAOffset(), LV.isNontemporal());
+}
+
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E) {
@@ -2558,6 +2588,33 @@
return RValue::get(V);
}
+ switch (getTarget().getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ switch (BuiltinID) {
+ case ARM::BI__iso_volatile_load8:
+ case ARM::BI__iso_volatile_load16:
+ case ARM::BI__iso_volatile_load32:
+ case ARM::BI__iso_volatile_load64:
+ return RValue::get(EmitVolatileLoad(*this, E));
+ case ARM::BI__iso_volatile_store8:
+ case ARM::BI__iso_volatile_store16:
+ case ARM::BI__iso_volatile_store32:
+ case ARM::BI__iso_volatile_store64:
+ // EmitVolatileStore returns nullptr, but we want to
+ // return that RValue here. If handled via EmitTargetBuiltinExpr
+ // below, the returned Value *V will be nullptr, and we will
+ // continue on to declaring the builtin unsupported below, even
+ // though it was handled correctly.
+ return RValue::get(EmitVolatileStore(*this, E));
+ }
+ break;
+ default:
+ break;
+ }
+
// See if we have a target specific builtin that needs to be lowered.
if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
return RValue::get(V);
Index: include/clang/Basic/BuiltinsARM.def
===================================================================
--- include/clang/Basic/BuiltinsARM.def
+++ include/clang/Basic/BuiltinsARM.def
@@ -115,6 +115,14 @@
LANGBUILTIN(__dmb, "vUi", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__dsb, "vUi", "nc", ALL_MS_LANGUAGES)
LANGBUILTIN(__isb, "vUi", "nc", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load8, "ccCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load16, "ssCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load32, "iiCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_load64, "LLiLLiCD*", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store8, "vcD*c", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store16, "vsD*s", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store32, "viD*i", "n", ALL_MS_LANGUAGES)
+LANGBUILTIN(__iso_volatile_store64, "vLLiD*LLi", "n", ALL_MS_LANGUAGES)
LANGBUILTIN(__ldrexd, "WiWiCD*", "", ALL_MS_LANGUAGES)
LANGBUILTIN(_MoveFromCoprocessor, "UiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
LANGBUILTIN(_MoveFromCoprocessor2, "UiIUiIUiIUiIUiIUi", "", ALL_MS_LANGUAGES)
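
For reference, my reading of the type strings above (assuming the usual
Builtins.def encoding, where a trailing C/D/* marks const/volatile/pointer and
a leading LL marks long long), expressed as C prototypes; this is a sketch of
the intended signatures, not generated output:

  char      __iso_volatile_load8 (const volatile char *);
  short     __iso_volatile_load16(const volatile short *);
  int       __iso_volatile_load32(const volatile int *);
  long long __iso_volatile_load64(const volatile long long *);
  void __iso_volatile_store8 (volatile char *, char);
  void __iso_volatile_store16(volatile short *, short);
  void __iso_volatile_store32(volatile int *, int);
  void __iso_volatile_store64(volatile long long *, long long);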