wmi updated this revision to Diff 116232.
wmi added a comment.
Changes following the discussion:
- Put the bitfield split logic under an option, off by default.
- When a sanitizer is enabled, the bitfield split option will be ignored and
  a warning will be emitted.
In addition, a test is added.
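
For reference, a field is split only when its width is a legal integer type
for the target and its bit offset is naturally aligned for that width.
Mirroring S1 from the new test on x86-64 (an illustrative sketch, not part
of the patch):

  struct Example {
    unsigned f1 : 2; // width 2: not a legal integer type -> normal access
    unsigned f2 : 6; // width 6: not a legal integer type -> normal access
    unsigned f3 : 8; // width 8 (i8) at bit offset 8: byte-aligned -> split i8 access
    unsigned f4 : 4; // width 4: not a legal integer type -> normal access
    unsigned f5 : 8; // width 8 but bit offset 20: not byte-aligned -> normal access
  };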
Repository:
rL LLVM
https://reviews.llvm.org/D36562
Files:
include/clang/Basic/DiagnosticDriverKinds.td
include/clang/Driver/Options.td
include/clang/Frontend/CodeGenOptions.def
lib/CodeGen/CGExpr.cpp
lib/Driver/ToolChains/Clang.cpp
lib/Frontend/CompilerInvocation.cpp
test/CodeGenCXX/bitfield-split.cpp
Index: test/CodeGenCXX/bitfield-split.cpp
===================================================================
--- test/CodeGenCXX/bitfield-split.cpp
+++ test/CodeGenCXX/bitfield-split.cpp
@@ -0,0 +1,161 @@
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsplit-bitfields -emit-llvm \
+// RUN: -o - %s | FileCheck %s
+// RUN: %clang_cc1 -triple x86_64-linux-gnu -fsplit-bitfields -emit-llvm \
+// RUN: -fsanitize=address -o - %s | FileCheck %s --check-prefix=SANITIZE
+// Check that -fsplit-bitfields is ignored when a sanitizer is enabled.
+
+struct S1 {
+ unsigned f1:2;
+ unsigned f2:6;
+ unsigned f3:8;
+ unsigned f4:4;
+ unsigned f5:8;
+};
+
+S1 a1;
+unsigned read8_1() {
+ // CHECK-LABEL: @_Z7read8_1v
+ // CHECK: %bf.load = load i8, i8* getelementptr (i8, i8* bitcast (%struct.S1* @a1 to i8*), i32 1), align 1
+ // CHECK-NEXT: %bf.cast = zext i8 %bf.load to i32
+ // CHECK-NEXT: ret i32 %bf.cast
+ // SANITIZE-LABEL: @_Z7read8_1v
+ // SANITIZE: %bf.load = load i32, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE: %bf.lshr = lshr i32 %bf.load, 8
+ // SANITIZE: %bf.clear = and i32 %bf.lshr, 255
+ // SANITIZE: ret i32 %bf.clear
+ return a1.f3;
+}
+void write8_1() {
+ // CHECK-LABEL: @_Z8write8_1v
+ // CHECK: store i8 3, i8* getelementptr (i8, i8* bitcast (%struct.S1* @a1 to i8*), i32 1), align 1
+ // CHECK-NEXT: ret void
+ // SANITIZE-LABEL: @_Z8write8_1v
+ // SANITIZE: %bf.load = load i32, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE-NEXT: %bf.clear = and i32 %bf.load, -65281
+ // SANITIZE-NEXT: %bf.set = or i32 %bf.clear, 768
+ // SANITIZE-NEXT: store i32 %bf.set, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE-NEXT: ret void
+ a1.f3 = 3;
+}
+
+unsigned read8_2() {
+ // CHECK-LABEL: @_Z7read8_2v
+ // CHECK: %bf.load = load i32, i32* getelementptr inbounds (%struct.S1, %struct.S1* @a1, i32 0, i32 0), align 4
+ // CHECK-NEXT: %bf.lshr = lshr i32 %bf.load, 20
+ // CHECK-NEXT: %bf.clear = and i32 %bf.lshr, 255
+ // CHECK-NEXT: ret i32 %bf.clear
+ // SANITIZE-LABEL: @_Z7read8_2v
+ // SANITIZE: %bf.load = load i32, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE-NEXT: %bf.lshr = lshr i32 %bf.load, 20
+ // SANITIZE-NEXT: %bf.clear = and i32 %bf.lshr, 255
+ // SANITIZE-NEXT: ret i32 %bf.clear
+ return a1.f5;
+}
+void write8_2() {
+ // CHECK-LABEL: @_Z8write8_2v
+ // CHECK: %bf.load = load i32, i32* getelementptr inbounds (%struct.S1, %struct.S1* @a1, i32 0, i32 0), align 4
+ // CHECK-NEXT: %bf.clear = and i32 %bf.load, -267386881
+ // CHECK-NEXT: %bf.set = or i32 %bf.clear, 3145728
+ // CHECK-NEXT: store i32 %bf.set, i32* getelementptr inbounds (%struct.S1, %struct.S1* @a1, i32 0, i32 0), align 4
+ // CHECK-NEXT: ret void
+ // SANITIZE-LABEL: @_Z8write8_2v
+ // SANITIZE: %bf.load = load i32, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE-NEXT: %bf.clear = and i32 %bf.load, -267386881
+ // SANITIZE-NEXT: %bf.set = or i32 %bf.clear, 3145728
+ // SANITIZE-NEXT: store i32 %bf.set, i32* getelementptr inbounds {{.*}}, align 4
+ // SANITIZE-NEXT: ret void
+ a1.f5 = 3;
+}
+
+struct S2 {
+ unsigned long f1:16;
+ unsigned long f2:16;
+ unsigned long f3:6;
+};
+
+S2 a2;
+unsigned read16_1() {
+ // CHECK-LABEL: @_Z8read16_1v
+ // CHECK: %bf.load = load i16, i16* bitcast (%struct.S2* @a2 to i16*), align 2
+ // CHECK-NEXT: %bf.cast = zext i16 %bf.load to i64
+ // CHECK-NEXT: %conv = trunc i64 %bf.cast to i32
+ // CHECK-NEXT: ret i32 %conv
+ // SANITIZE-LABEL: @_Z8read16_1v
+ // SANITIZE: %bf.load = load i64, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.clear = and i64 %bf.load, 65535
+ // SANITIZE-NEXT: %conv = trunc i64 %bf.clear to i32
+ // SANITIZE-NEXT: ret i32 %conv
+ return a2.f1;
+}
+unsigned read16_2() {
+ // CHECK-LABEL: @_Z8read16_2v
+ // CHECK: %bf.load = load i16, i16* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.S2* @a2 to i8*), i32 2) to i16*), align 2
+ // CHECK-NEXT: %bf.cast = zext i16 %bf.load to i64
+ // CHECK-NEXT: %conv = trunc i64 %bf.cast to i32
+ // CHECK-NEXT: ret i32 %conv
+ // SANITIZE-LABEL: @_Z8read16_2v
+ // SANITIZE: %bf.load = load i64, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.lshr = lshr i64 %bf.load, 16
+ // SANITIZE-NEXT: %bf.clear = and i64 %bf.lshr, 65535
+ // SANITIZE-NEXT: %conv = trunc i64 %bf.clear to i32
+ // SANITIZE-NEXT: ret i32 %conv
+ return a2.f2;
+}
+
+void write16_1() {
+ // CHECK-LABEL: @_Z9write16_1v
+ // CHECK: store i16 5, i16* bitcast (%struct.S2* @a2 to i16*), align 2
+ // CHECK-NEXT: ret void
+ // SANITIZE-LABEL: @_Z9write16_1v
+ // SANITIZE: %bf.load = load i64, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.clear = and i64 %bf.load, -65536
+ // SANITIZE-NEXT: %bf.set = or i64 %bf.clear, 5
+ // SANITIZE-NEXT: store i64 %bf.set, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: ret void
+ a2.f1 = 5;
+}
+void write16_2() {
+ // CHECK-LABEL: @_Z9write16_2v
+ // CHECK: store i16 5, i16* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.S2* @a2 to i8*), i32 2) to i16*), align 2
+ // CHECK-NEXT: ret void
+ // SANITIZE-LABEL: @_Z9write16_2v
+ // SANITIZE: %bf.load = load i64, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.clear = and i64 %bf.load, -4294901761
+ // SANITIZE-NEXT: %bf.set = or i64 %bf.clear, 327680
+ // SANITIZE-NEXT: store i64 %bf.set, i64* bitcast {{.*}}, align 8
+ // SANITIZE-NEXT: ret void
+ a2.f2 = 5;
+}
+
+struct S3 {
+ unsigned long f1:14;
+ unsigned long f2:18;
+ unsigned long f3:32;
+};
+
+S3 a3;
+unsigned read32_1() {
+ // CHECK-LABEL: @_Z8read32_1v
+ // CHECK: %bf.load = load i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.S3* @a3 to i8*), i32 4) to i32*), align 4
+ // CHECK-NEXT: %bf.cast = zext i32 %bf.load to i64
+ // CHECK-NEXT: %conv = trunc i64 %bf.cast to i32
+ // CHECK-NEXT: ret i32 %conv
+ // SANITIZE-LABEL: @_Z8read32_1v
+ // SANITIZE: %bf.load = load i64, i64* getelementptr inbounds {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.lshr = lshr i64 %bf.load, 32
+ // SANITIZE-NEXT: %conv = trunc i64 %bf.lshr to i32
+ // SANITIZE-NEXT: ret i32 %conv
+ return a3.f3;
+}
+void write32_1() {
+ // CHECK-LABEL: @_Z9write32_1v
+ // CHECK: store i32 5, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.S3* @a3 to i8*), i32 4) to i32*), align 4
+ // CHECK-NEXT: ret void
+ // SANITIZE-LABEL: @_Z9write32_1v
+ // SANITIZE: %bf.load = load i64, i64* getelementptr inbounds {{.*}}, align 8
+ // SANITIZE-NEXT: %bf.clear = and i64 %bf.load, 4294967295
+ // SANITIZE-NEXT: %bf.set = or i64 %bf.clear, 21474836480
+ // SANITIZE-NEXT: store i64 %bf.set, i64* getelementptr inbounds {{.*}}, align 8
+ // SANITIZE-NEXT: ret void
+ a3.f3 = 5;
+}
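
The mask constants in the read-modify-write sequences above are just the
field masks shifted into place; for write8_2, for example (my arithmetic,
not part of the patch):

  // f5 occupies bits [20, 28) of S1's 32-bit storage unit.
  constexpr unsigned ClearMask = ~(0xFFu << 20); // 0xF00FFFFF, i.e. -267386881 as i32
  constexpr unsigned SetVal = 3u << 20;          // 0x00300000, i.e. 3145728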
Index: lib/Frontend/CompilerInvocation.cpp
===================================================================
--- lib/Frontend/CompilerInvocation.cpp
+++ lib/Frontend/CompilerInvocation.cpp
@@ -545,6 +545,8 @@
OPT_fuse_register_sized_bitfield_access);
Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
Opts.StructPathTBAA = !Args.hasArg(OPT_no_struct_path_tbaa);
+ Opts.SplitBitfields =
+ Args.hasFlag(OPT_fsplit_bitfields, OPT_fno_split_bitfields, false);
Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
Opts.NoCommon = Args.hasArg(OPT_fno_common);
@@ -2729,6 +2731,13 @@
if (Arch == llvm::Triple::spir || Arch == llvm::Triple::spir64) {
Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
}
+
+  // If a sanitizer is enabled, disable OPT_fsplit_bitfields.
+ if (Res.getCodeGenOpts().SplitBitfields &&
+ !Res.getLangOpts()->Sanitize.empty()) {
+ Res.getCodeGenOpts().SplitBitfields = false;
+ Diags.Report(diag::warn_drv_split_bitfields_ignored);
+ }
return Success;
}
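
Note that the third argument to hasFlag is the default, so the feature stays
off unless -fsplit-bitfields is the last of the two flags given. A minimal
model of that last-wins behavior (a simplified sketch, not the real
llvm::opt implementation):

  #include <cassert>

  enum class Last { Neither, Pos, Neg }; // which flag appears last, if any

  // Models Args.hasFlag(OPT_fsplit_bitfields, OPT_fno_split_bitfields, false).
  static bool splitBitfields(Last L) {
    return L == Last::Neither ? false : L == Last::Pos;
  }

  int main() {
    assert(!splitBitfields(Last::Neither)); // off by default
    assert(splitBitfields(Last::Pos));      // -fsplit-bitfields last: on
    assert(!splitBitfields(Last::Neg));     // -fno-split-bitfields last: off
  }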
Index: lib/Driver/ToolChains/Clang.cpp
===================================================================
--- lib/Driver/ToolChains/Clang.cpp
+++ lib/Driver/ToolChains/Clang.cpp
@@ -2406,6 +2406,9 @@
options::OPT_fno_optimize_sibling_calls))
CmdArgs.push_back("-mdisable-tail-calls");
+ Args.AddLastArg(CmdArgs, options::OPT_fsplit_bitfields,
+ options::OPT_fno_split_bitfields);
+
// Handle segmented stacks.
if (Args.hasArg(options::OPT_fsplit_stack))
CmdArgs.push_back("-split-stacks");
Index: lib/CodeGen/CGExpr.cpp
===================================================================
--- lib/CodeGen/CGExpr.cpp
+++ lib/CodeGen/CGExpr.cpp
@@ -1674,30 +1674,76 @@
return EmitLoadOfBitfieldLValue(LV, Loc);
}
+/// Check whether the current field is better accessed separately. When the
+/// field has a legal integer width and its bitfield offset is naturally
+/// aligned, it is better to access the bitfield as a separate integer
+/// variable.
+static bool IsSeparatableBitfield(const CGBitFieldInfo &Info,
+ const llvm::DataLayout &DL,
+ ASTContext &Context) {
+ if (!DL.isLegalInteger(Info.Size))
+ return false;
+  // Make sure the field is naturally aligned.
+ if (Info.Offset %
+ (DL.getABIIntegerTypeAlignment(Info.Size) * Context.getCharWidth()) !=
+ 0)
+ return false;
+ return true;
+}
+
RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
SourceLocation Loc) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
llvm::Type *ResLTy = ConvertType(LV.getType());
Address Ptr = LV.getBitFieldAddress();
- llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+ llvm::Value *Val = nullptr;
+ if (CGM.getCodeGenOpts().SplitBitfields &&
+ IsSeparatableBitfield(Info, CGM.getDataLayout(), getContext())) {
+    // Ptr is the pointer to the bitfield storage. Add Info.Offset to Ptr
+    // if the offset is not zero.
+ if (Info.Offset != 0) {
+ Address BC = Builder.CreateBitCast(Ptr, Int8PtrTy);
+ llvm::Value *GEP = Builder.CreateGEP(
+ BC.getPointer(),
+ llvm::ConstantInt::get(Int32Ty,
+ Info.Offset / getContext().getCharWidth()));
+ Ptr = Address(GEP, Ptr.getAlignment());
+ }
+ llvm::Type *BFType = llvm::Type::getIntNTy(getLLVMContext(), Info.Size);
+ CharUnits Align = getContext().toCharUnitsFromBits(
+ CGM.getDataLayout().getABITypeAlignment(BFType) *
+ getContext().getCharWidth());
+ if (Ptr.getElementType()->getScalarSizeInBits() != Info.Size) {
+      // Adjust the element type of Ptr if its size differs from Info.Size.
+ llvm::Type *BFTypePtr =
+ llvm::Type::getIntNPtrTy(getLLVMContext(), Info.Size);
+ Ptr = Builder.CreateBitCast(Address(Ptr.getPointer(), Align), BFTypePtr);
+ } else if (Ptr.getAlignment() != Align) {
+ // Adjust the alignment of Ptr.
+ Ptr = Address(Ptr.getPointer(), Align);
+ }
- if (Info.IsSigned) {
- assert(static_cast<unsigned>(Info.Offset + Info.Size) <= Info.StorageSize);
- unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
- if (HighBits)
- Val = Builder.CreateShl(Val, HighBits, "bf.shl");
- if (Info.Offset + HighBits)
- Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+ Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
} else {
- if (Info.Offset)
- Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
- if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
- Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(Info.StorageSize,
- Info.Size),
- "bf.clear");
+ Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "bf.load");
+ if (Info.IsSigned) {
+ assert(static_cast<unsigned>(Info.Offset + Info.Size) <=
+ Info.StorageSize);
+ unsigned HighBits = Info.StorageSize - Info.Offset - Info.Size;
+ if (HighBits)
+ Val = Builder.CreateShl(Val, HighBits, "bf.shl");
+ if (Info.Offset + HighBits)
+ Val = Builder.CreateAShr(Val, Info.Offset + HighBits, "bf.ashr");
+ } else {
+ if (Info.Offset)
+ Val = Builder.CreateLShr(Val, Info.Offset, "bf.lshr");
+ if (static_cast<unsigned>(Info.Offset) + Info.Size < Info.StorageSize)
+ Val = Builder.CreateAnd(
+ Val, llvm::APInt::getLowBitsSet(Info.StorageSize, Info.Size),
+ "bf.clear");
+ }
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
EmitScalarRangeCheck(Val, LV.getType(), Loc);
@@ -1887,15 +1933,46 @@
// Get the source value, truncated to the width of the bit-field.
llvm::Value *SrcVal = Src.getScalarVal();
+ llvm::Value *MaskedVal;
- // Cast the source to the storage type and shift it into place.
- SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
- /*IsSigned=*/false);
- llvm::Value *MaskedVal = SrcVal;
-
- // See if there are other bits in the bitfield's storage we'll need to load
- // and mask together with source before storing.
- if (Info.StorageSize != Info.Size) {
+ bool SeparatableBitfield =
+ CGM.getCodeGenOpts().SplitBitfields &&
+ IsSeparatableBitfield(Info, CGM.getDataLayout(), getContext());
+ if (SeparatableBitfield) {
+    // Ptr is the pointer to the bitfield storage. Add Info.Offset to Ptr
+    // if the offset is not zero.
+ if (Info.Offset != 0) {
+ Address BC = Builder.CreateBitCast(Ptr, Int8PtrTy);
+ llvm::Value *GEP = Builder.CreateGEP(
+ BC.getPointer(),
+ llvm::ConstantInt::get(Int32Ty,
+ Info.Offset / getContext().getCharWidth()));
+ Ptr = Address(GEP, Ptr.getAlignment());
+ }
+ llvm::Type *BFType = llvm::Type::getIntNTy(getLLVMContext(), Info.Size);
+ CharUnits Align = getContext().toCharUnitsFromBits(
+ CGM.getDataLayout().getABITypeAlignment(BFType) *
+ getContext().getCharWidth());
+ if (Ptr.getElementType()->getScalarSizeInBits() != Info.Size) {
+      // Adjust the element type of Ptr if its size differs from Info.Size.
+ llvm::Type *BFTypePtr =
+ llvm::Type::getIntNPtrTy(getLLVMContext(), Info.Size);
+ Ptr = Builder.CreateBitCast(Address(Ptr.getPointer(), Align), BFTypePtr);
+ } else if (Ptr.getAlignment() != Align) {
+ // Adjust the alignment of Ptr.
+ Ptr = Address(Ptr.getPointer(), Align);
+ }
+    // Cast the source to the separate storage type; the field fills it, so
+    // no shifting is needed.
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
+ /*IsSigned=*/false);
+ MaskedVal = SrcVal;
+ } else if (Info.StorageSize != Info.Size) {
+ // Cast the source to the storage type and shift it into place.
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
+ /*IsSigned=*/false);
+
+ // See if there are other bits in the bitfield's storage we'll need to load
+ // and mask together with source before storing.
assert(Info.StorageSize > Info.Size && "Invalid bitfield size.");
llvm::Value *Val =
Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.load");
@@ -1921,6 +1998,10 @@
SrcVal = Builder.CreateOr(Val, SrcVal, "bf.set");
} else {
assert(Info.Offset == 0);
+    // Cast the source to the storage type; the field fills the storage
+    // unit, so no shifting is needed.
+ SrcVal = Builder.CreateIntCast(SrcVal, Ptr.getElementType(),
+ /*IsSigned=*/false);
+ MaskedVal = SrcVal;
}
// Write the new value back out.
@@ -1931,7 +2012,7 @@
llvm::Value *ResultVal = MaskedVal;
// Sign extend the value if needed.
- if (Info.IsSigned) {
+ if (Info.IsSigned && !SeparatableBitfield) {
assert(Info.Size <= Info.StorageSize);
unsigned HighBits = Info.StorageSize - Info.Size;
if (HighBits) {
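
At the source level, the separable path amounts to treating a byte-aligned
field as its own small integer. For S1::f3 from the test, on a little-endian
target like x86-64, the split load is equivalent to (illustrative C++, not
part of the patch):

  struct S1 {
    unsigned f1 : 2, f2 : 6, f3 : 8, f4 : 4, f5 : 8;
  };
  S1 a1;

  // f3 is 8 bits wide at bit offset 8, i.e. byte 1 of the storage unit,
  // so the split access is a single one-byte load, with no shift or mask.
  unsigned read_f3_equiv() {
    return *(reinterpret_cast<unsigned char *>(&a1) + 1);
  }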
Index: include/clang/Frontend/CodeGenOptions.def
===================================================================
--- include/clang/Frontend/CodeGenOptions.def
+++ include/clang/Frontend/CodeGenOptions.def
@@ -177,6 +177,8 @@
CODEGENOPT(SanitizeStats , 1, 0) ///< Collect statistics for sanitizers.
CODEGENOPT(SimplifyLibCalls , 1, 1) ///< Set when -fbuiltin is enabled.
CODEGENOPT(SoftFloat , 1, 0) ///< -soft-float.
+CODEGENOPT(SplitBitfields , 1, 0) ///< Split bitfields with legal width and
+                                  ///< alignment.
CODEGENOPT(StrictEnums , 1, 0) ///< Optimize based on strict enum definition.
CODEGENOPT(StrictVTablePointers, 1, 0) ///< Optimize based on the strict vtable pointers
CODEGENOPT(TimePasses , 1, 0) ///< Set when -ftime-report is enabled.
Index: include/clang/Driver/Options.td
===================================================================
--- include/clang/Driver/Options.td
+++ include/clang/Driver/Options.td
@@ -1029,6 +1029,13 @@
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Filename defining the whitelist for imbuing the 'never instrument' XRay attribute.">;
+def fsplit_bitfields : Flag<["-"], "fsplit-bitfields">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+  HelpText<"Enable splitting bitfields with legal width and alignment in LLVM.">;
+def fno_split_bitfields : Flag<["-"], "fno-split-bitfields">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+  HelpText<"Disable splitting bitfields with legal width and alignment in LLVM.">;
+
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>;
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
Index: include/clang/Basic/DiagnosticDriverKinds.td
===================================================================
--- include/clang/Basic/DiagnosticDriverKinds.td
+++ include/clang/Basic/DiagnosticDriverKinds.td
@@ -326,4 +326,8 @@
"unable to find a Visual Studio installation; "
"try running Clang from a developer command prompt">,
InGroup<DiagGroup<"msvc-not-found">>;
+
+def warn_drv_split_bitfields_ignored : Warning<
+  "option '-fsplit-bitfields' cannot be enabled together with a sanitizer; flag ignored">,
+ InGroup<OptionIgnored>;
}
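
End to end, combining the two options should then produce the new warning
and compile as if -fsplit-bitfields were absent; roughly (a sketch based on
the diagnostic text above, not an actual transcript):

  $ clang -c -fsplit-bitfields -fsanitize=address a.cpp
  warning: option '-fsplit-bitfields' cannot be enabled together with a sanitizer; flag ignored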