chandlerc updated this revision to Diff 162325.
chandlerc added a comment.

Move to a more conservative model suggested by Kristof.
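
For context, a minimal LLVM IR sketch of what the more conservative model looks like (hypothetical function names, not taken from this patch's tests): hardening is requested per function via the new speculative_load_hardening attribute, the x86 pass only acts on functions that carry it (or when force-enabled with -x86-speculative-load-hardening), and inlining an attributed callee is expected to propagate the attribute to the caller:

  ; Hypothetical example, not part of this patch's tests.
  define i32 @hardened(i32 %x) speculative_load_hardening {
    ret i32 %x
  }

  ; If @hardened is inlined here, @caller should gain
  ; speculative_load_hardening so the inlined body stays hardened.
  define i32 @caller(i32 %x) {
    %r = call i32 @hardened(i32 %x)
    ret i32 %r
  }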


Repository:
  rL LLVM

https://reviews.llvm.org/D51157

Files:
  clang/include/clang/Driver/Options.td
  clang/include/clang/Frontend/CodeGenOptions.def
  clang/lib/CodeGen/CGCall.cpp
  clang/lib/Driver/ToolChains/Arch/X86.cpp
  clang/lib/Driver/ToolChains/Clang.cpp
  clang/lib/Frontend/CompilerInvocation.cpp
  clang/test/CodeGen/attr-speculative-load-hardening.c
  clang/test/Driver/x86-target-features.c
  llvm/docs/LangRef.rst
  llvm/include/llvm/Bitcode/LLVMBitCodes.h
  llvm/include/llvm/IR/Attributes.td
  llvm/lib/AsmParser/LLLexer.cpp
  llvm/lib/AsmParser/LLParser.cpp
  llvm/lib/AsmParser/LLToken.h
  llvm/lib/Bitcode/Reader/BitcodeReader.cpp
  llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
  llvm/lib/IR/Attributes.cpp
  llvm/lib/IR/Verifier.cpp
  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
  llvm/lib/Target/X86/X86TargetMachine.cpp
  llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp
  llvm/lib/Transforms/Utils/CodeExtractor.cpp
  llvm/test/CodeGen/X86/O0-pipeline.ll
  llvm/test/CodeGen/X86/O3-pipeline.ll
  llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
  llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll
  llvm/test/CodeGen/X86/speculative-load-hardening.ll
  llvm/test/Transforms/Inline/attributes.ll

Index: llvm/test/Transforms/Inline/attributes.ll
===================================================================
--- llvm/test/Transforms/Inline/attributes.ll
+++ llvm/test/Transforms/Inline/attributes.ll
@@ -26,6 +26,10 @@
   ret i32 %i
 }
 
+define i32 @slh_callee(i32 %i) speculative_load_hardening {
+  ret i32 %i
+}
+
 define i32 @alwaysinline_callee(i32 %i) alwaysinline {
   ret i32 %i
 }
@@ -161,6 +165,28 @@
 ; CHECK-NEXT: ret i32
 }
 
+; Can inline a normal function into an SLH'ed function.
+define i32 @test_caller_slh(i32 %i) speculative_load_hardening {
+; CHECK-LABEL: @test_caller_slh(
+; CHECK-SAME: ) [[SLH:.*]] {
+; CHECK-NOT: call
+; CHECK: ret i32
+entry:
+  %callee = call i32 @noattr_callee(i32 %i)
+  ret i32 %callee
+}
+
+; Can inline an SLH'ed function into a normal one, propagating SLH.
+define i32 @test_callee_slh(i32 %i) {
+; CHECK-LABEL: @test_callee_slh(
+; CHECK-SAME: ) [[SLH:.*]] {
+; CHECK-NOT: call
+; CHECK: ret i32
+entry:
+  %callee = call i32 @slh_callee(i32 %i)
+  ret i32 %callee
+}
+
 ; Check that a function doesn't get inlined if target-cpu strings don't match
 ; exactly.
 define i32 @test_target_cpu_callee0(i32 %i) "target-cpu"="corei7" {
@@ -384,6 +410,7 @@
 ; CHECK-NEXT: ret i32
 }
 
+; CHECK: attributes [[SLH]] = { speculative_load_hardening }
 ; CHECK: attributes [[FPMAD_FALSE]] = { "less-precise-fpmad"="false" }
 ; CHECK: attributes [[FPMAD_TRUE]] = { "less-precise-fpmad"="true" }
 ; CHECK: attributes [[NOIMPLICITFLOAT]] = { noimplicitfloat }
Index: llvm/test/CodeGen/X86/speculative-load-hardening.ll
===================================================================
--- llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1,14 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -x86-slh-lfence | FileCheck %s --check-prefix=X64-LFENCE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-slh-lfence | FileCheck %s --check-prefix=X64-LFENCE
 ;
 ; FIXME: Add support for 32-bit and other EH ABIs.
 
 declare void @leak(i32 %v1, i32 %v2)
 
 declare void @sink(i32)
 
-define i32 @test_trivial_entry_load(i32* %ptr) {
+define i32 @test_trivial_entry_load(i32* %ptr) speculative_load_hardening {
 ; X64-LABEL: test_trivial_entry_load:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rcx
@@ -29,7 +29,7 @@
   ret i32 %v
 }
 
-define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2, i32** %ptr3) {
+define void @test_basic_conditions(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2, i32** %ptr3) speculative_load_hardening {
 ; X64-LABEL: test_basic_conditions:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %r15
@@ -189,7 +189,7 @@
   ret void
 }
 
-define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind {
+define void @test_basic_loop(i32 %a, i32 %b, i32* %ptr1, i32* %ptr2) nounwind speculative_load_hardening {
 ; X64-LABEL: test_basic_loop:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rbp
@@ -293,7 +293,7 @@
   ret void
 }
 
-define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2) nounwind {
+define void @test_basic_nested_loop(i32 %a, i32 %b, i32 %c, i32* %ptr1, i32* %ptr2) nounwind speculative_load_hardening {
 ; X64-LABEL: test_basic_nested_loop:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rbp
@@ -481,7 +481,7 @@
 
 declare void @__cxa_throw(i8*, i8*, i8*) local_unnamed_addr
 
-define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
+define void @test_basic_eh(i32 %a, i32* %ptr1, i32* %ptr2) speculative_load_hardening personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*) {
 ; X64-LABEL: test_basic_eh:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rbp
@@ -631,7 +631,7 @@
 declare void @sink_double(double)
 
 ; Test direct and converting loads of floating point values.
-define void @test_fp_loads(float* %fptr, double* %dptr, i32* %i32ptr, i64* %i64ptr) nounwind {
+define void @test_fp_loads(float* %fptr, double* %dptr, i32* %i32ptr, i64* %i64ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_fp_loads:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %r15
@@ -786,7 +786,7 @@
 declare void @sink_v2i64(<2 x i64>)
 
 ; Test loads of vectors.
-define void @test_vec_loads(<4 x float>* %v4f32ptr, <2 x double>* %v2f64ptr, <16 x i8>* %v16i8ptr, <8 x i16>* %v8i16ptr, <4 x i32>* %v4i32ptr, <2 x i64>* %v2i64ptr) nounwind {
+define void @test_vec_loads(<4 x float>* %v4f32ptr, <2 x double>* %v2f64ptr, <16 x i8>* %v16i8ptr, <8 x i16>* %v8i16ptr, <4 x i32>* %v4i32ptr, <2 x i64>* %v2i64ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_vec_loads:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %r15
@@ -899,7 +899,7 @@
   ret void
 }
 
-define void @test_deferred_hardening(i32* %ptr1, i32* %ptr2, i32 %x) nounwind {
+define void @test_deferred_hardening(i32* %ptr1, i32* %ptr2, i32 %x) nounwind speculative_load_hardening {
 ; X64-LABEL: test_deferred_hardening:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %r14
Index: llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll
===================================================================
--- llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll
+++ llvm/test/CodeGen/X86/speculative-load-hardening-indirect.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections | FileCheck %s --check-prefix=X64
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening -data-sections -mattr=+retpoline | FileCheck %s --check-prefix=X64-RETPOLINE
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -data-sections | FileCheck %s --check-prefix=X64
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -data-sections -mattr=+retpoline | FileCheck %s --check-prefix=X64-RETPOLINE
 ;
 ; FIXME: Add support for 32-bit.
 
@@ -13,7 +13,7 @@
   i8* blockaddress(@test_indirectbr_global, %bb3)
 ]
 
-define i32 @test_indirect_call(i32 ()** %ptr) nounwind {
+define i32 @test_indirect_call(i32 ()** %ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirect_call:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rax
@@ -55,7 +55,7 @@
   ret i32 %v
 }
 
-define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind {
+define i32 @test_indirect_tail_call(i32 ()** %ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirect_tail_call:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rax
@@ -83,7 +83,7 @@
   ret i32 %v
 }
 
-define i32 @test_indirect_call_global() nounwind {
+define i32 @test_indirect_call_global() nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirect_call_global:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    pushq %rax
@@ -124,7 +124,7 @@
   ret i32 %v
 }
 
-define i32 @test_indirect_tail_call_global() nounwind {
+define i32 @test_indirect_tail_call_global() nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirect_tail_call_global:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rax
@@ -151,7 +151,7 @@
   ret i32 %v
 }
 
-define i32 @test_indirectbr(i8** %ptr) nounwind {
+define i32 @test_indirectbr(i8** %ptr) nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirectbr:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rcx
@@ -195,7 +195,7 @@
   ret i32 42
 }
 
-define i32 @test_indirectbr_global(i32 %idx) nounwind {
+define i32 @test_indirectbr_global(i32 %idx) nounwind speculative_load_hardening {
 ; X64-LABEL: test_indirectbr_global:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rcx
@@ -286,7 +286,7 @@
 
 ; This function's switch is crafted to trigger jump-table lowering in the x86
 ; backend so that we can test how the exact jump table lowering behaves.
-define i32 @test_switch_jumptable(i32 %idx) nounwind {
+define i32 @test_switch_jumptable(i32 %idx) nounwind speculative_load_hardening {
 ; X64-LABEL: test_switch_jumptable:
 ; X64:       # %bb.0: # %entry
 ; X64-NEXT:    movq %rsp, %rcx
Index: llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
===================================================================
--- llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
+++ llvm/test/CodeGen/X86/speculative-load-hardening-gather.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -x86-speculative-load-hardening | FileCheck %s
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
 
 declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, i8*, <4 x i32>, <4 x float>, i8)
 
@@ -950,6 +950,6 @@
   ret <4 x i64> %v
 }
 
-attributes #0 = { nounwind "target-features"="+avx2" }
-attributes #1 = { nounwind "target-features"="+avx512f" }
-attributes #2 = { nounwind "target-features"="+avx512vl" }
+attributes #0 = { nounwind speculative_load_hardening "target-features"="+avx2" }
+attributes #1 = { nounwind speculative_load_hardening "target-features"="+avx512f" }
+attributes #2 = { nounwind speculative_load_hardening "target-features"="+avx512vl" }
Index: llvm/test/CodeGen/X86/O3-pipeline.ll
===================================================================
--- llvm/test/CodeGen/X86/O3-pipeline.ll
+++ llvm/test/CodeGen/X86/O3-pipeline.ll
@@ -90,6 +90,7 @@
 ; CHECK-NEXT:       X86 LEA Optimize
 ; CHECK-NEXT:       X86 Optimize Call Frame
 ; CHECK-NEXT:       X86 Avoid Store Forwarding Block
+; CHECK-NEXT:       X86 speculative load hardening
 ; CHECK-NEXT:       MachineDominator Tree Construction
 ; CHECK-NEXT:       X86 EFLAGS copy lowering
 ; CHECK-NEXT:       X86 WinAlloca Expander
Index: llvm/test/CodeGen/X86/O0-pipeline.ll
===================================================================
--- llvm/test/CodeGen/X86/O0-pipeline.ll
+++ llvm/test/CodeGen/X86/O0-pipeline.ll
@@ -37,6 +37,7 @@
 ; CHECK-NEXT:       X86 PIC Global Base Reg Initialization
 ; CHECK-NEXT:       Expand ISel Pseudo-instructions
 ; CHECK-NEXT:       Local Stack Slot Allocation
+; CHECK-NEXT:       X86 speculative load hardening
 ; CHECK-NEXT:       MachineDominator Tree Construction
 ; CHECK-NEXT:       X86 EFLAGS copy lowering
 ; CHECK-NEXT:       X86 WinAlloca Expander
Index: llvm/lib/Transforms/Utils/CodeExtractor.cpp
===================================================================
--- llvm/lib/Transforms/Utils/CodeExtractor.cpp
+++ llvm/lib/Transforms/Utils/CodeExtractor.cpp
@@ -753,6 +753,7 @@
       case Attribute::SanitizeMemory:
       case Attribute::SanitizeThread:
       case Attribute::SanitizeHWAddress:
+      case Attribute::SpeculativeLoadHardening:
       case Attribute::StackProtect:
       case Attribute::StackProtectReq:
       case Attribute::StackProtectStrong:
Index: llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp
===================================================================
--- llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp
+++ llvm/lib/Transforms/IPO/ForceFunctionAttrs.cpp
@@ -58,6 +58,7 @@
       .Case("sanitize_hwaddress", Attribute::SanitizeHWAddress)
       .Case("sanitize_memory", Attribute::SanitizeMemory)
       .Case("sanitize_thread", Attribute::SanitizeThread)
+      .Case("speculative_load_hardening", Attribute::SpeculativeLoadHardening)
       .Case("ssp", Attribute::StackProtect)
       .Case("sspreq", Attribute::StackProtectReq)
       .Case("sspstrong", Attribute::StackProtectStrong)
Index: llvm/lib/Target/X86/X86TargetMachine.cpp
===================================================================
--- llvm/lib/Target/X86/X86TargetMachine.cpp
+++ llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -54,10 +54,6 @@
                                cl::desc("Enable the machine combiner pass"),
                                cl::init(true), cl::Hidden);
 
-static cl::opt<bool> EnableSpeculativeLoadHardening(
-    "x86-speculative-load-hardening",
-    cl::desc("Enable speculative load hardening"), cl::init(false), cl::Hidden);
-
 namespace llvm {
 
 void initializeWinEHStatePassPass(PassRegistry &);
@@ -475,8 +471,8 @@
     addPass(createX86AvoidStoreForwardingBlocks());
   }
 
-  if (EnableSpeculativeLoadHardening)
-    addPass(createX86SpeculativeLoadHardeningPass());
+  // Will only run if force-enabled or if it detects the relevant attribute.
+  addPass(createX86SpeculativeLoadHardeningPass());
 
   addPass(createX86FlagsCopyLoweringPass());
   addPass(createX86WinAllocaExpander());
Index: llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
===================================================================
--- llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -75,6 +75,11 @@
 STATISTIC(NumInstsInserted, "Number of instructions inserted");
 STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
 
+static cl::opt<bool> EnableSpeculativeLoadHardening(
+    "x86-speculative-load-hardening",
+    cl::desc("Force enable speculative load hardening"), cl::init(false),
+    cl::Hidden);
+
 static cl::opt<bool> HardenEdgesWithLFENCE(
     PASS_KEY "-lfence",
     cl::desc(
@@ -401,6 +406,12 @@
   LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
                     << " **********\n");
 
+  // Only run if this pass is force-enabled or we detect the relevant function
+  // attribute requesting SLH.
+  if (!EnableSpeculativeLoadHardening &&
+      !MF.getFunction().hasFnAttribute(Attribute::SpeculativeLoadHardening))
+    return false;
+
   Subtarget = &MF.getSubtarget<X86Subtarget>();
   MRI = &MF.getRegInfo();
   TII = Subtarget->getInstrInfo();
Index: llvm/lib/IR/Verifier.cpp
===================================================================
--- llvm/lib/IR/Verifier.cpp
+++ llvm/lib/IR/Verifier.cpp
@@ -1478,6 +1478,7 @@
   case Attribute::InaccessibleMemOnly:
   case Attribute::InaccessibleMemOrArgMemOnly:
   case Attribute::AllocSize:
+  case Attribute::SpeculativeLoadHardening:
   case Attribute::Speculatable:
   case Attribute::StrictFP:
     return true;
Index: llvm/lib/IR/Attributes.cpp
===================================================================
--- llvm/lib/IR/Attributes.cpp
+++ llvm/lib/IR/Attributes.cpp
@@ -323,6 +323,8 @@
     return "returns_twice";
   if (hasAttribute(Attribute::SExt))
     return "signext";
+  if (hasAttribute(Attribute::SpeculativeLoadHardening))
+    return "speculative_load_hardening";
   if (hasAttribute(Attribute::Speculatable))
     return "speculatable";
   if (hasAttribute(Attribute::StackProtect))
Index: llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -690,6 +690,8 @@
     return bitc::ATTR_KIND_SANITIZE_THREAD;
   case Attribute::SanitizeMemory:
     return bitc::ATTR_KIND_SANITIZE_MEMORY;
+  case Attribute::SpeculativeLoadHardening:
+    return bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING;
   case Attribute::SwiftError:
     return bitc::ATTR_KIND_SWIFT_ERROR;
   case Attribute::SwiftSelf:
Index: llvm/lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1165,6 +1165,8 @@
   case Attribute::NoCfCheck:       return 1ULL << 57;
   case Attribute::OptForFuzzing:   return 1ULL << 58;
   case Attribute::ShadowCallStack: return 1ULL << 59;
+  case Attribute::SpeculativeLoadHardening:
+    return 1ULL << 60;
   case Attribute::Dereferenceable:
     llvm_unreachable("dereferenceable attribute not supported in raw format");
     break;
@@ -1389,6 +1391,8 @@
     return Attribute::SanitizeThread;
   case bitc::ATTR_KIND_SANITIZE_MEMORY:
     return Attribute::SanitizeMemory;
+  case bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING:
+    return Attribute::SpeculativeLoadHardening;
   case bitc::ATTR_KIND_SWIFT_ERROR:
     return Attribute::SwiftError;
   case bitc::ATTR_KIND_SWIFT_SELF:
Index: llvm/lib/AsmParser/LLToken.h
===================================================================
--- llvm/lib/AsmParser/LLToken.h
+++ llvm/lib/AsmParser/LLToken.h
@@ -219,6 +219,7 @@
   kw_sret,
   kw_sanitize_thread,
   kw_sanitize_memory,
+  kw_speculative_load_hardening,
   kw_strictfp,
   kw_swifterror,
   kw_swiftself,
Index: llvm/lib/AsmParser/LLParser.cpp
===================================================================
--- llvm/lib/AsmParser/LLParser.cpp
+++ llvm/lib/AsmParser/LLParser.cpp
@@ -1276,6 +1276,9 @@
       B.addAttribute(Attribute::SanitizeThread); break;
     case lltok::kw_sanitize_memory:
       B.addAttribute(Attribute::SanitizeMemory); break;
+    case lltok::kw_speculative_load_hardening:
+      B.addAttribute(Attribute::SpeculativeLoadHardening);
+      break;
     case lltok::kw_strictfp: B.addAttribute(Attribute::StrictFP); break;
     case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
     case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
@@ -1619,6 +1622,7 @@
     case lltok::kw_sanitize_hwaddress:
     case lltok::kw_sanitize_memory:
     case lltok::kw_sanitize_thread:
+    case lltok::kw_speculative_load_hardening:
     case lltok::kw_ssp:
     case lltok::kw_sspreq:
     case lltok::kw_sspstrong:
@@ -1715,6 +1719,7 @@
     case lltok::kw_sanitize_hwaddress:
     case lltok::kw_sanitize_memory:
     case lltok::kw_sanitize_thread:
+    case lltok::kw_speculative_load_hardening:
     case lltok::kw_ssp:
     case lltok::kw_sspreq:
     case lltok::kw_sspstrong:
Index: llvm/lib/AsmParser/LLLexer.cpp
===================================================================
--- llvm/lib/AsmParser/LLLexer.cpp
+++ llvm/lib/AsmParser/LLLexer.cpp
@@ -678,6 +678,7 @@
   KEYWORD(sanitize_hwaddress);
   KEYWORD(sanitize_thread);
   KEYWORD(sanitize_memory);
+  KEYWORD(speculative_load_hardening);
   KEYWORD(swifterror);
   KEYWORD(swiftself);
   KEYWORD(uwtable);
Index: llvm/include/llvm/IR/Attributes.td
===================================================================
--- llvm/include/llvm/IR/Attributes.td
+++ llvm/include/llvm/IR/Attributes.td
@@ -176,6 +176,15 @@
 /// HWAddressSanitizer is on.
 def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress">;
 
+/// Speculative Load Hardening is enabled.
+///
+/// Note that this uses the default compatibility (always compatible during
+/// inlining) and a conservative merge strategy where inlining an attributed
+/// body will add the attribute to the caller. This ensures that code carrying
+/// this attribute will always be lowered with hardening enabled, even after
+/// inlining.
+def SpeculativeLoadHardening : EnumAttr<"speculative_load_hardening">;
+
 /// Argument is swift error.
 def SwiftError : EnumAttr<"swifterror">;
 
@@ -232,6 +241,7 @@
 def : MergeRule<"setOR<NoImplicitFloatAttr>">;
 def : MergeRule<"setOR<NoJumpTablesAttr>">;
 def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
+def : MergeRule<"setOR<SpeculativeLoadHardeningAttr>">;
 def : MergeRule<"adjustCallerSSPLevel">;
 def : MergeRule<"adjustCallerStackProbes">;
 def : MergeRule<"adjustCallerStackProbeSize">;
Index: llvm/include/llvm/Bitcode/LLVMBitCodes.h
===================================================================
--- llvm/include/llvm/Bitcode/LLVMBitCodes.h
+++ llvm/include/llvm/Bitcode/LLVMBitCodes.h
@@ -591,6 +591,7 @@
   ATTR_KIND_NOCF_CHECK = 56,
   ATTR_KIND_OPT_FOR_FUZZING = 57,
   ATTR_KIND_SHADOWCALLSTACK = 58,
+  ATTR_KIND_SPECULATIVE_LOAD_HARDENING = 59,
 };
 
 enum ComdatSelectionKindCodes {
Index: llvm/docs/LangRef.rst
===================================================================
--- llvm/docs/LangRef.rst
+++ llvm/docs/LangRef.rst
@@ -1636,6 +1636,28 @@
     This attribute indicates that HWAddressSanitizer checks
     (dynamic address safety analysis based on tagged pointers) are enabled for
     this function.
+``speculative_load_hardening``
+    This attribute indicates that
+    `Speculative Load Hardening <https://llvm.org/docs/SpeculativeLoadHardening.html>`_
+    should be enabled for the function body. This is a best-effort attempt to
+    mitigate all known speculative execution information leak vulnerabilities
+    that are based on the fundamental principles of modern processors'
+    speculative execution. These vulnerabilities are typically classified as
+    "Spectre variant #1" vulnerabilities. Notably, this does not attempt to
+    mitigate any vulnerabilities where the speculative execution and/or
+    prediction devices of specific processors can be *completely* undermined
+    (such as "Branch Target Injection", a.k.a, "Spectre variant #2"). Instead,
+    this is a target-independent request to harden against the completely
+    generic risk posed by speculative execution to incorrectly load secret data,
+    making it available to some micro-architectural side-channel for information
+    leak. For a processor without any speculative execution or predictors, this
+    is expected to be a no-op.
+
+    When inlining, the attribute is sticky. Inlining a function that carries
+    this attribute will cause the caller to gain the attribute. This is intended
+    to provide a maximally conservative model where the code in a function
+    annotated with this attribute will always (even after inlining) end up
+    hardened.
 ``speculatable``
     This function attribute indicates that the function does not have any
     effects besides calculating its result and does not have undefined behavior.
Index: clang/test/Driver/x86-target-features.c
===================================================================
--- clang/test/Driver/x86-target-features.c
+++ clang/test/Driver/x86-target-features.c
@@ -140,6 +140,15 @@
 // RETPOLINE-EXTERNAL-THUNK: "-target-feature" "+retpoline-external-thunk"
 // NO-RETPOLINE-EXTERNAL-THUNK: "-target-feature" "-retpoline-external-thunk"
 
+// RUN: %clang -target i386-linux-gnu -mspeculative-load-hardening %s -### -o %t.o 2>&1 | FileCheck -check-prefix=SLH %s
+// RUN: %clang -target i386-linux-gnu -mretpoline -mspeculative-load-hardening %s -### -o %t.o 2>&1 | FileCheck -check-prefix=RETPOLINE %s
+// RUN: %clang -target i386-linux-gnu -mno-speculative-load-hardening %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-SLH %s
+// SLH-NOT: retpoline
+// SLH: "-target-feature" "+retpoline-indirect-calls"
+// SLH-NOT: retpoline
+// SLH: "-mspeculative-load-hardening"
+// NO-SLH-NOT: retpoline
+
 // RUN: %clang -target i386-linux-gnu -mwaitpkg %s -### -o %t.o 2>&1 | FileCheck -check-prefix=WAITPKG %s
 // RUN: %clang -target i386-linux-gnu -mno-waitpkg %s -### -o %t.o 2>&1 | FileCheck -check-prefix=NO-WAITPKG %s
 // WAITPKG: "-target-feature" "+waitpkg"
Index: clang/test/CodeGen/attr-speculative-load-hardening.c
===================================================================
--- /dev/null
+++ clang/test/CodeGen/attr-speculative-load-hardening.c
@@ -0,0 +1,11 @@
+// RUN: %clang_cc1 -mspeculative-load-hardening -disable-llvm-passes -emit-llvm %s -o - | FileCheck %s -check-prefix=SLH
+//
+// Check that we set the attribute on each function.
+
+int test1() {
+  return 42;
+}
+// SLH: @{{.*}}test1{{.*}}[[SLH:#[0-9]+]]
+
+// SLH: attributes [[SLH]] = { {{.*}}speculative_load_hardening{{.*}} }
+
Index: clang/lib/Frontend/CompilerInvocation.cpp
===================================================================
--- clang/lib/Frontend/CompilerInvocation.cpp
+++ clang/lib/Frontend/CompilerInvocation.cpp
@@ -1147,6 +1147,10 @@
 
   Opts.KeepStaticConsts = Args.hasArg(OPT_fkeep_static_consts);
 
+  Opts.SpeculativeLoadHardening =
+      Args.hasFlag(OPT_mspeculative_load_hardening,
+                   OPT_mno_speculative_load_hardening, false);
+
   return Success;
 }
 
Index: clang/lib/Driver/ToolChains/Clang.cpp
===================================================================
--- clang/lib/Driver/ToolChains/Clang.cpp
+++ clang/lib/Driver/ToolChains/Clang.cpp
@@ -4115,6 +4115,9 @@
 
   Args.AddLastArg(CmdArgs, options::OPT_pthread);
 
+  Args.AddLastArg(CmdArgs, options::OPT_mspeculative_load_hardening,
+                  options::OPT_mno_speculative_load_hardening);
+
   RenderSSPOptions(getToolChain(), Args, CmdArgs, KernelOrKext);
 
   // Translate -mstackrealign
Index: clang/lib/Driver/ToolChains/Arch/X86.cpp
===================================================================
--- clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -146,15 +146,23 @@
 
   // Translate the high level `-mretpoline` flag to the specific target feature
   // flags. We also detect if the user asked for retpoline external thunks but
-  // failed to ask for retpolines themselves. This is a bit hacky but keeps
-  // existing usages working. We should consider deprecated this and instead
-  // warning if the user requests external retpoline thunks and *doesn't*
-  // request some form of retpolines.
-  if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline)) {
+  // failed to ask for retpolines themselves (through any of the different
+  // flags). This is a bit hacky but keeps existing usages working. We should
+  // consider deprecating this and instead warning if the user requests external
+  // retpoline thunks and *doesn't* request some form of retpolines.
+  if (Args.hasArgNoClaim(options::OPT_mretpoline, options::OPT_mno_retpoline,
+                         options::OPT_mspeculative_load_hardening,
+                         options::OPT_mno_speculative_load_hardening)) {
     if (Args.hasFlag(options::OPT_mretpoline, options::OPT_mno_retpoline,
                      false)) {
       Features.push_back("+retpoline-indirect-calls");
       Features.push_back("+retpoline-indirect-branches");
+    } else if (Args.hasFlag(options::OPT_mspeculative_load_hardening,
+                            options::OPT_mno_speculative_load_hardening,
+                            false)) {
+      // On x86, speculative load hardening relies on at least using retpolines
+      // for indirect calls.
+      Features.push_back("+retpoline-indirect-calls");
     }
   } else if (Args.hasFlag(options::OPT_mretpoline_external_thunk,
                           options::OPT_mno_retpoline_external_thunk, false)) {
Index: clang/lib/CodeGen/CGCall.cpp
===================================================================
--- clang/lib/CodeGen/CGCall.cpp
+++ clang/lib/CodeGen/CGCall.cpp
@@ -1784,6 +1784,9 @@
       FuncAttrs.addAttribute("stackrealign");
     if (CodeGenOpts.Backchain)
       FuncAttrs.addAttribute("backchain");
+
+    if (CodeGenOpts.SpeculativeLoadHardening)
+      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
   }
 
   if (getLangOpts().assumeFunctionsAreConvergent()) {
Index: clang/include/clang/Frontend/CodeGenOptions.def
===================================================================
--- clang/include/clang/Frontend/CodeGenOptions.def
+++ clang/include/clang/Frontend/CodeGenOptions.def
@@ -211,6 +211,7 @@
 CODEGENOPT(SanitizeStats     , 1, 0) ///< Collect statistics for sanitizers.
 CODEGENOPT(SimplifyLibCalls  , 1, 1) ///< Set when -fbuiltin is enabled.
 CODEGENOPT(SoftFloat         , 1, 0) ///< -soft-float.
+CODEGENOPT(SpeculativeLoadHardening, 1, 0) ///< Enable speculative load hardening.
 CODEGENOPT(FineGrainedBitfieldAccesses, 1, 0) ///< Enable fine-grained bitfield accesses.
 CODEGENOPT(StrictEnums       , 1, 0) ///< Optimize based on strict enum definition.
 CODEGENOPT(StrictVTablePointers, 1, 0) ///< Optimize based on the strict vtable pointers
Index: clang/include/clang/Driver/Options.td
===================================================================
--- clang/include/clang/Driver/Options.td
+++ clang/include/clang/Driver/Options.td
@@ -2003,6 +2003,10 @@
 
 def mretpoline : Flag<["-"], "mretpoline">, Group<m_Group>, Flags<[CoreOption,DriverOption]>;
 def mno_retpoline : Flag<["-"], "mno-retpoline">, Group<m_Group>, Flags<[CoreOption,DriverOption]>;
+def mspeculative_load_hardening : Flag<["-"], "mspeculative-load-hardening">,
+  Group<m_Group>, Flags<[CoreOption,CC1Option]>;
+def mno_speculative_load_hardening : Flag<["-"], "mno-speculative-load-hardening">,
+  Group<m_Group>, Flags<[CoreOption]>;
 
 def mrelax : Flag<["-"], "mrelax">, Group<m_riscv_Features_Group>,
   HelpText<"Enable linker relaxation">;