kstoimenov updated this revision to Diff 367601.
kstoimenov retitled this revision from " [asan] Implemented intrinsic for
the custom calling convention similar used by HWASan for X86." to "[asan]
Implemented intrinsic for the custom calling convention similar used by HWASan
for X86.".
kstoimenov added a comment.
Removed IntrInaccessibleMemOnly.
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D107850/new/
https://reviews.llvm.org/D107850
Files:
llvm/include/llvm/IR/Intrinsics.td
llvm/lib/Target/X86/X86AsmPrinter.cpp
llvm/lib/Target/X86/X86AsmPrinter.h
llvm/lib/Target/X86/X86InstrCompiler.td
llvm/lib/Target/X86/X86MCInstLower.cpp
llvm/lib/Target/X86/X86RegisterInfo.td
llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
Index: llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/asan-check-memaccess-or.ll
@@ -0,0 +1,234 @@
+; RUN: llc < %s | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @load1(i8* nocapture readonly %x) {
+; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
+; CHECK: callq __asan_check_store1_rn[[RN1]]
+; CHECK-NEXT: retq
+ call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 0,
+ i32 0, i32 3, i32 1)
+ call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 1,
+ i32 0, i32 3, i32 1)
+ ret void
+}
+
+define void @load2(i16* nocapture readonly %x) {
+; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
+; CHECK: callq __asan_check_store2_rn[[RN2]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i16* %x to i64
+ %2 = bitcast i16* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 1, i32 3, i32 1)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 1, i32 3, i32 1)
+ ret void
+}
+
+define void @load4(i32* nocapture readonly %x) {
+; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
+; CHECK: callq __asan_check_store4_rn[[RN4]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i32* %x to i64
+ %2 = bitcast i32* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 2, i32 3, i32 1)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 2, i32 3, i32 1)
+ ret void
+}
+define void @load8(i64* nocapture readonly %x) {
+; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
+; CHECK: callq __asan_check_store8_rn[[RN8]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i64* %x to i64
+ %2 = bitcast i64* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 3, i32 3, i32 1)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 3, i32 3, i32 1)
+ ret void
+}
+
+define void @load16(i128* nocapture readonly %x) {
+; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
+; CHECK: callq __asan_check_store16_rn[[RN16]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i128* %x to i64
+ %2 = bitcast i128* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 4, i32 3, i32 1)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 4, i32 3, i32 1)
+ ret void
+}
+
+; CHECK: __asan_check_load1_rn[[RN1]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load1
+
+; CHECK: __asan_check_load2_rn[[RN2]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $1, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load2
+
+; CHECK: __asan_check_load4_rn[[RN4]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $3, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load4
+
+; CHECK: __asan_check_load8_rn[[RN8]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: cmpb $0, (%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load8
+
+; CHECK: __asan_check_load16_rn[[RN16]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: cmpw $0, (%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load16
+
+; CHECK: __asan_check_store1_rn[[RN1]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store1
+
+; CHECK: __asan_check_store2_rn[[RN2]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $1, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store2
+
+; CHECK: __asan_check_store4_rn[[RN4]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8
+; CHECK-NEXT: movb (%r8), %r8b
+; CHECK-NEXT: testb %r8b, %r8b
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $3, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store4
+
+; CHECK: __asan_check_store8_rn[[RN8]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: cmpb $0, (%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store8
+
+; CHECK: __asan_check_store16_rn[[RN16]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: orq $2147450880, %r8{{.*}}
+; CHECK-NEXT: cmpw $0, (%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store16
+
+declare void @llvm.asan.check.memaccess(i8*, i64 immarg, i32 immarg,
+ i32 immarg, i32 immarg, i32 immarg)
Index: llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/asan-check-memaccess-add.ll
@@ -0,0 +1,224 @@
+; RUN: llc < %s | FileCheck %s
+
+target triple = "x86_64-unknown-linux-gnu"
+
+define void @load1(i8* nocapture readonly %x) {
+; CHECK: callq __asan_check_load1_rn[[RN1:.*]]
+; CHECK: callq __asan_check_store1_rn[[RN1]]
+; CHECK-NEXT: retq
+ call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 0,
+ i32 0, i32 3, i32 0)
+ call void @llvm.asan.check.memaccess(i8* %x, i64 2147450880, i32 1,
+ i32 0, i32 3, i32 0)
+ ret void
+}
+
+define void @load2(i16* nocapture readonly %x) {
+; CHECK: callq __asan_check_load2_rn[[RN2:.*]]
+; CHECK: callq __asan_check_store2_rn[[RN2]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i16* %x to i64
+ %2 = bitcast i16* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 1, i32 3, i32 0)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 1, i32 3, i32 0)
+ ret void
+}
+
+define void @load4(i32* nocapture readonly %x) {
+; CHECK: callq __asan_check_load4_rn[[RN4:.*]]
+; CHECK: callq __asan_check_store4_rn[[RN4]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i32* %x to i64
+ %2 = bitcast i32* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 2, i32 3, i32 0)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 2, i32 3, i32 0)
+ ret void
+}
+define void @load8(i64* nocapture readonly %x) {
+; CHECK: callq __asan_check_load8_rn[[RN8:.*]]
+; CHECK: callq __asan_check_store8_rn[[RN8]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i64* %x to i64
+ %2 = bitcast i64* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 3, i32 3, i32 0)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 3, i32 3, i32 0)
+ ret void
+}
+
+define void @load16(i128* nocapture readonly %x) {
+; CHECK: callq __asan_check_load16_rn[[RN16:.*]]
+; CHECK: callq __asan_check_store16_rn[[RN16]]
+; CHECK-NEXT: retq
+ %1 = ptrtoint i128* %x to i64
+ %2 = bitcast i128* %x to i8*
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 0,
+ i32 4, i32 3, i32 0)
+ call void @llvm.asan.check.memaccess(i8* %2, i64 2147450880, i32 1,
+ i32 4, i32 3, i32 0)
+ ret void
+}
+
+; CHECK: __asan_check_load1_rn[[RN1]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load1
+
+; CHECK: __asan_check_load2_rn[[RN2]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $1, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load2
+
+; CHECK: __asan_check_load4_rn[[RN4]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $3, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load4
+
+; CHECK: __asan_check_load8_rn[[RN8]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: cmpb $0, 2147450880(%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load8
+
+; CHECK: __asan_check_load16_rn[[RN16]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: cmpw $0, 2147450880(%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_load16
+
+; CHECK: __asan_check_store1_rn[[RN1]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store1
+
+; CHECK: __asan_check_store2_rn[[RN2]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $1, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store2
+
+; CHECK: __asan_check_store4_rn[[RN4]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: movsbl 2147450880(%r8), %r8d
+; CHECK-NEXT: testl %r8d, %r8d
+; CHECK-NEXT: jne [[EXTRA:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[EXTRA]]:
+; CHECK-NEXT: pushq %rcx
+; CHECK-NEXT: movq [[REG]], %rcx
+; CHECK-NEXT: andl $7, %ecx
+; CHECK-NEXT: addl $3, %ecx
+; CHECK-NEXT: cmpl %r8d, %ecx
+; CHECK-NEXT: popq %rcx
+; CHECK-NEXT: jl [[RET]]
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store4
+
+; CHECK: __asan_check_store8_rn[[RN8]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: cmpb $0, 2147450880(%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store8
+
+; CHECK: __asan_check_store16_rn[[RN16]]:
+; CHECK-NEXT: movq [[REG:.*]], %r8
+; CHECK-NEXT: shrq $3, %r8
+; CHECK-NEXT: cmpw $0, 2147450880(%r8)
+; CHECK-NEXT: jne [[FAIL:.*]]
+; CHECK-NEXT: [[RET:.*]]:
+; CHECK-NEXT: retq
+; CHECK-NEXT: [[FAIL]]:
+; CHECK-NEXT: movq [[REG:.*]], %rdi
+; CHECK-NEXT: jmp __asan_report_store16
+
+declare void @llvm.asan.check.memaccess(i8*, i64 immarg, i32 immarg,
+ i32 immarg, i32 immarg, i32 immarg)
Index: llvm/lib/Target/X86/X86RegisterInfo.td
===================================================================
--- llvm/lib/Target/X86/X86RegisterInfo.td
+++ llvm/lib/Target/X86/X86RegisterInfo.td
@@ -436,6 +436,12 @@
(add RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11,
RBX, R14, R15, R12, R13, RBP, RSP, RIP)>;
+// GR64NoR8 - 64-bit GPRs without R8 and RIP. Can be used when emitting code
+// for intrinsics, which use implicit input registers.
+def GR64NoR8 : RegisterClass<"X86", [i64], 64,
+ (add RAX, RCX, RDX, RSI, RDI, R9, R10, R11,
+ RBX, R14, R15, R12, R13, RBP, RSP)>;
+
// Segment registers for use by MOV instructions (and others) that have a
// segment register as one operand. Always contain a 16-bit segment
// descriptor.
Index: llvm/lib/Target/X86/X86MCInstLower.cpp
===================================================================
--- llvm/lib/Target/X86/X86MCInstLower.cpp
+++ llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -43,8 +43,10 @@
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerCommon.h"
using namespace llvm;
@@ -1323,6 +1325,243 @@
.addExpr(Op));
}
+void X86AsmPrinter::LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
+  // FIXME: Make this work on non-ELF.
+  if (!TM.getTargetTriple().isOSBinFormatELF()) {
+    report_fatal_error("llvm.asan.check.memaccess only supported on ELF");
+    return;
+  }
+
+  unsigned Reg = MI.getOperand(0).getReg().id();
+  bool IsWrite = MI.getOperand(1).getImm();
+  size_t AccessSizeIndex = MI.getOperand(2).getImm();
+
+  MCSymbol *&Sym = AsanMemaccessSymbols[{Reg, IsWrite, AccessSizeIndex}];
+  if (!Sym) {
+    std::string Name = IsWrite ? "store" : "load";
+    std::string SymName = "__asan_check_" + Name +
+                          utostr(1 << AccessSizeIndex) + "_rn" + utostr(Reg);
+    Sym = OutContext.getOrCreateSymbol(SymName);
+  }
+
+  EmitAndCountInstruction(
+      MCInstBuilder(X86::CALL64pcrel32)
+          .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
+}
+
+void X86AsmPrinter::emitAsanMemaccessPartial(Module &M, unsigned Reg,
+                                             bool IsWrite,
+                                             size_t AccessSizeIndex,
+                                             MCSubtargetInfo &STI) {
+  assert(AccessSizeIndex == 0 || AccessSizeIndex == 1 || AccessSizeIndex == 2);
+  assert(Reg != X86::R8);
+
+  uint64_t ShadowBase;
+  int MappingScale;
+  bool OrShadowOffset;
+  getAddressSanitizerParams(
+      Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
+      /*FIXME*/ false, &ShadowBase, &MappingScale, &OrShadowOffset);
+
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg),
+      STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
+                                   .addReg(X86::R8)
+                                   .addReg(X86::R8)
+                                   .addImm(MappingScale),
+                               STI);
+  if (OrShadowOffset) {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
+                                     .addReg(X86::R8)
+                                     .addReg(X86::R8)
+                                     .addImm(ShadowBase),
+                                 STI);
+    OutStreamer->emitInstruction(MCInstBuilder(X86::MOV8rm)
+                                     .addReg(X86::R8B)
+                                     .addReg(X86::R8)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(0)
+                                     .addReg(X86::NoRegister),
+                                 STI);
+    OutStreamer->emitInstruction(
+        MCInstBuilder(X86::TEST8rr).addReg(X86::R8B).addReg(X86::R8B), STI);
+  } else {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::MOVSX32rm8)
+                                     .addReg(X86::R8D)
+                                     .addReg(X86::R8)
+                                     .addImm(1)
+                                     .addReg(X86::NoRegister)
+                                     .addImm(ShadowBase)
+                                     .addReg(X86::NoRegister),
+                                 STI);
+    OutStreamer->emitInstruction(
+        MCInstBuilder(X86::TEST32rr).addReg(X86::R8D).addReg(X86::R8D), STI);
+  }
+  MCSymbol *AdditionalCheck = OutContext.createTempSymbol();
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JCC_1)
+          .addExpr(MCSymbolRefExpr::create(AdditionalCheck, OutContext))
+          .addImm(X86::COND_NE),
+      STI);
+  MCSymbol *ReturnSym = OutContext.createTempSymbol();
+  OutStreamer->emitLabel(ReturnSym);
+  OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
+
+  // Shadow byte is non-zero so we need to perform additional checks.
+  OutStreamer->emitLabel(AdditionalCheck);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::PUSH64r).addReg(X86::RCX),
+                               STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+                                   .addReg(X86::RCX)
+                                   .addReg(X86::NoRegister + Reg),
+                               STI);
+  const size_t Granularity = 1ULL << MappingScale;
+  OutStreamer->emitInstruction(MCInstBuilder(X86::AND32ri8)
+                                   .addReg(X86::ECX)
+                                   .addReg(X86::ECX)
+                                   .addImm(Granularity - 1),
+                               STI);
+  if (AccessSizeIndex == 1) {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
+                                     .addReg(X86::ECX)
+                                     .addReg(X86::ECX)
+                                     .addImm(1),
+                                 STI);
+  } else if (AccessSizeIndex == 2) {
+    OutStreamer->emitInstruction(MCInstBuilder(X86::ADD32ri8)
+                                     .addReg(X86::ECX)
+                                     .addReg(X86::ECX)
+                                     .addImm(3),
+                                 STI);
+  }
+
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::CMP32rr).addReg(X86::ECX).addReg(X86::R8D),
+      STI);
+  OutStreamer->emitInstruction(MCInstBuilder(X86::POP64r).addReg(X86::RCX),
+                               STI);
+  OutStreamer->emitInstruction(
+      MCInstBuilder(X86::JCC_1)
+          .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext))
+          .addImm(X86::COND_L),
+      STI);
+
+  emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI);
+}
+
+void X86AsmPrinter::emitAsanMemaccessFull(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex,
+ MCSubtargetInfo &STI) {
+ assert(AccessSizeIndex == 3 || AccessSizeIndex == 4);
+ assert(Reg != X86::R8);
+
+ uint64_t ShadowBase;
+ int MappingScale;
+ bool OrShadowOffset;
+ getAddressSanitizerParams(
+ Triple(M.getTargetTriple()), M.getDataLayout().getPointerSizeInBits(),
+ /*FIXME*/ false, &ShadowBase, &MappingScale, &OrShadowOffset);
+
+ OutStreamer->emitInstruction(
+ MCInstBuilder(X86::MOV64rr).addReg(X86::R8).addReg(X86::NoRegister + Reg),
+ STI);
+ OutStreamer->emitInstruction(MCInstBuilder(X86::SHR64ri)
+ .addReg(X86::R8)
+ .addReg(X86::R8)
+ .addImm(MappingScale),
+ STI);
+ if (OrShadowOffset) {
+ OutStreamer->emitInstruction(MCInstBuilder(X86::OR64ri32)
+ .addReg(X86::R8)
+ .addReg(X86::R8)
+ .addImm(ShadowBase),
+ STI);
+ auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
+ OutStreamer->emitInstruction(MCInstBuilder(OpCode)
+ .addReg(X86::R8)
+ .addImm(1)
+ .addReg(X86::NoRegister)
+ .addImm(0)
+ .addReg(X86::NoRegister)
+ .addImm(0),
+ STI);
+ } else {
+ auto OpCode = AccessSizeIndex == 3 ? X86::CMP8mi : X86::CMP16mi8;
+ OutStreamer->emitInstruction(MCInstBuilder(OpCode)
+ .addReg(X86::R8)
+ .addImm(1)
+ .addReg(X86::NoRegister)
+ .addImm(ShadowBase)
+ .addReg(X86::NoRegister)
+ .addImm(0),
+ STI);
+ }
+ MCSymbol *ReportCode = OutContext.createTempSymbol();
+ OutStreamer->emitInstruction(
+ MCInstBuilder(X86::JCC_1)
+ .addExpr(MCSymbolRefExpr::create(ReportCode, OutContext))
+ .addImm(X86::COND_NE),
+ STI);
+ MCSymbol *ReturnSym = OutContext.createTempSymbol();
+ OutStreamer->emitLabel(ReturnSym);
+ OutStreamer->emitInstruction(MCInstBuilder(getRetOpcode(*Subtarget)), STI);
+
+ OutStreamer->emitLabel(ReportCode);
+ emitAsanReportError(M, Reg, IsWrite, AccessSizeIndex, STI);
+}
+
+void X86AsmPrinter::emitAsanReportError(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex,
+ MCSubtargetInfo &STI) {
+ std::string Name = IsWrite ? "store" : "load";
+ MCSymbol *ReportError = OutContext.getOrCreateSymbol(
+ "__asan_report_" + Name + utostr(1 << AccessSizeIndex));
+ OutStreamer->emitInstruction(MCInstBuilder(X86::MOV64rr)
+ .addReg(X86::RDI)
+ .addReg(X86::NoRegister + Reg),
+ STI);
+ OutStreamer->emitInstruction(
+ MCInstBuilder(X86::JMP_1)
+ .addExpr(MCSymbolRefExpr::create(ReportError, OutContext)),
+ STI);
+}
+
+void X86AsmPrinter::emitAsanMemaccessSymbols(Module &M) {
+  if (AsanMemaccessSymbols.empty())
+    return;
+
+  const Triple &TT = TM.getTargetTriple();
+  assert(TT.isOSBinFormatELF());
+  std::unique_ptr<MCSubtargetInfo> STI(
+      TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
+  assert(STI && "Unable to create subtarget info");
+
+  for (auto &P : AsanMemaccessSymbols) {
+    MCSymbol *Sym = P.second;
+    OutStreamer->SwitchSection(OutContext.getELFSection(
+        ".text.hot", ELF::SHT_PROGBITS,
+        ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
+        /*IsComdat=*/true));
+
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
+    OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
+    OutStreamer->emitLabel(Sym);
+
+    unsigned Reg = std::get<0>(P.first);
+    bool IsWrite = std::get<1>(P.first);
+    size_t AccessSizeIndex = std::get<2>(P.first);
+
+    if (AccessSizeIndex < 3) {
+      emitAsanMemaccessPartial(M, Reg, IsWrite, AccessSizeIndex, *STI);
+    } else {
+      emitAsanMemaccessFull(M, Reg, IsWrite, AccessSizeIndex, *STI);
+    }
+  }
+}
+
void X86AsmPrinter::LowerPATCHABLE_OP(const MachineInstr &MI,
X86MCInstLower &MCIL) {
// PATCHABLE_OP minsize, opcode, operands
@@ -2563,6 +2802,9 @@
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
return;
+ case X86::ASAN_CHECK_MEMACCESS:
+ return LowerASAN_CHECK_MEMACCESS(*MI);
+
case X86::MORESTACK_RET_RESTORE_R10:
// Return, then restore R10.
EmitAndCountInstruction(MCInstBuilder(getRetOpcode(*Subtarget)));
Index: llvm/lib/Target/X86/X86InstrCompiler.td
===================================================================
--- llvm/lib/Target/X86/X86InstrCompiler.td
+++ llvm/lib/Target/X86/X86InstrCompiler.td
@@ -260,6 +260,17 @@
"#SEH_Epilogue", []>;
}
+//===----------------------------------------------------------------------===//
+// Pseudo instructions used by address sanitizer.
+//===----------------------------------------------------------------------===//
+let
+ Defs = [R8, EFLAGS] in {
+def ASAN_CHECK_MEMACCESS : PseudoI<
+ (outs), (ins GR64NoR8:$addr, i8imm:$iswrite, i8imm:$accesssizeindex),
+ [(int_asan_check_memaccess GR64NoR8:$addr, (i8 timm:$iswrite),
+ (i8 timm:$accesssizeindex))]>, Sched<[]>;
+}
+
//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//
@@ -960,7 +971,7 @@
!strconcat(mnemonic, "{l}\t{$val, $ptr|$ptr, $val}"),
[(set
GR32:$dst,
- (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
+ (!cast<PatFrag>(frag # "_32") addr:$ptr, GR32:$val))]>,
OpSize32;
def NAME#64 : RI<opc, MRMSrcMem, (outs GR64:$dst),
(ins GR64:$val, i64mem:$ptr),
Index: llvm/lib/Target/X86/X86AsmPrinter.h
===================================================================
--- llvm/lib/Target/X86/X86AsmPrinter.h
+++ llvm/lib/Target/X86/X86AsmPrinter.h
@@ -98,6 +98,21 @@
void LowerFENTRY_CALL(const MachineInstr &MI, X86MCInstLower &MCIL);
+ // Address sanitizer specific lowering for X86.
+ void LowerASAN_CHECK_MEMACCESS(const MachineInstr &MI);
+ void emitAsanMemaccessSymbols(Module &M);
+ void emitAsanMemaccessPartial(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex, MCSubtargetInfo &STI);
+ void emitAsanMemaccessFull(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex, MCSubtargetInfo &STI);
+ void emitAsanReportError(Module &M, unsigned Reg, bool IsWrite,
+ size_t AccessSizeIndex, MCSubtargetInfo &STI);
+
+ typedef std::tuple<unsigned /*Reg*/, uint32_t /*IsWrite*/,
+ uint32_t /*AccessSizeIndex*/>
+ AsanMemaccessTuple;
+ std::map<AsanMemaccessTuple, MCSymbol *> AsanMemaccessSymbols;
+
// Choose between emitting .seh_ directives and .cv_fpo_ directives.
void EmitSEHInstruction(const MachineInstr *MI);
Index: llvm/lib/Target/X86/X86AsmPrinter.cpp
===================================================================
--- llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -753,6 +753,8 @@
void X86AsmPrinter::emitEndOfAsmFile(Module &M) {
const Triple &TT = TM.getTargetTriple();
+ emitAsanMemaccessSymbols(M);
+
if (TT.isOSBinFormatMachO()) {
// Mach-O uses non-lazy symbol stubs to encode per-TU information into
// global table for symbol lookup.
Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -1339,8 +1339,8 @@
def int_sideeffect : DefaultAttrsIntrinsic<[], [], [IntrInaccessibleMemOnly, IntrWillReturn]>;
// The pseudoprobe intrinsic works as a place holder to the block it probes.
-// Like the sideeffect intrinsic defined above, this intrinsic is treated by the
-// optimizer as having opaque side effects so that it won't be get rid of or moved
+// Like the sideeffect intrinsic defined above, this intrinsic is treated by the
+// optimizer as having opaque side effects so that it won't be get rid of or moved
// out of the block it probes.
def int_pseudoprobe : Intrinsic<[], [llvm_i64_ty, llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
[IntrInaccessibleMemOnly, IntrWillReturn]>;
@@ -1637,6 +1637,10 @@
def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
+def int_asan_check_memaccess :
+ Intrinsic<[],[llvm_ptr_ty, llvm_i8_ty, llvm_i8_ty],
+ [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
def int_hwasan_check_memaccess :
Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
[ImmArg<ArgIndex<2>>]>;
_______________________________________________
cfe-commits mailing list
[email protected]
https://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits