[SSP] [3/3] cmpxchg and addrspacecast instructions can now trigger stack protectors. Fixes PR42238.

Add test coverage for llvm.memset, as proxy for all llvm.mem* intrinsics. There are two issues here: (1) they could be lowered to a libc call, which could be intercepted, and do Bad Stuff; (2) with a non-constant size, they could overwrite the current stack frame.

The test was mostly written by Matt Arsenault in r363169, which was later reverted; I tweaked what he had and added the llvm.memset part.

Differential Revision: https://reviews.llvm.org/D67845

llvm-svn: 373220
commit ed1f3f36ae (parent 527815f5b0)
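As a rough illustration of issue (2) from the message above (an illustrative sketch, not part of the patch; the function name, buffer size, and %n are invented), a frame-local buffer is cleared with a size that is not a compile-time constant: if %n exceeds the 16-byte allocation, the llvm.memset, or the libc memset it may be lowered to, can write past the buffer and over the rest of the frame. This is the pattern the new @memset_captures test below is meant to lock in.

define void @clear_buf(i64 %n) sspstrong {
entry:
  %buf = alloca [16 x i8], align 1
  %p = bitcast [16 x i8]* %buf to i8*
  ; A non-constant length means the write is not provably bounded by the
  ; allocation, so a stack protector is warranted here.
  call void @llvm.memset.p0i8.i64(i8* align 1 %p, i8 0, i64 %n, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)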
llvm/lib/CodeGen/StackProtector.cpp
@@ -164,12 +164,19 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
       if (AI == cast<StoreInst>(I)->getValueOperand())
         return true;
       break;
+    case Instruction::AtomicCmpXchg:
+      // cmpxchg conceptually includes both a load and store from the same
+      // location. So, like store, the value being stored is what matters.
+      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
+        return true;
+      break;
     case Instruction::PtrToInt:
       if (AI == cast<PtrToIntInst>(I)->getOperand(0))
         return true;
       break;
     case Instruction::Call: {
-      // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
+      // Ignore intrinsics that do not become real instructions.
+      // TODO: Narrow this to intrinsics that have store-like effects.
       const auto *CI = cast<CallInst>(I);
       if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
         return true;
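To make the new AtomicCmpXchg case concrete, here is a minimal sketch (the function and value names are invented for illustration, not taken from the patch): the alloca's address is the new-value operand of the cmpxchg, so on success it is published through %p exactly as a plain store would publish it, and the function should now get a guard.

define void @cmpxchg_publishes_slot(i32** %p) sspstrong {
entry:
  %slot = alloca i32, align 4
  ; %slot is the value being stored by the cmpxchg, so HasAddressTaken
  ; treats this like "store i32* %slot, i32** %p".
  %pair = cmpxchg i32** %p, i32* null, i32* %slot seq_cst monotonic
  ret void
}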
@@ -180,6 +187,7 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
     case Instruction::BitCast:
     case Instruction::GetElementPtr:
     case Instruction::Select:
+    case Instruction::AddrSpaceCast:
       if (HasAddressTaken(I))
         return true;
       break;
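The AddrSpaceCast case only recurses into the cast's users; it does not force a guard by itself. As a contrast to the escaping @store_addrspacecast_captures test below, a hedged sketch (illustrative only, not part of the patch): when the cast result is only loaded from, the recursive HasAddressTaken call reaches the innocuous Load case and no protector is requested.

define i32 @addrspacecast_only_loaded() sspstrong {
entry:
  %slot = alloca i32, align 4
  ; The cast is walked recursively; its only user is a load,
  ; so the address of %slot never escapes the frame.
  %cast = addrspacecast i32* %slot to i32 addrspace(1)*
  %v = load i32, i32 addrspace(1)* %cast, align 4
  ret i32 %v
}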
@@ -192,8 +200,19 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) {
           return true;
       break;
     }
-    default:
+    case Instruction::Load:
+    case Instruction::AtomicRMW:
+    case Instruction::Ret:
+      // These instructions take an address operand, but have load-like or
+      // other innocuous behavior that should not trigger a stack protector.
+      // atomicrmw conceptually has both load and store semantics, but the
+      // value being stored must be integer; so if a pointer is being stored,
+      // we'll catch it in the PtrToInt case above.
       break;
+    default:
+      // Conservatively return true for any instruction that takes an address
+      // operand, but is not handled above.
+      return true;
     }
   }
   return false;
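With the default case now returning true, any pointer use that is not explicitly listed is conservatively treated as taking the address. An illustrative example (assumed, not from the patch): an icmp on the alloca's address is not one of the handled opcodes in this version, so it falls through to the default and the function is given a protector even though the comparison itself cannot clobber the frame.

define i1 @unhandled_use_is_conservative() sspstrong {
entry:
  %slot = alloca i32, align 4
  ; icmp is not listed in the switch, so it hits the conservative
  ; default and a stack protector is requested.
  %cmp = icmp eq i32* %slot, null
  ret i1 %cmp
}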
llvm/test/CodeGen/X86/stack-protector-2.ll (new file, 165 lines)
@@ -0,0 +1,165 @@
; RUN: llc -mtriple=x86_64-pc-linux-gnu -start-before=stack-protector -stop-after=stack-protector -o - < %s | FileCheck %s
; Bugs 42238/43308: Test some additional situations not caught previously.

define void @store_captures() #0 {
; CHECK-LABEL: @store_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT: store i32* [[A]], i32** [[J]], align 8
; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
; CHECK: CallStackCheckFailBlk:
; CHECK-NEXT: call void @__stack_chk_fail()
; CHECK-NEXT: unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  store i32* %a, i32** %j, align 8
  ret void
}

define i32* @non_captures() #0 {
; load, atomicrmw, and ret do not trigger a stack protector.
; CHECK-LABEL: @non_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: [[ATOM:%.*]] = atomicrmw add i32* [[A]], i32 1 seq_cst
; CHECK-NEXT: ret i32* [[A]]
;
entry:
  %a = alloca i32, align 4
  %load = load i32, i32* %a, align 4
  %atom = atomicrmw add i32* %a, i32 1 seq_cst
  ret i32* %a
}

define void @store_addrspacecast_captures() #0 {
; CHECK-LABEL: @store_addrspacecast_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[J:%.*]] = alloca i32 addrspace(1)*, align 8
; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT: [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
; CHECK-NEXT: store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
; CHECK: CallStackCheckFailBlk:
; CHECK-NEXT: call void @__stack_chk_fail()
; CHECK-NEXT: unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32 addrspace(1)*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4
  %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
  store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
  ret void
}

define void @cmpxchg_captures() #0 {
; CHECK-LABEL: @cmpxchg_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* null, i32* [[A]] seq_cst monotonic
; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
; CHECK: CallStackCheckFailBlk:
; CHECK-NEXT: call void @__stack_chk_fail()
; CHECK-NEXT: unreachable
;
entry:
  %retval = alloca i32, align 4
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  store i32 0, i32* %retval
  %load = load i32, i32* %a, align 4
  %add = add nsw i32 %load, 1
  store i32 %add, i32* %a, align 4

  cmpxchg i32** %j, i32* null, i32* %a seq_cst monotonic
  ret void
}

define void @memset_captures(i64 %c) #0 {
; CHECK-LABEL: @memset_captures(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
; CHECK-NEXT: [[CADDR:%.*]] = alloca i64, align 8
; CHECK-NEXT: store i64 %c, i64* [[CADDR]], align 8
; CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
; CHECK-NEXT: [[IPTR:%.*]] = bitcast i32* [[I]] to i8*
; CHECK-NEXT: [[COUNT:%.*]] = load i64, i64* [[CADDR]], align 8
; CHECK-NEXT: call void @llvm.memset.p0i8.i64(i8* align 4 [[IPTR]], i8 0, i64 [[COUNT]], i1 false)
; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
; CHECK: SP_return:
; CHECK-NEXT: ret void
; CHECK: CallStackCheckFailBlk:
; CHECK-NEXT: call void @__stack_chk_fail()
; CHECK-NEXT: unreachable
;
entry:
  %c.addr = alloca i64, align 8
  store i64 %c, i64* %c.addr, align 8
  %i = alloca i32, align 4
  %i.ptr = bitcast i32* %i to i8*
  %count = load i64, i64* %c.addr, align 8
  call void @llvm.memset.p0i8.i64(i8* align 4 %i.ptr, i8 0, i64 %count, i1 false)
  ret void
}

declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg)

attributes #0 = { sspstrong }