[SystemZ] Add support for __builtin_setjmp and __builtin_longjmp (#116642)

Implementation of __builtin_setjmp and __builtin_longjmp for SystemZ.

parent 01d8e0fc75
commit 030bbc92a7
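For context, a minimal usage sketch of what this patch enables (editor-provided illustration, not part of the diff; the buffer and function names below are arbitrary): __builtin_setjmp saves a small context into a user-supplied buffer and returns 0, and a later __builtin_longjmp on the same buffer transfers control back so that the setjmp call appears to return 1.

// Minimal sketch, compiled for s390x with clang. Only the first five
// pointer-sized slots of the buffer are used by the SystemZ lowering.
#include <cstdio>

static void *jump_buffer[20];

static void bounce() {
  __builtin_longjmp(jump_buffer, 1);   // the second argument must be 1
}

int main() {
  if (__builtin_setjmp(jump_buffer) == 0) {
    std::puts("direct return from __builtin_setjmp");
    bounce();                          // jumps back; setjmp now yields 1
  } else {
    std::puts("returned via __builtin_longjmp");
  }
  return 0;
}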
@ -247,6 +247,8 @@ public:
    return RegNo < 4 ? 6 + RegNo : -1;
  }

  bool hasSjLjLowering() const override { return true; }

  std::pair<unsigned, unsigned> hardwareInterferenceSizes() const override {
    return std::make_pair(256, 256);
  }
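The new hasSjLjLowering() override above is the frontend-side gate for these builtins: clang only accepts __builtin_setjmp / __builtin_longjmp when the target reports SjLj lowering support. The sketch below is an assumption about how such a Sema-side check is typically written (helper name is invented; the diagnostic IDs are assumed, not taken from this patch):

// Hypothetical helper modelled on clang's Sema checking of these builtins.
#include "clang/AST/Expr.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Sema/Sema.h"

using namespace clang;

static bool checkSjLjBuiltinSupported(Sema &S, CallExpr *TheCall, bool IsSetjmp) {
  if (!S.Context.getTargetInfo().hasSjLjLowering()) {
    // Diagnostic IDs assumed; reject the call on targets without SjLj lowering.
    S.Diag(TheCall->getBeginLoc(), IsSetjmp
                                       ? diag::err_builtin_setjmp_unsupported
                                       : diag::err_builtin_longjmp_unsupported);
    return false;
  }
  return true; // SystemZ now reaches this path thanks to the override above.
}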
@ -4860,6 +4860,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
    // Buffer is a void**.
    Address Buf = EmitPointerWithAlignment(E->getArg(0));

    if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
      // On this target, the back end fills in the context buffer completely.
      // It doesn't really matter if the frontend stores to the buffer before
      // calling setjmp; the back end is going to overwrite it anyway.
      Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
      return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
    }

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr = Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
clang/test/CodeGen/SystemZ/builtin-setjmp-logjmp.c (new file, 25 lines)
@ -0,0 +1,25 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang --target=s390x-linux -S -emit-llvm -o - %s | FileCheck %s

void *buf[20];
// CHECK-LABEL: define dso_local void @foo(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.eh.sjlj.setjmp(ptr @buf)
// CHECK-NEXT: ret void
//
void foo()
{
  __builtin_setjmp (buf);
}

// CHECK-LABEL: define dso_local void @foo1(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(ptr @buf)
// CHECK-NEXT: unreachable
//
void foo1()
{
  __builtin_longjmp (buf, 1);
}
@ -374,10 +374,11 @@ overall functioning of this intrinsic is compatible with the GCC
 to interoperate.

 The single parameter is a pointer to a five word buffer in which the calling
-context is saved. The front end places the frame pointer in the first word, and
-the target implementation of this intrinsic should place the destination address
-for a `llvm.eh.sjlj.longjmp`_ in the second word. The following three words are
-available for use in a target-specific manner.
+context is saved. The format and contents of the buffer are target-specific.
+On certain targets (ARM, PowerPC, VE, X86), the front end places the
+frame pointer in the first word and the stack pointer in the third word,
+while the target implementation of this intrinsic fills in the remaining
+words. On other targets (SystemZ), saving the calling context to the buffer is left completely to the target implementation.

 .. _llvm.eh.sjlj.longjmp:
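For the SystemZ lowering added by this patch, the five words of the buffer end up being used as summarized below. This is an editor-provided recap derived from the FPOffset/LabelOffset/BCOffset/SPOffset/LPOffset constants and the test comments later in this diff; offsets assume 64-bit pointers, and the namespace name is purely illustrative.

// Summary sketch of the SystemZ jmp_buf slot usage (one 8-byte slot per word).
namespace systemz_sjlj_layout {
constexpr int FPOffset    = 0;   // slot 1: frame pointer (only if the function has one)
constexpr int LabelOffset = 8;   // slot 2: address that longjmp branches to
constexpr int BCOffset    = 16;  // slot 3: backchain value (only with -mbackchain)
constexpr int SPOffset    = 24;  // slot 4: stack pointer
constexpr int LPOffset    = 32;  // slot 5: literal pool pointer (%r13); written by GCC,
                                 //         read but never written by this LLVM lowering
} // namespace systemz_sjlj_layout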
@ -705,6 +705,11 @@ void SystemZAsmPrinter::emitInstruction(const MachineInstr *MI) {
    return;
  }

  // EH_SjLj_Setup is a dummy terminator instruction of size 0.
  // It is used to handle the register clobbers for builtin setjmp.
  case SystemZ::EH_SjLj_Setup:
    return;

  default:
    Lower.lower(MI, LoweredMI);
    break;
@ -751,6 +751,11 @@ SystemZTargetLowering::SystemZTargetLowering(const TargetMachine &TM,
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We are not using SJLJ for exception handling; these nodes are implemented
  // solely to support use of __builtin_setjmp / __builtin_longjmp.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to use MVC in preference to even a single load/store pair.
  MaxStoresPerMemcpy = Subtarget.hasVector() ? 2 : 0;
  MaxStoresPerMemcpyOptSize = 0;
@ -940,6 +945,240 @@ bool SystemZTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
  return SystemZVectorConstantInfo(Imm).isVectorConstantLegal(Subtarget);
}

MachineBasicBlock *
SystemZTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const {
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const SystemZRegisterInfo *TRI = Subtarget.getRegisterInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register restoreDstReg = MRI.createVirtualRegister(RC);

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate the blocks below.
  // Algorithm:
  //
  //             ---------
  //            | thisMBB |
  //             ---------
  //                 |
  //     ------------------------
  //    |                        |
  //  ----------         ---------------
  // | mainMBB  |        |  restoreMBB  |
  // |  v = 0   |        |    v = 1     |
  //  ----------         ---------------
  //      |                      |
  //      -------------------------
  //                  |
  //     -----------------------------
  //    |           sinkMBB           |
  //    | phi(v_mainMBB,v_restoreMBB) |
  //     -----------------------------
  // thisMBB:
  //  buf[FPOffset] = Frame Pointer if hasFP.
  //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB.
  //  buf[BCOffset] = Backchain value if building with -mbackchain.
  //  buf[SPOffset] = Stack Pointer.
  //  buf[LPOffset] = never written here (gcc always stores R13 in this slot).
  //  SjLjSetup restoreMBB
  // mainMBB:
  //  v_main = 0
  // sinkMBB:
  //  v = phi(v_main, v_restore)
  // restoreMBB:
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);

  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);
  restoreMBB->setMachineBlockAddressTaken();

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  const int64_t FPOffset = 0;                         // Slot 1.
  const int64_t LabelOffset = 1 * PVT.getStoreSize(); // Slot 2.
  const int64_t BCOffset = 2 * PVT.getStoreSize();    // Slot 3.
  const int64_t SPOffset = 3 * PVT.getStoreSize();    // Slot 4.

  // Buf address.
  Register BufReg = MI.getOperand(1).getReg();

  const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
  unsigned LabelReg = MRI.createVirtualRegister(PtrRC);

  // Prepare IP for longjmp.
  BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::LARL), LabelReg)
      .addMBB(restoreMBB);
  // Store IP for return from jmp, slot 2, offset = 1.
  BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::STG))
      .addReg(LabelReg)
      .addReg(BufReg)
      .addImm(LabelOffset)
      .addReg(0);

  auto *SpecialRegs = Subtarget.getSpecialRegisters();
  bool HasFP = Subtarget.getFrameLowering()->hasFP(*MF);
  if (HasFP) {
    BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::STG))
        .addReg(SpecialRegs->getFramePointerRegister())
        .addReg(BufReg)
        .addImm(FPOffset)
        .addReg(0);
  }

  // Store SP.
  BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::STG))
      .addReg(SpecialRegs->getStackPointerRegister())
      .addReg(BufReg)
      .addImm(SPOffset)
      .addReg(0);

  // Slot 3 (offset = 2): backchain value (if building with -mbackchain).
  bool BackChain = MF->getSubtarget<SystemZSubtarget>().hasBackChain();
  if (BackChain) {
    Register BCReg = MRI.createVirtualRegister(RC);
    auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
    MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::LG), BCReg)
              .addReg(SpecialRegs->getStackPointerRegister())
              .addImm(TFL->getBackchainOffset(*MF))
              .addReg(0);

    BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::STG))
        .addReg(BCReg)
        .addReg(BufReg)
        .addImm(BCOffset)
        .addReg(0);
  }

  // Setup.
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(SystemZ::EH_SjLj_Setup))
            .addMBB(restoreMBB);

  const SystemZRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  MIB.addRegMask(RegInfo->getNoPreservedMask());

  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  BuildMI(mainMBB, DL, TII->get(SystemZ::LHI), mainDstReg).addImm(0);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(SystemZ::PHI), DstReg)
      .addReg(mainDstReg)
      .addMBB(mainMBB)
      .addReg(restoreDstReg)
      .addMBB(restoreMBB);

  // restoreMBB.
  BuildMI(restoreMBB, DL, TII->get(SystemZ::LHI), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(SystemZ::J)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI.eraseFromParent();

  return sinkMBB;
}

MachineBasicBlock *
SystemZTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const {

  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  MVT PVT = getPointerTy(MF->getDataLayout());
  assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
  Register BufReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(BufReg);
  auto *SpecialRegs = Subtarget.getSpecialRegisters();

  Register Tmp = MRI.createVirtualRegister(RC);
  Register BCReg = MRI.createVirtualRegister(RC);

  MachineInstrBuilder MIB;

  const int64_t FPOffset = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t BCOffset = 2 * PVT.getStoreSize();
  const int64_t SPOffset = 3 * PVT.getStoreSize();
  const int64_t LPOffset = 4 * PVT.getStoreSize();

  MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::LG), Tmp)
            .addReg(BufReg)
            .addImm(LabelOffset)
            .addReg(0);

  MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::LG),
                SpecialRegs->getFramePointerRegister())
            .addReg(BufReg)
            .addImm(FPOffset)
            .addReg(0);

  // We restore R13 even though this LLVM setjmp never stores it, because gcc
  // always stores R13 in __builtin_setjmp and the program may mix a gcc
  // setjmp with an llvm longjmp.
  MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::LG), SystemZ::R13D)
            .addReg(BufReg)
            .addImm(LPOffset)
            .addReg(0);

  bool BackChain = MF->getSubtarget<SystemZSubtarget>().hasBackChain();
  if (BackChain) {
    MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::LG), BCReg)
              .addReg(BufReg)
              .addImm(BCOffset)
              .addReg(0);
  }

  MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::LG),
                SpecialRegs->getStackPointerRegister())
            .addReg(BufReg)
            .addImm(SPOffset)
            .addReg(0);

  if (BackChain) {
    auto *TFL = Subtarget.getFrameLowering<SystemZFrameLowering>();
    BuildMI(*MBB, MI, DL, TII->get(SystemZ::STG))
        .addReg(BCReg)
        .addReg(SpecialRegs->getStackPointerRegister())
        .addImm(TFL->getBackchainOffset(*MF))
        .addReg(0);
  }

  MIB = BuildMI(*MBB, MI, DL, TII->get(SystemZ::BR)).addReg(Tmp);

  MI.eraseFromParent();
  return MBB;
}

/// Returns true if stack probing through inline assembly is requested.
bool SystemZTargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
  // If the function specifically requests inline stack probes, emit them.
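The R13 handling in emitEHSjLjLongJmp above exists for interoperability: a jmp_buf filled by GCC's __builtin_setjmp always carries %r13 in slot 5, and that buffer may later be consumed by a Clang-built __builtin_longjmp (or vice versa). A hypothetical mixed-toolchain scenario, sketched as two translation units (names invented; this is an illustration of the rationale, not code from the patch):

// tu_gcc.cpp -- imagined to be compiled with GCC: its setjmp stores %r13 into slot 5.
extern void *shared_buf[20];
extern void jump_elsewhere();
int enter() {
  if (__builtin_setjmp(shared_buf) == 0) {
    jump_elsewhere();   // defined in the Clang-compiled unit below
    return 0;
  }
  return 1;             // reached via the Clang-compiled longjmp
}

// tu_clang.cpp -- imagined to be compiled with Clang using this patch: the longjmp
// lowering reloads %r13 from slot 5, so the GCC-produced buffer round-trips.
void *shared_buf[20];
void jump_elsewhere() {
  __builtin_longjmp(shared_buf, 1);
}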
@ -6292,6 +6531,14 @@ SDValue SystemZTargetLowering::LowerOperation(SDValue Op,
    return lowerGET_ROUNDING(Op, DAG);
  case ISD::READCYCLECOUNTER:
    return lowerREADCYCLECOUNTER(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:
  case ISD::EH_SJLJ_LONGJMP:
    // These operations are legal on our platform, but we cannot actually
    // set the operation action to Legal as common code would treat this
    // as equivalent to Expand. Instead, we keep the operation action at
    // Custom and just leave them unchanged here.
    return Op;

  default:
    llvm_unreachable("Unexpected node to lower");
  }
@ -9733,6 +9980,10 @@ MachineBasicBlock *SystemZTargetLowering::EmitInstrWithCustomInserter(

  case SystemZ::PROBED_ALLOCA:
    return emitProbedAlloca(MI, MBB);
  case SystemZ::EH_SjLj_SetJmp:
    return emitEHSjLjSetJmp(MI, MBB);
  case SystemZ::EH_SjLj_LongJmp:
    return emitEHSjLjLongJmp(MI, MBB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
@ -476,6 +476,12 @@ public:
    // LD, and having the full constant in memory enables reg/mem opcodes.
    return VT != MVT::f64;
  }
  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                      MachineBasicBlock *MBB) const;

  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

  bool hasInlineStackProbe(const MachineFunction &MF) const override;
  AtomicExpansionKind shouldCastAtomicLoadInIR(LoadInst *LI) const override;
  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override;
@ -1862,6 +1862,23 @@ let mayLoad = 1, mayStore = 1, Defs = [CC] in {
  }
}

//--------------------------------------------------------------------------
// Setjmp/Longjmp.
//--------------------------------------------------------------------------
let isBarrier = 1, hasNoSchedulingInfo = 1 in {
  let hasSideEffects = 1, usesCustomInserter = 1 in {
    def EH_SjLj_SetJmp : Pseudo<(outs GR32:$dst), (ins ADDR64:$R2),
                                [(set GR32:$dst, (z_eh_sjlj_setjmp ADDR64:$R2))]>;
    let isTerminator = 1 in {
      def EH_SjLj_LongJmp : Pseudo<(outs), (ins ADDR64:$R2),
                                   [(z_eh_sjlj_longjmp ADDR64:$R2)]>;
    }
  }
  let isTerminator = 1, isCodeGenOnly = 1, Size = 0 in {
    def EH_SjLj_Setup : Pseudo<(outs), (ins brtarget32:$dst), []>;
  }
}

//===----------------------------------------------------------------------===//
// Message-security assist
//===----------------------------------------------------------------------===//
@ -220,7 +220,10 @@ static unsigned getInstSizeInBytes(const MachineInstr &MI,
          MI.isImplicitDef() || MI.getOpcode() == TargetOpcode::MEMBARRIER ||
          // These have a size that may be zero:
          MI.isInlineAsm() || MI.getOpcode() == SystemZ::STACKMAP ||
-         MI.getOpcode() == SystemZ::PATCHPOINT) &&
+         MI.getOpcode() == SystemZ::PATCHPOINT ||
+         // EH_SjLj_Setup is a dummy terminator instruction of size 0.
+         // It is used to handle the register clobbers for builtin setjmp.
+         MI.getOpcode() == SystemZ::EH_SjLj_Setup) &&
         "Missing size value for instruction.");
  return Size;
}
@ -238,6 +238,12 @@ def SDT_ZTest : SDTypeProfile<1, 2,
                              [SDTCisVT<0, i32>,
                               SDTCisVT<2, i64>]>;

def SDT_ZSetJmp  : SDTypeProfile<1, 1,
                                 [SDTCisInt<0>,
                                  SDTCisPtrTy<1>]>;
def SDT_ZLongJmp : SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;

//===----------------------------------------------------------------------===//
// Node definitions
//===----------------------------------------------------------------------===//
@ -314,6 +320,12 @@ def z_stckf : SDNode<"SystemZISD::STCKF", SDT_ZStoreInherent,

def z_tdc : SDNode<"SystemZISD::TDC", SDT_ZTest>;

def z_eh_sjlj_setjmp  : SDNode<"ISD::EH_SJLJ_SETJMP", SDT_ZSetJmp,
                               [SDNPHasChain, SDNPSideEffect]>;
def z_eh_sjlj_longjmp : SDNode<"ISD::EH_SJLJ_LONGJMP", SDT_ZLongJmp,
                               [SDNPHasChain, SDNPSideEffect]>;

// Defined because the index is an i32 rather than a pointer.
def z_vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
                             SDT_ZInsertVectorElt>;
@ -254,6 +254,10 @@ SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
  return Regs->getCallPreservedMask(MF, CC);
}

const uint32_t *SystemZRegisterInfo::getNoPreservedMask() const {
  return CSR_SystemZ_NoRegs_RegMask;
}

BitVector
SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
@ -161,6 +161,7 @@ public:
  const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
  const uint32_t *getCallPreservedMask(const MachineFunction &MF,
                                       CallingConv::ID CC) const override;
  const uint32_t *getNoPreservedMask() const override;
  BitVector getReservedRegs(const MachineFunction &MF) const override;
  bool eliminateFrameIndex(MachineBasicBlock::iterator MI,
                           int SPAdj, unsigned FIOperandNum,
llvm/test/CodeGen/SystemZ/builtin-longjmp.ll (new file, 49 lines)
@ -0,0 +1,49 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; Test longjmp load from jmp_buf.
; Frame pointer from Slot 1.
; Jump address from Slot 2.
; Backchain Value from Slot 3.
; Stack Pointer from Slot 4.
; Literal Pool Pointer from Slot 5.

; RUN: llc < %s -mtriple=s390x-linux-gnu -O2 | FileCheck %s

@buf = global [20 x ptr] zeroinitializer, align 8

define void @foo() {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r11, %r15, 88(%r15)
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: lg %r2, 8(%r1)
; CHECK-NEXT: lg %r11, 0(%r1)
; CHECK-NEXT: lg %r13, 32(%r1)
; CHECK-NEXT: lg %r15, 24(%r1)
; CHECK-NEXT: br %r2
entry:
  tail call void @llvm.eh.sjlj.longjmp(ptr nonnull @buf)
  unreachable
}

define void @bar() "backchain" {
; CHECK-LABEL: bar:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r11, %r15, 88(%r15)
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: lg %r2, 8(%r1)
; CHECK-NEXT: lg %r11, 0(%r1)
; CHECK-NEXT: lg %r13, 32(%r1)
; CHECK-NEXT: lg %r3, 16(%r1)
; CHECK-NEXT: lg %r15, 24(%r1)
; CHECK-NEXT: stg %r3, 0(%r15)
; CHECK-NEXT: br %r2
entry:
  tail call void @llvm.eh.sjlj.longjmp(ptr nonnull @buf)
  unreachable
}
llvm/test/CodeGen/SystemZ/builtin-setjmp-alloca.ll (new file, 156 lines)
@ -0,0 +1,156 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; Test setjmp store to jmp_buf when frame pointer is used and saved
; because of variable size alloca.
; Frame Pointer is stored in slot 1.
; Return address in slot 2.
; Backchain value is stored in slot 3 for -mbackchain option.
; Stack Pointer in slot 4.
; Clobber %r6-%r15, %f8-%f15.

; RUN: llc < %s -mtriple=s390x-linux-gnu -O2 | FileCheck %s

declare i32 @llvm.eh.sjlj.setjmp(ptr)
@buf = global [20 x ptr] zeroinitializer, align 8

define signext i32 @foo() "frame-pointer"="all" {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
; CHECK-NEXT: .cfi_offset %f11, -192
; CHECK-NEXT: .cfi_offset %f12, -200
; CHECK-NEXT: .cfi_offset %f13, -208
; CHECK-NEXT: .cfi_offset %f14, -216
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: la %r0, 160(%r11)
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: stg %r0, 168(%r11)
; CHECK-NEXT: mvhi 160(%r11), 10
; CHECK-NEXT: larl %r0, .LBB0_1
; CHECK-NEXT: stg %r0, 8(%r1)
; CHECK-NEXT: stg %r11, 0(%r1)
; CHECK-NEXT: stg %r15, 24(%r1)
; CHECK-NEXT: .LBB0_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB0_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
  %val = alloca ptr, align 8
  %0 = alloca i8, i64 4, align 8
  store ptr %0, ptr %val, align 8
  %1 = load ptr, ptr %val, align 8
  store volatile i32 10, ptr %1, align 4
  %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @buf)
  %3 = load ptr, ptr %val, align 8
  %4 = load volatile i32, ptr %3, align 4
  ret i32 %4
}

define signext i32 @foo1() "backchain" "frame-pointer"="all" {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: lgr %r1, %r15
; CHECK-NEXT: aghi %r15, -240
; CHECK-NEXT: .cfi_def_cfa_offset 400
; CHECK-NEXT: stg %r1, 0(%r15)
; CHECK-NEXT: lgr %r11, %r15
; CHECK-NEXT: .cfi_def_cfa_register %r11
; CHECK-NEXT: std %f8, 232(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f9, 224(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f10, 216(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f11, 208(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f12, 200(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f13, 192(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f14, 184(%r11) # 8-byte Folded Spill
; CHECK-NEXT: std %f15, 176(%r11) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
; CHECK-NEXT: .cfi_offset %f11, -192
; CHECK-NEXT: .cfi_offset %f12, -200
; CHECK-NEXT: .cfi_offset %f13, -208
; CHECK-NEXT: .cfi_offset %f14, -216
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: la %r0, 160(%r11)
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: stg %r0, 168(%r11)
; CHECK-NEXT: mvhi 160(%r11), 10
; CHECK-NEXT: larl %r0, .LBB1_1
; CHECK-NEXT: stg %r0, 8(%r1)
; CHECK-NEXT: stg %r11, 0(%r1)
; CHECK-NEXT: stg %r15, 24(%r1)
; CHECK-NEXT: lg %r0, 0(%r15)
; CHECK-NEXT: stg %r0, 16(%r1)
; CHECK-NEXT: .LBB1_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: lg %r1, 168(%r11)
; CHECK-NEXT: lgf %r2, 0(%r1)
; CHECK-NEXT: ld %f8, 232(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f9, 224(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f10, 216(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f11, 208(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f12, 200(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f13, 192(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f14, 184(%r11) # 8-byte Folded Reload
; CHECK-NEXT: ld %f15, 176(%r11) # 8-byte Folded Reload
; CHECK-NEXT: lmg %r6, %r15, 288(%r11)
; CHECK-NEXT: br %r14
entry:
  %val = alloca ptr, align 8
  %0 = alloca i8, i64 4, align 8
  store ptr %0, ptr %val, align 8
  %1 = load ptr, ptr %val, align 8
  store volatile i32 10, ptr %1, align 4
  %2 = call i32 @llvm.eh.sjlj.setjmp(ptr @buf)
  %3 = load ptr, ptr %val, align 8
  %4 = load volatile i32, ptr %3, align 4
  ret i32 %4
}
llvm/test/CodeGen/SystemZ/builtin-setjmp-spills.ll (new file, 254 lines)
@ -0,0 +1,254 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; Simulate register pressure around setjmp call and check all virtual registers
; are saved to stack before setjmp call and restored from stack after the call.
; And these registers are not live across the setjmp call.
; setjmp storing to jmp_buf.
; Return address in slot 2.
; Stack Pointer in slot 4.
; Clobber %r6-%r15, %f8-%f15.

; RUN: llc < %s -mtriple=s390x-linux-gnu -O3 | FileCheck %s

declare i32 @llvm.eh.sjlj.setjmp(ptr)

@t = global i32 0, align 4
@s = global i32 0, align 4
@r = global i32 0, align 4
@q = global i32 0, align 4
@p = global i32 0, align 4
@o = global i32 0, align 4
@n = global i32 0, align 4
@m = global i32 0, align 4
@l = global i32 0, align 4
@k = global i32 0, align 4
@j = global i32 0, align 4
@i = global i32 0, align 4
@h = global i32 0, align 4
@g = global i32 0, align 4
@f = global i32 0, align 4
@e = global i32 0, align 4
@d = global i32 0, align 4
@c = global i32 0, align 4
@b = global i32 0, align 4
@a = global i32 0, align 4
@buf = global [10 x ptr] zeroinitializer, align 8

define signext i32 @func() {
; CHECK-LABEL: func:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -384
; CHECK-NEXT: .cfi_def_cfa_offset 544
; CHECK-NEXT: std %f8, 376(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f9, 368(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f10, 360(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f11, 352(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f12, 344(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f13, 336(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f14, 328(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f15, 320(%r15) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
; CHECK-NEXT: .cfi_offset %f11, -192
; CHECK-NEXT: .cfi_offset %f12, -200
; CHECK-NEXT: .cfi_offset %f13, -208
; CHECK-NEXT: .cfi_offset %f14, -216
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: lgrl %r1, t@GOT
; CHECK-NEXT: lgrl %r2, s@GOT
; CHECK-NEXT: stg %r1, 312(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, r@GOT
; CHECK-NEXT: lgrl %r3, q@GOT
; CHECK-NEXT: stg %r2, 304(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, p@GOT
; CHECK-NEXT: stg %r1, 296(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: stg %r3, 288(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, o@GOT
; CHECK-NEXT: stg %r2, 280(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, n@GOT
; CHECK-NEXT: lgrl %r3, m@GOT
; CHECK-NEXT: stg %r1, 272(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, l@GOT
; CHECK-NEXT: stg %r2, 264(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: stg %r3, 256(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, k@GOT
; CHECK-NEXT: stg %r1, 248(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, j@GOT
; CHECK-NEXT: lgrl %r3, i@GOT
; CHECK-NEXT: stg %r2, 240(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, h@GOT
; CHECK-NEXT: stg %r1, 232(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: stg %r3, 224(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r1, g@GOT
; CHECK-NEXT: stg %r2, 216(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r2, f@GOT
; CHECK-NEXT: lgrl %r3, e@GOT
; CHECK-NEXT: stg %r1, 208(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r1, d@GOT
; CHECK-NEXT: stg %r2, 200(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: stg %r3, 192(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: lgrl %r2, c@GOT
; CHECK-NEXT: stg %r1, 184(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r1), 1
; CHECK-NEXT: lgrl %r3, b@GOT
; CHECK-NEXT: lgrl %r4, a@GOT
; CHECK-NEXT: stg %r2, 176(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r2), 1
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: stg %r3, 168(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r3), 1
; CHECK-NEXT: stg %r4, 160(%r15) # 8-byte Folded Spill
; CHECK-NEXT: mvhi 0(%r4), 1
; CHECK-NEXT: larl %r0, .LBB0_2
; CHECK-NEXT: stg %r0, 8(%r1)
; CHECK-NEXT: stg %r15, 24(%r1)
; CHECK-NEXT: .LBB0_1: # %entry
; CHECK-NEXT: lhi %r0, 0
; CHECK-NEXT: j .LBB0_3
; CHECK-NEXT: .LBB0_2: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: lhi %r0, 1
; CHECK-NEXT: .LBB0_3: # %entry
; CHECK-NEXT: lg %r1, 160(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 168(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 176(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 184(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 192(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 200(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 208(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 216(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 224(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 232(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 240(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 248(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 256(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 264(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 272(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 280(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 288(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 296(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 304(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lg %r1, 312(%r15) # 8-byte Folded Reload
; CHECK-NEXT: a %r0, 0(%r1)
; CHECK-NEXT: lgfr %r2, %r0
; CHECK-NEXT: ld %f8, 376(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f9, 368(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f10, 360(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f11, 352(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f12, 344(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f13, 336(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f14, 328(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f15, 320(%r15) # 8-byte Folded Reload
; CHECK-NEXT: lmg %r6, %r15, 432(%r15)
; CHECK-NEXT: br %r14
entry:
  store i32 1, ptr @t, align 4
  store i32 1, ptr @s, align 4
  store i32 1, ptr @r, align 4
  store i32 1, ptr @q, align 4
  store i32 1, ptr @p, align 4
  store i32 1, ptr @o, align 4
  store i32 1, ptr @n, align 4
  store i32 1, ptr @m, align 4
  store i32 1, ptr @l, align 4
  store i32 1, ptr @k, align 4
  store i32 1, ptr @j, align 4
  store i32 1, ptr @i, align 4
  store i32 1, ptr @h, align 4
  store i32 1, ptr @g, align 4
  store i32 1, ptr @f, align 4
  store i32 1, ptr @e, align 4
  store i32 1, ptr @d, align 4
  store i32 1, ptr @c, align 4
  store i32 1, ptr @b, align 4
  store i32 1, ptr @a, align 4
  %0 = tail call i32 @llvm.eh.sjlj.setjmp(ptr nonnull @buf)
  %1 = load i32, ptr @a, align 4
  %2 = load i32, ptr @b, align 4
  %3 = load i32, ptr @c, align 4
  %4 = load i32, ptr @d, align 4
  %5 = load i32, ptr @e, align 4
  %6 = load i32, ptr @f, align 4
  %7 = load i32, ptr @g, align 4
  %8 = load i32, ptr @h, align 4
  %9 = load i32, ptr @i, align 4
  %10 = load i32, ptr @j, align 4
  %11 = load i32, ptr @k, align 4
  %12 = load i32, ptr @l, align 4
  %13 = load i32, ptr @m, align 4
  %14 = load i32, ptr @n, align 4
  %15 = load i32, ptr @o, align 4
  %16 = load i32, ptr @p, align 4
  %17 = load i32, ptr @q, align 4
  %18 = load i32, ptr @r, align 4
  %19 = load i32, ptr @s, align 4
  %20 = load i32, ptr @t, align 4
  %add = add i32 %1, %0
  %add1 = add i32 %add, %2
  %add2 = add i32 %add1, %3
  %add3 = add i32 %add2, %4
  %add4 = add i32 %add3, %5
  %add5 = add i32 %add4, %6
  %add6 = add i32 %add5, %7
  %add7 = add i32 %add6, %8
  %add8 = add i32 %add7, %9
  %add9 = add i32 %add8, %10
  %add10 = add i32 %add9, %11
  %add11 = add i32 %add10, %12
  %add12 = add i32 %add11, %13
  %add13 = add i32 %add12, %14
  %add14 = add i32 %add13, %15
  %add15 = add i32 %add14, %16
  %add16 = add i32 %add15, %17
  %add17 = add i32 %add16, %18
  %add18 = add i32 %add17, %19
  %add19 = add i32 %add18, %20
  ret i32 %add19
}
llvm/test/CodeGen/SystemZ/builtin-setjmp.ll (new file, 124 lines)
@ -0,0 +1,124 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; Test setjmp store jmp_buf
; Return address in slot 2.
; Backchain value is stored in slot 3 for -mbackchain option.
; Stack Pointer in slot 4.
; Clobber %r6-%r15, %f8-%f15.

; RUN: llc < %s -mtriple=s390x-linux-gnu -O2 | FileCheck %s

declare i32 @llvm.eh.sjlj.setjmp(ptr)
@buf = global [20 x ptr] zeroinitializer, align 8

define void @foo() {
; CHECK-LABEL: foo:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
; CHECK-NEXT: .cfi_offset %f11, -192
; CHECK-NEXT: .cfi_offset %f12, -200
; CHECK-NEXT: .cfi_offset %f13, -208
; CHECK-NEXT: .cfi_offset %f14, -216
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: larl %r0, .LBB0_1
; CHECK-NEXT: stg %r0, 8(%r1)
; CHECK-NEXT: stg %r15, 24(%r1)
; CHECK-NEXT: .LBB0_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB0_2: # %entry
; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
  %0 = tail call i32 @llvm.eh.sjlj.setjmp(ptr nonnull @buf)
  ret void
}

define void @foo1() "backchain" {
; CHECK-LABEL: foo1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: stmg %r6, %r15, 48(%r15)
; CHECK-NEXT: .cfi_offset %r6, -112
; CHECK-NEXT: .cfi_offset %r7, -104
; CHECK-NEXT: .cfi_offset %r8, -96
; CHECK-NEXT: .cfi_offset %r9, -88
; CHECK-NEXT: .cfi_offset %r10, -80
; CHECK-NEXT: .cfi_offset %r11, -72
; CHECK-NEXT: .cfi_offset %r12, -64
; CHECK-NEXT: .cfi_offset %r13, -56
; CHECK-NEXT: .cfi_offset %r14, -48
; CHECK-NEXT: .cfi_offset %r15, -40
; CHECK-NEXT: lgr %r1, %r15
; CHECK-NEXT: aghi %r15, -64
; CHECK-NEXT: .cfi_def_cfa_offset 224
; CHECK-NEXT: stg %r1, 0(%r15)
; CHECK-NEXT: std %f8, 56(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f9, 48(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f10, 40(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f11, 32(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f12, 24(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f13, 16(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f14, 8(%r15) # 8-byte Folded Spill
; CHECK-NEXT: std %f15, 0(%r15) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset %f8, -168
; CHECK-NEXT: .cfi_offset %f9, -176
; CHECK-NEXT: .cfi_offset %f10, -184
; CHECK-NEXT: .cfi_offset %f11, -192
; CHECK-NEXT: .cfi_offset %f12, -200
; CHECK-NEXT: .cfi_offset %f13, -208
; CHECK-NEXT: .cfi_offset %f14, -216
; CHECK-NEXT: .cfi_offset %f15, -224
; CHECK-NEXT: lgrl %r1, buf@GOT
; CHECK-NEXT: larl %r0, .LBB1_1
; CHECK-NEXT: stg %r0, 8(%r1)
; CHECK-NEXT: stg %r15, 24(%r1)
; CHECK-NEXT: lg %r0, 0(%r15)
; CHECK-NEXT: stg %r0, 16(%r1)
; CHECK-NEXT: .LBB1_1: # Block address taken
; CHECK-NEXT: # %entry
; CHECK-NEXT: .LBB1_2: # %entry
; CHECK-NEXT: ld %f8, 56(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f9, 48(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f10, 40(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f11, 32(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f12, 24(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f13, 16(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f14, 8(%r15) # 8-byte Folded Reload
; CHECK-NEXT: ld %f15, 0(%r15) # 8-byte Folded Reload
; CHECK-NEXT: lmg %r6, %r15, 112(%r15)
; CHECK-NEXT: br %r14
entry:
  %0 = tail call i32 @llvm.eh.sjlj.setjmp(ptr nonnull @buf)
  ret void
}