mirror of https://github.com/llvm/llvm-project.git (synced 2025-04-17 18:16:42 +00:00)
[X86] Use Register in FastISel. NFC

Replace 'Reg == 0' with '!Reg'

parent 2317a72489
commit 74ca5799ca
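Background note, not part of the commit: llvm::Register (llvm/include/llvm/CodeGen/Register.h) wraps a plain register number in which 0 is reserved to mean "no register", so '!Reg' tests exactly what 'Reg == 0' used to. The following is a minimal sketch of that convention, a simplified stand-in rather than the real class definition; the id 42 is an arbitrary value for illustration only.

// register_sketch.cpp -- simplified stand-in for llvm::Register.
// The real class has more members; only the "0 means no register"
// convention relevant to this commit is modeled here.
#include <cassert>

class Register {
  unsigned Reg; // encoded register number; 0 is the "no register" sentinel
public:
  constexpr Register(unsigned Val = 0) : Reg(Val) {}
  constexpr bool isValid() const { return Reg != 0; }
  // The implicit conversion to unsigned is what lets '!Reg' (and mixed
  // arithmetic such as 'Reg + i') compile exactly as it did for 'unsigned'.
  constexpr operator unsigned() const { return Reg; }
};

int main() {
  Register None;   // default-constructed: holds the sentinel 0
  Register R(42);  // arbitrary id, for illustration only
  assert(!None);   // replaces the old 'None == 0' comparison
  assert(R && R.isValid());
  return 0;
}

Because the conversion back to unsigned is implicit, call sites that still do arithmetic on register numbers (e.g. 'Reg + VA.getValNo()' in this diff) keep compiling unchanged, which is what makes the cleanup NFC.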
@@ -75,15 +75,15 @@ private:
                            const DebugLoc &DL);

   bool X86FastEmitLoad(MVT VT, X86AddressMode &AM, MachineMemOperand *MMO,
-                       unsigned &ResultReg, unsigned Alignment = 1);
+                       Register &ResultReg, unsigned Alignment = 1);

   bool X86FastEmitStore(EVT VT, const Value *Val, X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);
-  bool X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
+  bool X86FastEmitStore(EVT VT, Register ValReg, X86AddressMode &AM,
                         MachineMemOperand *MMO = nullptr, bool Aligned = false);

-  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, unsigned Src, EVT SrcVT,
-                         unsigned &ResultReg);
+  bool X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, Register Src, EVT SrcVT,
+                         Register &ResultReg);

   bool X86SelectAddress(const Value *V, X86AddressMode &AM);
   bool X86SelectCallAddress(const Value *V, X86AddressMode &AM);
@@ -134,9 +134,9 @@ private:

   bool handleConstantAddresses(const Value *V, X86AddressMode &AM);

-  unsigned X86MaterializeInt(const ConstantInt *CI, MVT VT);
-  unsigned X86MaterializeFP(const ConstantFP *CFP, MVT VT);
-  unsigned X86MaterializeGV(const GlobalValue *GV, MVT VT);
+  Register X86MaterializeInt(const ConstantInt *CI, MVT VT);
+  Register X86MaterializeFP(const ConstantFP *CFP, MVT VT);
+  Register X86MaterializeGV(const GlobalValue *GV, MVT VT);
   Register fastMaterializeConstant(const Constant *C) override;

   Register fastMaterializeAlloca(const AllocaInst *C) override;
@@ -163,9 +163,9 @@ private:
   const MachineInstrBuilder &addFullAddress(const MachineInstrBuilder &MIB,
                                             X86AddressMode &AM);

-  unsigned fastEmitInst_rrrr(unsigned MachineInstOpcode,
-                             const TargetRegisterClass *RC, unsigned Op0,
-                             unsigned Op1, unsigned Op2, unsigned Op3);
+  Register fastEmitInst_rrrr(unsigned MachineInstOpcode,
+                             const TargetRegisterClass *RC, Register Op0,
+                             Register Op1, Register Op2, Register Op3);
 };

 } // end anonymous namespace.
@@ -313,7 +313,7 @@ bool X86FastISel::isTypeLegal(Type *Ty, MVT &VT, bool AllowI1) {
 /// The address is either pre-computed, i.e. Ptr, or a GlobalAddress, i.e. GV.
 /// Return true and the result register by reference if it is possible.
 bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
-                                  MachineMemOperand *MMO, unsigned &ResultReg,
+                                  MachineMemOperand *MMO, Register &ResultReg,
                                   unsigned Alignment) {
   bool HasSSE1 = Subtarget->hasSSE1();
   bool HasSSE2 = Subtarget->hasSSE2();
@@ -475,7 +475,7 @@ bool X86FastISel::X86FastEmitLoad(MVT VT, X86AddressMode &AM,
 /// type VT. The address is either pre-computed, consisted of a base ptr, Ptr
 /// and a displacement offset, or a GlobalAddress,
 /// i.e. V. Return true if it is possible.
-bool X86FastISel::X86FastEmitStore(EVT VT, unsigned ValReg, X86AddressMode &AM,
+bool X86FastISel::X86FastEmitStore(EVT VT, Register ValReg, X86AddressMode &AM,
                                    MachineMemOperand *MMO, bool Aligned) {
   bool HasSSE1 = Subtarget->hasSSE1();
   bool HasSSE2 = Subtarget->hasSSE2();
@@ -686,7 +686,7 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
   }

   Register ValReg = getRegForValue(Val);
-  if (ValReg == 0)
+  if (!ValReg)
     return false;

   return X86FastEmitStore(VT, ValReg, AM, MMO, Aligned);
@@ -695,11 +695,10 @@ bool X86FastISel::X86FastEmitStore(EVT VT, const Value *Val,
 /// X86FastEmitExtend - Emit a machine instruction to extend a value Src of
 /// type SrcVT to type DstVT using the specified extension opcode Opc (e.g.
 /// ISD::SIGN_EXTEND).
-bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT,
-                                    unsigned Src, EVT SrcVT,
-                                    unsigned &ResultReg) {
-  unsigned RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
-  if (RR == 0)
+bool X86FastISel::X86FastEmitExtend(ISD::NodeType Opc, EVT DstVT, Register Src,
+                                    EVT SrcVT, Register &ResultReg) {
+  Register RR = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(), Opc, Src);
+  if (!RR)
     return false;

   ResultReg = RR;
@@ -899,7 +898,7 @@ redo_gep:

   // Pattern-match simple GEPs.
   uint64_t Disp = (int32_t)AM.Disp;
-  unsigned IndexReg = AM.IndexReg;
+  Register IndexReg = AM.IndexReg;
   unsigned Scale = AM.Scale;
   MVT PtrVT = TLI.getValueType(DL, U->getType()).getSimpleVT();

@@ -933,13 +932,12 @@ redo_gep:
           Op = cast<AddOperator>(Op)->getOperand(0);
           continue;
         }
-        if (IndexReg == 0 &&
-            (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
+        if (!IndexReg && (!AM.GV || !Subtarget->isPICStyleRIPRel()) &&
             (S == 1 || S == 2 || S == 4 || S == 8)) {
           // Scaled-index addressing.
           Scale = S;
           IndexReg = getRegForGEPIndex(PtrVT, Op);
-          if (IndexReg == 0)
+          if (!IndexReg)
             return false;
           break;
         }
@@ -1206,7 +1204,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     return false;

   // Build a list of return value registers.
-  SmallVector<unsigned, 4> RetRegs;
+  SmallVector<Register, 4> RetRegs;

   if (Ret->getNumOperands() > 0) {
     SmallVector<ISD::OutputArg, 4> Outs;
@@ -1219,7 +1217,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {

     const Value *RV = Ret->getOperand(0);
     Register Reg = getRegForValue(RV);
-    if (Reg == 0)
+    if (!Reg)
       return false;

     // Only handle a single return value for now.
@@ -1240,7 +1238,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
       return false;

-    unsigned SrcReg = Reg + VA.getValNo();
+    Register SrcReg = Reg + VA.getValNo();
     EVT SrcVT = TLI.getValueType(DL, RV->getType());
     EVT DstVT = VA.getValVT();
     // Special handling for extended integers.
@@ -1290,7 +1288,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     Register Reg = X86MFInfo->getSRetReturnReg();
     assert(Reg &&
            "SRetReturnReg should have been set in LowerFormalArguments()!");
-    unsigned RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
+    Register RetReg = Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX;
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), RetReg).addReg(Reg);
     RetRegs.push_back(RetReg);
@@ -1306,7 +1304,7 @@ bool X86FastISel::X86SelectRet(const Instruction *I) {
     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                   TII.get(Subtarget->is64Bit() ? X86::RET64 : X86::RET32));
   }
-  for (unsigned Reg : RetRegs)
+  for (Register Reg : RetRegs)
     MIB.addReg(Reg, RegState::Implicit);
   return true;
 }
@@ -1345,7 +1343,7 @@ bool X86FastISel::X86SelectLoad(const Instruction *I) {
   if (!X86SelectAddress(Ptr, AM))
     return false;

-  unsigned ResultReg = 0;
+  Register ResultReg;
   if (!X86FastEmitLoad(VT, AM, createMachineMemOperandFor(LI), ResultReg,
                        LI->getAlign().value()))
     return false;
@@ -1402,7 +1400,8 @@ static unsigned X86ChooseCmpImmediateOpcode(EVT VT, const ConstantInt *RHSC) {
 bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
                                      const DebugLoc &CurMIMD) {
   Register Op0Reg = getRegForValue(Op0);
-  if (Op0Reg == 0) return false;
+  if (!Op0Reg)
+    return false;

   // Handle 'null' like i32/i64 0.
   if (isa<ConstantPointerNull>(Op1))
@@ -1424,7 +1423,8 @@ bool X86FastISel::X86FastEmitCompare(const Value *Op0, const Value *Op1, EVT VT,
   if (CompareOpc == 0) return false;

   Register Op1Reg = getRegForValue(Op1);
-  if (Op1Reg == 0) return false;
+  if (!Op1Reg)
+    return false;
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, CurMIMD, TII.get(CompareOpc))
     .addReg(Op0Reg)
     .addReg(Op1Reg);
@@ -1445,7 +1445,7 @@ bool X86FastISel::X86SelectCmp(const Instruction *I) {

   // Try to optimize or fold the cmp.
   CmpInst::Predicate Predicate = optimizeCmpPredicate(CI);
-  unsigned ResultReg = 0;
+  Register ResultReg;
   switch (Predicate) {
   default: break;
   case CmpInst::FCMP_FALSE: {
@@ -1535,7 +1535,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
     return false;

   Register ResultReg = getRegForValue(I->getOperand(0));
-  if (ResultReg == 0)
+  if (!ResultReg)
     return false;

   // Handle zero-extension from i1 to i8, which is common.
@@ -1545,7 +1545,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
     ResultReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
     SrcVT = MVT::i8;

-    if (ResultReg == 0)
+    if (!ResultReg)
       return false;
   }

@@ -1579,7 +1579,7 @@ bool X86FastISel::X86SelectZExt(const Instruction *I) {
   } else if (DstVT != MVT::i8) {
     ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::ZERO_EXTEND,
                            ResultReg);
-    if (ResultReg == 0)
+    if (!ResultReg)
       return false;
   }

@@ -1593,7 +1593,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
     return false;

   Register ResultReg = getRegForValue(I->getOperand(0));
-  if (ResultReg == 0)
+  if (!ResultReg)
     return false;

   // Handle sign-extension from i1 to i8.
@@ -1601,7 +1601,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
   if (SrcVT == MVT::i1) {
     // Set the high bits to zero.
     Register ZExtReg = fastEmitZExtFromI1(MVT::i8, ResultReg);
-    if (ZExtReg == 0)
+    if (!ZExtReg)
       return false;

     // Negate the result to make an 8-bit sign extended value.
@@ -1623,7 +1623,7 @@ bool X86FastISel::X86SelectSExt(const Instruction *I) {
   } else if (DstVT != MVT::i8) {
     ResultReg = fastEmit_r(MVT::i8, DstVT.getSimpleVT(), ISD::SIGN_EXTEND,
                            ResultReg);
-    if (ResultReg == 0)
+    if (!ResultReg)
       return false;
   }

@@ -1730,7 +1730,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
       }
       if (TestOpc) {
         Register OpReg = getRegForValue(TI->getOperand(0));
-        if (OpReg == 0) return false;
+        if (!OpReg)
+          return false;

         BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TestOpc))
             .addReg(OpReg).addImm(1);
@@ -1752,7 +1753,7 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
     // Fake request the condition, otherwise the intrinsic might be completely
     // optimized away.
     Register TmpReg = getRegForValue(BI->getCondition());
-    if (TmpReg == 0)
+    if (!TmpReg)
       return false;

     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(X86::JCC_1))
@@ -1765,11 +1766,12 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
   // Note that i1 essentially gets ANY_EXTEND'ed to i8 where it isn't used
   // in an explicit cast, so make sure to handle that correctly.
   Register OpReg = getRegForValue(BI->getCondition());
-  if (OpReg == 0) return false;
+  if (!OpReg)
+    return false;

   // In case OpReg is a K register, COPY to a GPR
   if (MRI.getRegClass(OpReg) == &X86::VK1RegClass) {
-    unsigned KOpReg = OpReg;
+    Register KOpReg = OpReg;
     OpReg = createResultReg(&X86::GR32RegClass);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
             TII.get(TargetOpcode::COPY), OpReg)
@@ -1786,7 +1788,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) {
 }

 bool X86FastISel::X86SelectShift(const Instruction *I) {
-  unsigned CReg = 0, OpReg = 0;
+  Register CReg;
+  unsigned OpReg;
   const TargetRegisterClass *RC = nullptr;
   if (I->getType()->isIntegerTy(8)) {
     CReg = X86::CL;
@@ -1833,10 +1836,12 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
     return false;

   Register Op0Reg = getRegForValue(I->getOperand(0));
-  if (Op0Reg == 0) return false;
+  if (!Op0Reg)
+    return false;

   Register Op1Reg = getRegForValue(I->getOperand(1));
-  if (Op1Reg == 0) return false;
+  if (!Op1Reg)
+    return false;
   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TargetOpcode::COPY),
           CReg).addReg(Op1Reg);

@@ -1942,10 +1947,10 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
   const DivRemEntry &TypeEntry = OpTable[TypeIndex];
   const DivRemEntry::DivRemResult &OpEntry = TypeEntry.ResultTable[OpIndex];
   Register Op0Reg = getRegForValue(I->getOperand(0));
-  if (Op0Reg == 0)
+  if (!Op0Reg)
     return false;
   Register Op1Reg = getRegForValue(I->getOperand(1));
-  if (Op1Reg == 0)
+  if (!Op1Reg)
     return false;

   // Move op0 into low-order input register.
@@ -1990,7 +1995,7 @@ bool X86FastISel::X86SelectDivRem(const Instruction *I) {
   // won't generate explicit references to the GR8_NOREX registers. If
   // the allocator and/or the backend get enhanced to be more robust in
   // that regard, this can be, and should be, removed.
-  unsigned ResultReg = 0;
+  Register ResultReg;
   if ((I->getOpcode() == Instruction::SRem ||
        I->getOpcode() == Instruction::URem) &&
       OpEntry.DivRemResultReg == X86::AH && Subtarget->is64Bit()) {
@@ -2095,7 +2100,7 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
     // Fake request the condition, otherwise the intrinsic might be completely
     // optimized away.
     Register TmpReg = getRegForValue(Cond);
-    if (TmpReg == 0)
+    if (!TmpReg)
       return false;

     NeedTest = false;
@@ -2108,12 +2113,12 @@ bool X86FastISel::X86FastEmitCMoveSelect(MVT RetVT, const Instruction *I) {
     // whereas lsb is zero. Therefore, we have to truncate Op0Reg to i1 for
     // the select. This is achieved by performing TEST against 1.
     Register CondReg = getRegForValue(Cond);
-    if (CondReg == 0)
+    if (!CondReg)
       return false;

     // In case OpReg is a K register, COPY to a GPR
     if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
-      unsigned KCondReg = CondReg;
+      Register KCondReg = CondReg;
       CondReg = createResultReg(&X86::GR32RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), CondReg)
@@ -2192,7 +2197,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     return false;

   const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
-  unsigned ResultReg;
+  Register ResultReg;

   if (Subtarget->hasAVX512()) {
     // If we have AVX512 we can use a mask compare and masked movss/sd.
@@ -2214,7 +2219,7 @@ bool X86FastISel::X86FastEmitSSESelect(MVT RetVT, const Instruction *I) {
     // LHS in the input. The mask input comes from the compare.
     unsigned MovOpcode =
       (RetVT == MVT::f32) ? X86::VMOVSSZrrk : X86::VMOVSDZrrk;
-    unsigned MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
+    Register MovReg = fastEmitInst_rrrr(MovOpcode, VR128X, RHSReg, CmpReg,
                                         ImplicitDefReg, LHSReg);

     ResultReg = createResultReg(RC);
@@ -2309,12 +2314,12 @@ bool X86FastISel::X86FastEmitPseudoSelect(MVT RetVT, const Instruction *I) {
       return false;
   } else {
     Register CondReg = getRegForValue(Cond);
-    if (CondReg == 0)
+    if (!CondReg)
       return false;

     // In case OpReg is a K register, COPY to a GPR
     if (MRI.getRegClass(CondReg) == &X86::VK1RegClass) {
-      unsigned KCondReg = CondReg;
+      Register KCondReg = CondReg;
       CondReg = createResultReg(&X86::GR32RegClass);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
               TII.get(TargetOpcode::COPY), CondReg)
@@ -2359,7 +2364,7 @@ bool X86FastISel::X86SelectSelect(const Instruction *I) {
     // No need for a select anymore - this is an unconditional move.
     if (Opnd) {
       Register OpReg = getRegForValue(Opnd);
-      if (OpReg == 0)
+      if (!OpReg)
         return false;
       const TargetRegisterClass *RC = TLI.getRegClassFor(RetVT);
       Register ResultReg = createResultReg(RC);
@@ -2404,7 +2409,7 @@ bool X86FastISel::X86SelectIntToFP(const Instruction *I, bool IsSigned) {

   // Select integer to float/double conversion.
   Register OpReg = getRegForValue(I->getOperand(0));
-  if (OpReg == 0)
+  if (!OpReg)
     return false;

   unsigned Opcode;
@@ -2458,10 +2463,10 @@ bool X86FastISel::X86SelectFPExtOrFPTrunc(const Instruction *I,
   bool HasAVX = Subtarget->hasAVX();

   Register OpReg = getRegForValue(I->getOperand(0));
-  if (OpReg == 0)
+  if (!OpReg)
     return false;

-  unsigned ImplicitDefReg;
+  Register ImplicitDefReg;
   if (HasAVX) {
     ImplicitDefReg = createResultReg(RC);
     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
@@ -2566,7 +2571,7 @@ bool X86FastISel::TryEmitSmallMemcpy(X86AddressMode DestAM,
     else
       VT = MVT::i8;

-    unsigned Reg;
+    Register Reg;
     bool RV = X86FastEmitLoad(VT, SrcAM, nullptr, Reg);
     RV &= X86FastEmitStore(VT, Reg, DestAM);
     assert(RV && "Failed to emit load or store??");
@@ -2592,7 +2597,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {

     const Value *Op = II->getArgOperand(0);
     Register InputReg = getRegForValue(Op);
-    if (InputReg == 0)
+    if (!InputReg)
       return false;

     // F16C only allows converting from float to half and from half to float.
@@ -2605,7 +2610,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
       return false;
     }

-    unsigned ResultReg = 0;
+    Register ResultReg;
     const TargetRegisterClass *RC = TLI.getRegClassFor(MVT::v8i16);
     if (IsFloatToHalf) {
       // 'InputReg' is implicitly promoted from register class FR32 to
@@ -2816,11 +2821,11 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     const Value *SrcVal = II->getArgOperand(0);
     Register SrcReg = getRegForValue(SrcVal);

-    if (SrcReg == 0)
+    if (!SrcReg)
       return false;

     const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
-    unsigned ImplicitDefReg = 0;
+    Register ImplicitDefReg;
     if (AVXLevel > 0) {
       ImplicitDefReg = createResultReg(RC);
       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
@@ -2887,10 +2892,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     }

     Register LHSReg = getRegForValue(LHS);
-    if (LHSReg == 0)
+    if (!LHSReg)
       return false;

-    unsigned ResultReg = 0;
+    Register ResultReg;
     // Check if we have an immediate version.
     if (const auto *CI = dyn_cast<ConstantInt>(RHS)) {
       static const uint16_t Opc[2][4] = {
@@ -2910,10 +2915,10 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
         ResultReg = fastEmit_ri(VT, VT, BaseOpc, LHSReg, CI->getZExtValue());
     }

-    unsigned RHSReg;
+    Register RHSReg;
     if (!ResultReg) {
       RHSReg = getRegForValue(RHS);
-      if (RHSReg == 0)
+      if (!RHSReg)
         return false;
       ResultReg = fastEmit_rr(VT, VT, BaseOpc, LHSReg, RHSReg);
     }
@@ -3010,7 +3015,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
         break;
       unsigned Idx = cast<ConstantInt>(Index)->getZExtValue();

-      if (Idx == 0) {
+      if (!Idx) {
         Op = IE->getOperand(1);
         break;
       }
@@ -3018,7 +3023,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
     }

     Register Reg = getRegForValue(Op);
-    if (Reg == 0)
+    if (!Reg)
       return false;

     Register ResultReg = createResultReg(TLI.getRegClassFor(VT));
@@ -3162,7 +3167,7 @@ bool X86FastISel::fastLowerArguments() {
   for (auto const &Arg : F->args()) {
     MVT VT = TLI.getSimpleValueType(DL, Arg.getType());
     const TargetRegisterClass *RC = TLI.getRegClassFor(VT);
-    unsigned SrcReg;
+    MCRegister SrcReg;
     switch (VT.SimpleTy) {
     default: llvm_unreachable("Unexpected value type.");
     case MVT::i32: SrcReg = GPR32ArgRegs[GPRIdx++]; break;
@@ -3281,7 +3286,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     return false;

   SmallVector<MVT, 16> OutVTs;
-  SmallVector<unsigned, 16> ArgRegs;
+  SmallVector<Register, 16> ArgRegs;

   // If this is a constant i1/i8/i16 argument, promote to i32 to avoid an extra
   // instruction. This is safe because it is common to all FastISel supported
@@ -3302,7 +3307,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
       // Codegen this as an argument + "and 1".
       MVT VT;
       auto *TI = dyn_cast<TruncInst>(Val);
-      unsigned ResultReg;
+      Register ResultReg;
       if (TI && TI->getType()->isIntegerTy(1) && CLI.CB &&
           (TI->getParent() == CLI.CB->getParent()) && TI->hasOneUse()) {
         Value *PrevVal = TI->getOperand(0);
@@ -3356,7 +3361,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
     if (ArgVT == MVT::x86mmx)
       return false;

-    unsigned ArgReg = ArgRegs[VA.getValNo()];
+    Register ArgReg = ArgRegs[VA.getValNo()];

     // Promote the value if needed.
     switch (VA.getLocInfo()) {
@@ -3384,7 +3389,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
         ArgReg = fastEmitZExtFromI1(MVT::i8, ArgReg);
         ArgVT = MVT::i8;

-        if (ArgReg == 0)
+        if (!ArgReg)
           return false;
       }

@@ -3588,7 +3593,7 @@ bool X86FastISel::fastLowerCall(CallLoweringInfo &CLI) {
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
     CCValAssign &VA = RVLocs[i];
     EVT CopyVT = VA.getValVT();
-    unsigned CopyReg = ResultReg + i;
+    Register CopyReg = ResultReg + i;
     Register SrcReg = VA.getLocReg();

     // If this is x86-64, and we disabled SSE, we can't return FP values
@@ -3683,7 +3688,8 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
     if (DstVT.bitsLT(SrcVT))
       return X86SelectTrunc(I);
     Register Reg = getRegForValue(I->getOperand(0));
-    if (Reg == 0) return false;
+    if (!Reg)
+      return false;
     updateValueMap(I, Reg);
     return true;
   }
@@ -3722,9 +3728,9 @@ X86FastISel::fastSelectInstruction(const Instruction *I) {
   return false;
 }

-unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
+Register X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
   if (VT > MVT::i64)
-    return 0;
+    return Register();

   uint64_t Imm = CI->getZExtValue();
   if (Imm == 0) {
@@ -3770,7 +3776,7 @@ unsigned X86FastISel::X86MaterializeInt(const ConstantInt *CI, MVT VT) {
   return fastEmitInst_i(Opc, TLI.getRegClassFor(VT), Imm);
 }

-unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
+Register X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
   if (CFP->isNullValue())
     return fastMaterializeFloatZero(CFP);

@@ -3846,13 +3852,13 @@ unsigned X86FastISel::X86MaterializeFP(const ConstantFP *CFP, MVT VT) {
   return ResultReg;
 }

-unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
+Register X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
   // Can't handle large GlobalValues yet.
   if (TM.getCodeModel() != CodeModel::Small &&
       TM.getCodeModel() != CodeModel::Medium)
-    return 0;
+    return Register();
   if (TM.isLargeGlobalValue(GV))
-    return 0;
+    return Register();

   // Materialize addresses with LEA/MOV instructions.
   X86AddressMode AM;
@@ -3881,7 +3887,7 @@ unsigned X86FastISel::X86MaterializeGV(const GlobalValue *GV, MVT VT) {
     }
     return ResultReg;
   }
-  return 0;
+  return Register();
 }

 Register X86FastISel::fastMaterializeConstant(const Constant *C) {
@@ -4034,10 +4040,10 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
   return true;
 }

-unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
+Register X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
                                         const TargetRegisterClass *RC,
-                                        unsigned Op0, unsigned Op1,
-                                        unsigned Op2, unsigned Op3) {
+                                        Register Op0, Register Op1,
+                                        Register Op2, Register Op3) {
   const MCInstrDesc &II = TII.get(MachineInstOpcode);

   Register ResultReg = createResultReg(RC);
@@ -4065,7 +4071,6 @@ unsigned X86FastISel::fastEmitInst_rrrr(unsigned MachineInstOpcode,
   return ResultReg;
 }

-
 namespace llvm {
   FastISel *X86::createFastISel(FunctionLoweringInfo &funcInfo,
                                 const TargetLibraryInfo *libInfo) {