[InstCombine] Make InstCombine's IRBuilder be passed by reference everywhere

Previously the InstCombiner class contained a pointer to an IR builder that had been passed to the constructor. Sometimes this would be passed to helper functions as either a pointer or the pointer would be dereferenced to be passed by reference.

This patch makes it a reference everywhere including the InstCombiner class itself so there is more consistency. This is a large, but mechanical patch. I've done very minimal formatting changes on it despite what clang-format wanted to do.

llvm-svn: 307451
This commit is contained in:
Craig Topper 2017-07-07 23:16:26 +00:00
parent f29a1b921c
commit bb4069e439
14 changed files with 778 additions and 793 deletions

View File

@ -164,7 +164,7 @@ namespace {
/// ///
class FAddCombine { class FAddCombine {
public: public:
FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {} FAddCombine(InstCombiner::BuilderTy &B) : Builder(B), Instr(nullptr) {}
Value *simplify(Instruction *FAdd); Value *simplify(Instruction *FAdd);
private: private:
@ -187,7 +187,7 @@ namespace {
Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota); Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
void createInstPostProc(Instruction *NewInst, bool NoNumber = false); void createInstPostProc(Instruction *NewInst, bool NoNumber = false);
InstCombiner::BuilderTy *Builder; InstCombiner::BuilderTy &Builder;
Instruction *Instr; Instruction *Instr;
// Debugging stuff are clustered here. // Debugging stuff are clustered here.
@ -735,7 +735,7 @@ Value *FAddCombine::createNaryFAdd
} }
Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) { Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
Value *V = Builder->CreateFSub(Opnd0, Opnd1); Value *V = Builder.CreateFSub(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V)) if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I); createInstPostProc(I);
return V; return V;
@ -750,21 +750,21 @@ Value *FAddCombine::createFNeg(Value *V) {
} }
Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) { Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
Value *V = Builder->CreateFAdd(Opnd0, Opnd1); Value *V = Builder.CreateFAdd(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V)) if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I); createInstPostProc(I);
return V; return V;
} }
Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) { Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
Value *V = Builder->CreateFMul(Opnd0, Opnd1); Value *V = Builder.CreateFMul(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V)) if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I); createInstPostProc(I);
return V; return V;
} }
Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) { Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
Value *V = Builder->CreateFDiv(Opnd0, Opnd1); Value *V = Builder.CreateFDiv(Opnd0, Opnd1);
if (Instruction *I = dyn_cast<Instruction>(V)) if (Instruction *I = dyn_cast<Instruction>(V))
createInstPostProc(I); createInstPostProc(I);
return V; return V;
@ -895,7 +895,7 @@ bool InstCombiner::willNotOverflowUnsignedSub(const Value *LHS,
// ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C)) // ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
// XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even // XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
static Value *checkForNegativeOperand(BinaryOperator &I, static Value *checkForNegativeOperand(BinaryOperator &I,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1); Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
// This function creates 2 instructions to replace ADD, we need at least one // This function creates 2 instructions to replace ADD, we need at least one
@ -919,13 +919,13 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
// X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1)) // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
// ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1)) // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) { if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
Value *NewAnd = Builder->CreateAnd(Z, *C1); Value *NewAnd = Builder.CreateAnd(Z, *C1);
return Builder->CreateSub(RHS, NewAnd, "sub"); return Builder.CreateSub(RHS, NewAnd, "sub");
} else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) { } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
// X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1)) // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
// ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1)) // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
Value *NewOr = Builder->CreateOr(Z, ~(*C1)); Value *NewOr = Builder.CreateOr(Z, ~(*C1));
return Builder->CreateSub(RHS, NewOr, "sub"); return Builder.CreateSub(RHS, NewOr, "sub");
} }
} }
} }
@ -944,8 +944,8 @@ static Value *checkForNegativeOperand(BinaryOperator &I,
if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1)))) if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
if (C1->countTrailingZeros() == 0) if (C1->countTrailingZeros() == 0)
if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) { if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
Value *NewOr = Builder->CreateOr(Z, ~(*C2)); Value *NewOr = Builder.CreateOr(Z, ~(*C2));
return Builder->CreateSub(RHS, NewOr, "sub"); return Builder.CreateSub(RHS, NewOr, "sub");
} }
return nullptr; return nullptr;
} }
@ -1027,7 +1027,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *V = SimplifyUsingDistributiveLaws(I)) if (Value *V = SimplifyUsingDistributiveLaws(I))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Instruction *X = foldAddWithConstant(I, *Builder)) if (Instruction *X = foldAddWithConstant(I, Builder))
return X; return X;
// FIXME: This should be moved into the above helper function to allow these // FIXME: This should be moved into the above helper function to allow these
@ -1060,7 +1060,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (ExtendAmt) { if (ExtendAmt) {
Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt); Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext"); Value *NewShl = Builder.CreateShl(XorLHS, ShAmt, "sext");
return BinaryOperator::CreateAShr(NewShl, ShAmt); return BinaryOperator::CreateAShr(NewShl, ShAmt);
} }
@ -1101,7 +1101,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (Value *LHSV = dyn_castNegVal(LHS)) { if (Value *LHSV = dyn_castNegVal(LHS)) {
if (!isa<Constant>(RHS)) if (!isa<Constant>(RHS))
if (Value *RHSV = dyn_castNegVal(RHS)) { if (Value *RHSV = dyn_castNegVal(RHS)) {
Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum"); Value *NewAdd = Builder.CreateAdd(LHSV, RHSV, "sum");
return BinaryOperator::CreateNeg(NewAdd); return BinaryOperator::CreateNeg(NewAdd);
} }
@ -1148,7 +1148,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
if (AddRHSHighBits == AddRHSHighBitsAnd) { if (AddRHSHighBits == AddRHSHighBitsAnd) {
// Okay, the xform is safe. Insert the new add pronto. // Okay, the xform is safe. Insert the new add pronto.
Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName()); Value *NewAdd = Builder.CreateAdd(X, CRHS, LHS->getName());
return BinaryOperator::CreateAnd(NewAdd, C2); return BinaryOperator::CreateAnd(NewAdd, C2);
} }
} }
@ -1191,7 +1191,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) { willNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new, smaller add. // Insert the new, smaller add.
Value *NewAdd = Value *NewAdd =
Builder->CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv"); Builder.CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
return new SExtInst(NewAdd, I.getType()); return new SExtInst(NewAdd, I.getType());
} }
} }
@ -1208,7 +1208,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowSignedAdd(LHSConv->getOperand(0), willNotOverflowSignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), I)) { RHSConv->getOperand(0), I)) {
// Insert the new integer add. // Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0), Value *NewAdd = Builder.CreateNSWAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), "addconv"); RHSConv->getOperand(0), "addconv");
return new SExtInst(NewAdd, I.getType()); return new SExtInst(NewAdd, I.getType());
} }
@ -1227,7 +1227,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowUnsignedAdd(LHSConv->getOperand(0), CI, I)) { willNotOverflowUnsignedAdd(LHSConv->getOperand(0), CI, I)) {
// Insert the new, smaller add. // Insert the new, smaller add.
Value *NewAdd = Value *NewAdd =
Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv"); Builder.CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
return new ZExtInst(NewAdd, I.getType()); return new ZExtInst(NewAdd, I.getType());
} }
} }
@ -1244,7 +1244,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
willNotOverflowUnsignedAdd(LHSConv->getOperand(0), willNotOverflowUnsignedAdd(LHSConv->getOperand(0),
RHSConv->getOperand(0), I)) { RHSConv->getOperand(0), I)) {
// Insert the new integer add. // Insert the new integer add.
Value *NewAdd = Builder->CreateNUWAdd( Value *NewAdd = Builder.CreateNUWAdd(
LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv"); LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
return new ZExtInst(NewAdd, I.getType()); return new ZExtInst(NewAdd, I.getType());
} }
@ -1362,8 +1362,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
ConstantExpr::getSIToFP(CI, I.getType()) == CFP && ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
willNotOverflowSignedAdd(LHSIntVal, CI, I)) { willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
// Insert the new integer add. // Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal, Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
CI, "addconv");
return new SIToFPInst(NewAdd, I.getType()); return new SIToFPInst(NewAdd, I.getType());
} }
} }
@ -1381,8 +1380,7 @@ Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
(LHSConv->hasOneUse() || RHSConv->hasOneUse()) && (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) { willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
// Insert the new integer add. // Insert the new integer add.
Value *NewAdd = Builder->CreateNSWAdd(LHSIntVal, Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
RHSIntVal, "addconv");
return new SIToFPInst(NewAdd, I.getType()); return new SIToFPInst(NewAdd, I.getType());
} }
} }
@ -1480,14 +1478,14 @@ Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
// pointer, subtract it from the offset we have. // pointer, subtract it from the offset we have.
if (GEP2) { if (GEP2) {
Value *Offset = EmitGEPOffset(GEP2); Value *Offset = EmitGEPOffset(GEP2);
Result = Builder->CreateSub(Result, Offset); Result = Builder.CreateSub(Result, Offset);
} }
// If we have p - gep(p, ...) then we have to negate the result. // If we have p - gep(p, ...) then we have to negate the result.
if (Swapped) if (Swapped)
Result = Builder->CreateNeg(Result, "diff.neg"); Result = Builder.CreateNeg(Result, "diff.neg");
return Builder->CreateIntCast(Result, Ty, true); return Builder.CreateIntCast(Result, Ty, true);
} }
Instruction *InstCombiner::visitSub(BinaryOperator &I) { Instruction *InstCombiner::visitSub(BinaryOperator &I) {
@ -1615,7 +1613,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// ((X | Y) - X) --> (~X & Y) // ((X | Y) - X) --> (~X & Y)
if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1))))) if (match(Op0, m_OneUse(m_c_Or(m_Value(Y), m_Specific(Op1)))))
return BinaryOperator::CreateAnd( return BinaryOperator::CreateAnd(
Y, Builder->CreateNot(Op1, Op1->getName() + ".not")); Y, Builder.CreateNot(Op1, Op1->getName() + ".not"));
} }
if (Op1->hasOneUse()) { if (Op1->hasOneUse()) {
@ -1625,13 +1623,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// (X - (Y - Z)) --> (X + (Z - Y)). // (X - (Y - Z)) --> (X + (Z - Y)).
if (match(Op1, m_Sub(m_Value(Y), m_Value(Z)))) if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
return BinaryOperator::CreateAdd(Op0, return BinaryOperator::CreateAdd(Op0,
Builder->CreateSub(Z, Y, Op1->getName())); Builder.CreateSub(Z, Y, Op1->getName()));
// (X - (X & Y)) --> (X & ~Y) // (X - (X & Y)) --> (X & ~Y)
// //
if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0)))) if (match(Op1, m_c_And(m_Value(Y), m_Specific(Op0))))
return BinaryOperator::CreateAnd(Op0, return BinaryOperator::CreateAnd(Op0,
Builder->CreateNot(Y, Y->getName() + ".not")); Builder.CreateNot(Y, Y->getName() + ".not"));
// 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow. // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) && if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
@ -1648,7 +1646,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// 'nuw' is dropped in favor of the canonical form. // 'nuw' is dropped in favor of the canonical form.
if (match(Op1, m_SExt(m_Value(Y))) && if (match(Op1, m_SExt(m_Value(Y))) &&
Y->getType()->getScalarSizeInBits() == 1) { Y->getType()->getScalarSizeInBits() == 1) {
Value *Zext = Builder->CreateZExt(Y, I.getType()); Value *Zext = Builder.CreateZExt(Y, I.getType());
BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext); BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
Add->setHasNoSignedWrap(I.hasNoSignedWrap()); Add->setHasNoSignedWrap(I.hasNoSignedWrap());
return Add; return Add;
@ -1659,13 +1657,13 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
Value *A, *B; Value *A, *B;
Constant *CI; Constant *CI;
if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B))))) if (match(Op1, m_c_Mul(m_Value(A), m_Neg(m_Value(B)))))
return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B)); return BinaryOperator::CreateAdd(Op0, Builder.CreateMul(A, B));
// X - A*CI -> X + A*-CI // X - A*CI -> X + A*-CI
// No need to handle commuted multiply because multiply handling will // No need to handle commuted multiply because multiply handling will
// ensure constant will be move to the right hand side. // ensure constant will be move to the right hand side.
if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) { if (match(Op1, m_Mul(m_Value(A), m_Constant(CI)))) {
Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI)); Value *NewMul = Builder.CreateMul(A, ConstantExpr::getNeg(CI));
return BinaryOperator::CreateAdd(Op0, NewMul); return BinaryOperator::CreateAdd(Op0, NewMul);
} }
} }
@ -1729,14 +1727,14 @@ Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
} }
if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) { if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) { if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType()); Value *NewTrunc = Builder.CreateFPTrunc(V, I.getType());
Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc); Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
NewI->copyFastMathFlags(&I); NewI->copyFastMathFlags(&I);
return NewI; return NewI;
} }
} else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) { } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) { if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
Value *NewExt = Builder->CreateFPExt(V, I.getType()); Value *NewExt = Builder.CreateFPExt(V, I.getType());
Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt); Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
NewI->copyFastMathFlags(&I); NewI->copyFastMathFlags(&I);
return NewI; return NewI;

View File

@ -54,17 +54,17 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC) {
/// instruction. The sign is passed in to determine which kind of predicate to /// instruction. The sign is passed in to determine which kind of predicate to
/// use in the new icmp instruction. /// use in the new icmp instruction.
static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS, static Value *getNewICmpValue(bool Sign, unsigned Code, Value *LHS, Value *RHS,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
ICmpInst::Predicate NewPred; ICmpInst::Predicate NewPred;
if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred)) if (Value *NewConstant = getICmpValue(Sign, Code, LHS, RHS, NewPred))
return NewConstant; return NewConstant;
return Builder->CreateICmp(NewPred, LHS, RHS); return Builder.CreateICmp(NewPred, LHS, RHS);
} }
/// This is the complement of getFCmpCode, which turns an opcode and two /// This is the complement of getFCmpCode, which turns an opcode and two
/// operands into either a FCmp instruction, or a true/false constant. /// operands into either a FCmp instruction, or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS, static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
const auto Pred = static_cast<FCmpInst::Predicate>(Code); const auto Pred = static_cast<FCmpInst::Predicate>(Code);
assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE && assert(FCmpInst::FCMP_FALSE <= Pred && Pred <= FCmpInst::FCMP_TRUE &&
"Unexpected FCmp predicate!"); "Unexpected FCmp predicate!");
@ -72,7 +72,7 @@ static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0); return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 0);
if (Pred == FCmpInst::FCMP_TRUE) if (Pred == FCmpInst::FCMP_TRUE)
return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1); return ConstantInt::get(CmpInst::makeCmpResultType(LHS->getType()), 1);
return Builder->CreateFCmp(Pred, LHS, RHS); return Builder.CreateFCmp(Pred, LHS, RHS);
} }
/// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or /// \brief Transform BITWISE_OP(BSWAP(A),BSWAP(B)) or
@ -81,7 +81,7 @@ static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
/// \return Pointer to node that must replace the original binary operator, or /// \return Pointer to node that must replace the original binary operator, or
/// null pointer if no transformation was made. /// null pointer if no transformation was made.
static Value *SimplifyBSwap(BinaryOperator &I, static Value *SimplifyBSwap(BinaryOperator &I,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying"); assert(I.isBitwiseLogicOp() && "Unexpected opcode for bswap simplifying");
Value *OldLHS = I.getOperand(0); Value *OldLHS = I.getOperand(0);
@ -107,10 +107,10 @@ static Value *SimplifyBSwap(BinaryOperator &I,
} else } else
return nullptr; return nullptr;
Value *BinOp = Builder->CreateBinOp(I.getOpcode(), NewLHS, NewRHS); Value *BinOp = Builder.CreateBinOp(I.getOpcode(), NewLHS, NewRHS);
Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap, Function *F = Intrinsic::getDeclaration(I.getModule(), Intrinsic::bswap,
I.getType()); I.getType());
return Builder->CreateCall(F, BinOp); return Builder.CreateCall(F, BinOp);
} }
/// This handles expressions of the form ((val OP C1) & C2). Where /// This handles expressions of the form ((val OP C1) & C2). Where
@ -129,7 +129,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
case Instruction::Xor: case Instruction::Xor:
if (Op->hasOneUse()) { if (Op->hasOneUse()) {
// (X ^ C1) & C2 --> (X & C2) ^ (C1&C2) // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
Value *And = Builder->CreateAnd(X, AndRHS); Value *And = Builder.CreateAnd(X, AndRHS);
And->takeName(Op); And->takeName(Op);
return BinaryOperator::CreateXor(And, Together); return BinaryOperator::CreateXor(And, Together);
} }
@ -142,7 +142,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
// NOTE: This reduces the number of bits set in the & mask, which // NOTE: This reduces the number of bits set in the & mask, which
// can expose opportunities for store narrowing. // can expose opportunities for store narrowing.
Together = ConstantExpr::getXor(AndRHS, Together); Together = ConstantExpr::getXor(AndRHS, Together);
Value *And = Builder->CreateAnd(X, Together); Value *And = Builder.CreateAnd(X, Together);
And->takeName(Op); And->takeName(Op);
return BinaryOperator::CreateOr(And, OpRHS); return BinaryOperator::CreateOr(And, OpRHS);
} }
@ -174,7 +174,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
return &TheAnd; return &TheAnd;
} else { } else {
// Pull the XOR out of the AND. // Pull the XOR out of the AND.
Value *NewAnd = Builder->CreateAnd(X, AndRHS); Value *NewAnd = Builder.CreateAnd(X, AndRHS);
NewAnd->takeName(Op); NewAnd->takeName(Op);
return BinaryOperator::CreateXor(NewAnd, AndRHS); return BinaryOperator::CreateXor(NewAnd, AndRHS);
} }
@ -190,7 +190,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal)); APInt ShlMask(APInt::getHighBitsSet(BitWidth, BitWidth-OpRHSVal));
ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShlMask); ConstantInt *CI = Builder.getInt(AndRHS->getValue() & ShlMask);
if (CI->getValue() == ShlMask) if (CI->getValue() == ShlMask)
// Masking out bits that the shift already masks. // Masking out bits that the shift already masks.
@ -210,7 +210,7 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal)); APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
ConstantInt *CI = Builder->getInt(AndRHS->getValue() & ShrMask); ConstantInt *CI = Builder.getInt(AndRHS->getValue() & ShrMask);
if (CI->getValue() == ShrMask) if (CI->getValue() == ShrMask)
// Masking out bits that the shift already masks. // Masking out bits that the shift already masks.
@ -230,12 +230,12 @@ Instruction *InstCombiner::OptAndOp(BinaryOperator *Op,
uint32_t BitWidth = AndRHS->getType()->getBitWidth(); uint32_t BitWidth = AndRHS->getType()->getBitWidth();
uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth); uint32_t OpRHSVal = OpRHS->getLimitedValue(BitWidth);
APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal)); APInt ShrMask(APInt::getLowBitsSet(BitWidth, BitWidth - OpRHSVal));
Constant *C = Builder->getInt(AndRHS->getValue() & ShrMask); Constant *C = Builder.getInt(AndRHS->getValue() & ShrMask);
if (C == AndRHS) { // Masking out bits shifted in. if (C == AndRHS) { // Masking out bits shifted in.
// (Val ashr C1) & C2 -> (Val lshr C1) & C2 // (Val ashr C1) & C2 -> (Val lshr C1) & C2
// Make the argument unsigned. // Make the argument unsigned.
Value *ShVal = Op->getOperand(0); Value *ShVal = Op->getOperand(0);
ShVal = Builder->CreateLShr(ShVal, OpRHS, Op->getName()); ShVal = Builder.CreateLShr(ShVal, OpRHS, Op->getName());
return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName()); return BinaryOperator::CreateAnd(ShVal, AndRHS, TheAnd.getName());
} }
} }
@ -261,15 +261,15 @@ Value *InstCombiner::insertRangeTest(Value *V, const APInt &Lo, const APInt &Hi,
ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE; ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) { if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred; Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
return Builder->CreateICmp(Pred, V, ConstantInt::get(Ty, Hi)); return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
} }
// V >= Lo && V < Hi --> V - Lo u< Hi - Lo // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
// V < Lo || V >= Hi --> V - Lo u>= Hi - Lo // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
Value *VMinusLo = Value *VMinusLo =
Builder->CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off"); Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo); Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
return Builder->CreateICmp(Pred, VMinusLo, HiMinusLo); return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
} }
/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns /// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
@ -515,7 +515,7 @@ static unsigned getMaskedTypeForICmpPair(Value *&A, Value *&B, Value *&C,
/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) /// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y). /// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
llvm::InstCombiner::BuilderTy *Builder) { llvm::InstCombiner::BuilderTy &Builder) {
Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr; Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate(); ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
unsigned Mask = unsigned Mask =
@ -548,27 +548,27 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
if (Mask & Mask_AllZeros) { if (Mask & Mask_AllZeros) {
// (icmp eq (A & B), 0) & (icmp eq (A & D), 0) // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
// -> (icmp eq (A & (B|D)), 0) // -> (icmp eq (A & (B|D)), 0)
Value *NewOr = Builder->CreateOr(B, D); Value *NewOr = Builder.CreateOr(B, D);
Value *NewAnd = Builder->CreateAnd(A, NewOr); Value *NewAnd = Builder.CreateAnd(A, NewOr);
// We can't use C as zero because we might actually handle // We can't use C as zero because we might actually handle
// (icmp ne (A & B), B) & (icmp ne (A & D), D) // (icmp ne (A & B), B) & (icmp ne (A & D), D)
// with B and D, having a single bit set. // with B and D, having a single bit set.
Value *Zero = Constant::getNullValue(A->getType()); Value *Zero = Constant::getNullValue(A->getType());
return Builder->CreateICmp(NewCC, NewAnd, Zero); return Builder.CreateICmp(NewCC, NewAnd, Zero);
} }
if (Mask & BMask_AllOnes) { if (Mask & BMask_AllOnes) {
// (icmp eq (A & B), B) & (icmp eq (A & D), D) // (icmp eq (A & B), B) & (icmp eq (A & D), D)
// -> (icmp eq (A & (B|D)), (B|D)) // -> (icmp eq (A & (B|D)), (B|D))
Value *NewOr = Builder->CreateOr(B, D); Value *NewOr = Builder.CreateOr(B, D);
Value *NewAnd = Builder->CreateAnd(A, NewOr); Value *NewAnd = Builder.CreateAnd(A, NewOr);
return Builder->CreateICmp(NewCC, NewAnd, NewOr); return Builder.CreateICmp(NewCC, NewAnd, NewOr);
} }
if (Mask & AMask_AllOnes) { if (Mask & AMask_AllOnes) {
// (icmp eq (A & B), A) & (icmp eq (A & D), A) // (icmp eq (A & B), A) & (icmp eq (A & D), A)
// -> (icmp eq (A & (B&D)), A) // -> (icmp eq (A & (B&D)), A)
Value *NewAnd1 = Builder->CreateAnd(B, D); Value *NewAnd1 = Builder.CreateAnd(B, D);
Value *NewAnd2 = Builder->CreateAnd(A, NewAnd1); Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
return Builder->CreateICmp(NewCC, NewAnd2, A); return Builder.CreateICmp(NewCC, NewAnd2, A);
} }
// Remaining cases assume at least that B and D are constant, and depend on // Remaining cases assume at least that B and D are constant, and depend on
@ -636,10 +636,10 @@ static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
(CCst->getValue() ^ ECst->getValue())).getBoolValue()) (CCst->getValue() ^ ECst->getValue())).getBoolValue())
return ConstantInt::get(LHS->getType(), !IsAnd); return ConstantInt::get(LHS->getType(), !IsAnd);
Value *NewOr1 = Builder->CreateOr(B, D); Value *NewOr1 = Builder.CreateOr(B, D);
Value *NewOr2 = ConstantExpr::getOr(CCst, ECst); Value *NewOr2 = ConstantExpr::getOr(CCst, ECst);
Value *NewAnd = Builder->CreateAnd(A, NewOr1); Value *NewAnd = Builder.CreateAnd(A, NewOr1);
return Builder->CreateICmp(NewCC, NewAnd, NewOr2); return Builder.CreateICmp(NewCC, NewAnd, NewOr2);
} }
return nullptr; return nullptr;
@ -697,13 +697,13 @@ Value *InstCombiner::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
if (Inverted) if (Inverted)
NewPred = ICmpInst::getInversePredicate(NewPred); NewPred = ICmpInst::getInversePredicate(NewPred);
return Builder->CreateICmp(NewPred, Input, RangeEnd); return Builder.CreateICmp(NewPred, Input, RangeEnd);
} }
static Value * static Value *
foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS, foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
bool JoinedByAnd, bool JoinedByAnd,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
Value *X = LHS->getOperand(0); Value *X = LHS->getOperand(0);
if (X != RHS->getOperand(0)) if (X != RHS->getOperand(0))
return nullptr; return nullptr;
@ -734,8 +734,8 @@ foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
// (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2 // (X != C1 && X != C2) --> (X | (C1 ^ C2)) != C2
// We choose an 'or' with a Pow2 constant rather than the inverse mask with // We choose an 'or' with a Pow2 constant rather than the inverse mask with
// 'and' because that may lead to smaller codegen from a smaller constant. // 'and' because that may lead to smaller codegen from a smaller constant.
Value *Or = Builder->CreateOr(X, ConstantInt::get(X->getType(), Xor)); Value *Or = Builder.CreateOr(X, ConstantInt::get(X->getType(), Xor));
return Builder->CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2)); return Builder.CreateICmp(Pred, Or, ConstantInt::get(X->getType(), *C2));
} }
// Special case: get the ordering right when the values wrap around zero. // Special case: get the ordering right when the values wrap around zero.
@ -747,9 +747,9 @@ foldAndOrOfEqualityCmpsWithConstants(ICmpInst *LHS, ICmpInst *RHS,
// (X == 13 || X == 14) --> X - 13 <=u 1 // (X == 13 || X == 14) --> X - 13 <=u 1
// (X != 13 && X != 14) --> X - 13 >u 1 // (X != 13 && X != 14) --> X - 13 >u 1
// An 'add' is the canonical IR form, so favor that over a 'sub'. // An 'add' is the canonical IR form, so favor that over a 'sub'.
Value *Add = Builder->CreateAdd(X, ConstantInt::get(X->getType(), -(*C1))); Value *Add = Builder.CreateAdd(X, ConstantInt::get(X->getType(), -(*C1)));
auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE; auto NewPred = JoinedByAnd ? ICmpInst::ICMP_UGT : ICmpInst::ICMP_ULE;
return Builder->CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1)); return Builder.CreateICmp(NewPred, Add, ConstantInt::get(X->getType(), 1));
} }
return nullptr; return nullptr;
@ -785,10 +785,10 @@ Value *InstCombiner::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS, ICmpInst *RHS,
if (A == C && if (A == C &&
isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) && isKnownToBeAPowerOfTwo(B, false, 0, &CxtI) &&
isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) { isKnownToBeAPowerOfTwo(D, false, 0, &CxtI)) {
Value *Mask = Builder->CreateOr(B, D); Value *Mask = Builder.CreateOr(B, D);
Value *Masked = Builder->CreateAnd(A, Mask); Value *Masked = Builder.CreateAnd(A, Mask);
auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE; auto NewPred = JoinedByAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
return Builder->CreateICmp(NewPred, Masked, Mask); return Builder.CreateICmp(NewPred, Masked, Mask);
} }
} }
@ -847,8 +847,8 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
// (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0) // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) || if ((PredL == ICmpInst::ICMP_ULT && LHSC->getValue().isPowerOf2()) ||
(PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) { (PredL == ICmpInst::ICMP_EQ && LHSC->isZero())) {
Value *NewOr = Builder->CreateOr(LHS0, RHS0); Value *NewOr = Builder.CreateOr(LHS0, RHS0);
return Builder->CreateICmp(PredL, NewOr, LHSC); return Builder.CreateICmp(PredL, NewOr, LHSC);
} }
} }
@ -880,10 +880,10 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize); APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
if ((Low & AndC->getValue()).isNullValue() && if ((Low & AndC->getValue()).isNullValue() &&
(Low & BigC->getValue()).isNullValue()) { (Low & BigC->getValue()).isNullValue()) {
Value *NewAnd = Builder->CreateAnd(V, Low | AndC->getValue()); Value *NewAnd = Builder.CreateAnd(V, Low | AndC->getValue());
APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue(); APInt N = SmallC->getValue().zext(BigBitSize) | BigC->getValue();
Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N); Value *NewVal = ConstantInt::get(AndC->getType()->getContext(), N);
return Builder->CreateICmp(PredL, NewAnd, NewVal); return Builder.CreateICmp(PredL, NewAnd, NewVal);
} }
} }
} }
@ -935,14 +935,14 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!"); llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULT:
if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13 if (LHSC == SubOne(RHSC)) // (X != 13 & X u< 14) -> X < 13
return Builder->CreateICmpULT(LHS0, LHSC); return Builder.CreateICmpULT(LHS0, LHSC);
if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13 if (LHSC->isZero()) // (X != 0 & X u< 14) -> X-1 u< 13
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
false, true); false, true);
break; // (X != 13 & X u< 15) -> no change break; // (X != 13 & X u< 15) -> no change
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLT:
if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13 if (LHSC == SubOne(RHSC)) // (X != 13 & X s< 14) -> X < 13
return Builder->CreateICmpSLT(LHS0, LHSC); return Builder.CreateICmpSLT(LHS0, LHSC);
break; // (X != 13 & X s< 15) -> no change break; // (X != 13 & X s< 15) -> no change
case ICmpInst::ICMP_NE: case ICmpInst::ICMP_NE:
// Potential folds for this case should already be handled. // Potential folds for this case should already be handled.
@ -955,7 +955,7 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!"); llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_NE: case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14 if (RHSC == AddOne(LHSC)) // (X u> 13 & X != 14) -> X u> 14
return Builder->CreateICmp(PredL, LHS0, RHSC); return Builder.CreateICmp(PredL, LHS0, RHSC);
break; // (X u> 13 & X != 15) -> no change break; // (X u> 13 & X != 15) -> no change
case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1 case ICmpInst::ICMP_ULT: // (X u> 13 & X u< 15) -> (X-14) <u 1
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(),
@ -968,7 +968,7 @@ Value *InstCombiner::foldAndOfICmps(ICmpInst *LHS, ICmpInst *RHS,
llvm_unreachable("Unknown integer condition code!"); llvm_unreachable("Unknown integer condition code!");
case ICmpInst::ICMP_NE: case ICmpInst::ICMP_NE:
if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14 if (RHSC == AddOne(LHSC)) // (X s> 13 & X != 14) -> X s> 14
return Builder->CreateICmp(PredL, LHS0, RHSC); return Builder.CreateICmp(PredL, LHS0, RHSC);
break; // (X s> 13 & X != 15) -> no change break; // (X s> 13 & X != 15) -> no change
case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1 case ICmpInst::ICMP_SLT: // (X s> 13 & X s< 15) -> (X-14) s< 1
return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true, return insertRangeTest(LHS0, LHSC->getValue() + 1, RHSC->getValue(), true,
@ -1017,15 +1017,15 @@ Value *InstCombiner::foldAndOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
// If either of the constants are nans, then the whole thing returns // If either of the constants are nans, then the whole thing returns
// false. // false.
if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
return Builder->getFalse(); return Builder.getFalse();
return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0)); return Builder.CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
} }
// Handle vector zeros. This occurs because the canonical form of // Handle vector zeros. This occurs because the canonical form of
// "fcmp ord x,x" is "fcmp ord x, 0". // "fcmp ord x,x" is "fcmp ord x, 0".
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) && if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1))) isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0)); return Builder.CreateFCmpORD(LHS->getOperand(0), RHS->getOperand(0));
return nullptr; return nullptr;
} }
@ -1080,7 +1080,7 @@ bool InstCombiner::shouldOptimizeCast(CastInst *CI) {
/// Fold {and,or,xor} (cast X), C. /// Fold {and,or,xor} (cast X), C.
static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast, static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
Constant *C; Constant *C;
if (!match(Logic.getOperand(1), m_Constant(C))) if (!match(Logic.getOperand(1), m_Constant(C)))
return nullptr; return nullptr;
@ -1099,7 +1099,7 @@ static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy); Constant *ZextTruncC = ConstantExpr::getZExt(TruncC, DestTy);
if (ZextTruncC == C) { if (ZextTruncC == C) {
// LogicOpc (zext X), C --> zext (LogicOpc X, C) // LogicOpc (zext X), C --> zext (LogicOpc X, C)
Value *NewOp = Builder->CreateBinOp(LogicOpc, X, TruncC); Value *NewOp = Builder.CreateBinOp(LogicOpc, X, TruncC);
return new ZExtInst(NewOp, DestTy); return new ZExtInst(NewOp, DestTy);
} }
} }
@ -1142,7 +1142,7 @@ Instruction *InstCombiner::foldCastedBitwiseLogic(BinaryOperator &I) {
// fold logic(cast(A), cast(B)) -> cast(logic(A, B)) // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) { if (shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
Value *NewOp = Builder->CreateBinOp(LogicOpc, Cast0Src, Cast1Src, Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
I.getName()); I.getName());
return CastInst::Create(CastOpcode, NewOp, DestTy); return CastInst::Create(CastOpcode, NewOp, DestTy);
} }
@ -1275,7 +1275,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
return &I; return &I;
// Do this before using distributive laws to catch simple and/or/not patterns. // Do this before using distributive laws to catch simple and/or/not patterns.
if (Instruction *Xor = foldAndToXor(I, *Builder)) if (Instruction *Xor = foldAndToXor(I, Builder))
return Xor; return Xor;
// (A|B)&(A|C) -> A|(B&C) etc // (A|B)&(A|C) -> A|(B&C) etc
@ -1302,15 +1302,15 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
APInt NotAndRHS(~AndRHSMask); APInt NotAndRHS(~AndRHSMask);
if (MaskedValueIsZero(Op0LHS, NotAndRHS, 0, &I)) { if (MaskedValueIsZero(Op0LHS, NotAndRHS, 0, &I)) {
// Not masking anything out for the LHS, move to RHS. // Not masking anything out for the LHS, move to RHS.
Value *NewRHS = Builder->CreateAnd(Op0RHS, AndRHS, Value *NewRHS = Builder.CreateAnd(Op0RHS, AndRHS,
Op0RHS->getName()+".masked"); Op0RHS->getName()+".masked");
return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS); return BinaryOperator::Create(Op0I->getOpcode(), Op0LHS, NewRHS);
} }
if (!isa<Constant>(Op0RHS) && if (!isa<Constant>(Op0RHS) &&
MaskedValueIsZero(Op0RHS, NotAndRHS, 0, &I)) { MaskedValueIsZero(Op0RHS, NotAndRHS, 0, &I)) {
// Not masking anything out for the RHS, move to LHS. // Not masking anything out for the RHS, move to LHS.
Value *NewLHS = Builder->CreateAnd(Op0LHS, AndRHS, Value *NewLHS = Builder.CreateAnd(Op0LHS, AndRHS,
Op0LHS->getName()+".masked"); Op0LHS->getName()+".masked");
return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS); return BinaryOperator::Create(Op0I->getOpcode(), NewLHS, Op0RHS);
} }
@ -1329,7 +1329,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// (1 >> x) & 1 --> zext(x == 0) // (1 >> x) & 1 --> zext(x == 0)
if (AndRHSMask.isOneValue() && Op0LHS == AndRHS) { if (AndRHSMask.isOneValue() && Op0LHS == AndRHS) {
Value *NewICmp = Value *NewICmp =
Builder->CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType())); Builder.CreateICmpEQ(Op0RHS, Constant::getNullValue(I.getType()));
return new ZExtInst(NewICmp, I.getType()); return new ZExtInst(NewICmp, I.getType());
} }
break; break;
@ -1352,11 +1352,11 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType()); auto *TruncC1 = ConstantExpr::getTrunc(C1, X->getType());
Value *BinOp; Value *BinOp;
if (isa<ZExtInst>(Op0LHS)) if (isa<ZExtInst>(Op0LHS))
BinOp = Builder->CreateBinOp(Op0I->getOpcode(), X, TruncC1); BinOp = Builder.CreateBinOp(Op0I->getOpcode(), X, TruncC1);
else else
BinOp = Builder->CreateBinOp(Op0I->getOpcode(), TruncC1, X); BinOp = Builder.CreateBinOp(Op0I->getOpcode(), TruncC1, X);
auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType()); auto *TruncC2 = ConstantExpr::getTrunc(AndRHS, X->getType());
auto *And = Builder->CreateAnd(BinOp, TruncC2); auto *And = Builder.CreateAnd(BinOp, TruncC2);
return new ZExtInst(And, I.getType()); return new ZExtInst(And, I.getType());
} }
} }
@ -1376,7 +1376,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// into : and (trunc X to T), trunc(YC) & C2 // into : and (trunc X to T), trunc(YC) & C2
// This will fold the two constants together, which may allow // This will fold the two constants together, which may allow
// other simplifications. // other simplifications.
Value *NewCast = Builder->CreateTrunc(X, I.getType(), "and.shrunk"); Value *NewCast = Builder.CreateTrunc(X, I.getType(), "and.shrunk");
Constant *C3 = ConstantExpr::getTrunc(YC, I.getType()); Constant *C3 = ConstantExpr::getTrunc(YC, I.getType());
C3 = ConstantExpr::getAnd(C3, AndRHS); C3 = ConstantExpr::getAnd(C3, AndRHS);
return BinaryOperator::CreateAnd(NewCast, C3); return BinaryOperator::CreateAnd(NewCast, C3);
@ -1388,7 +1388,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I)) if (Instruction *FoldedLogic = foldOpWithConstantIntoOperand(I))
return FoldedLogic; return FoldedLogic;
if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder)) if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
return DeMorgan; return DeMorgan;
{ {
@ -1414,7 +1414,7 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
// an endless loop. By checking that A is non-constant we ensure that // an endless loop. By checking that A is non-constant we ensure that
// we will never get to the loop. // we will never get to the loop.
if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B if (A == tmpOp0 && !isa<Constant>(A)) // A&(A^B) -> A & ~B
return BinaryOperator::CreateAnd(A, Builder->CreateNot(B)); return BinaryOperator::CreateAnd(A, Builder.CreateNot(B));
} }
} }
@ -1428,13 +1428,13 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (match(Op0, m_Xor(m_Value(A), m_Value(B)))) if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) if (match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A))))
if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse())) if (Op1->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
return BinaryOperator::CreateAnd(Op0, Builder->CreateNot(C)); return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(C));
// ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B)))) if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))))
if (match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) if (match(Op1, m_Xor(m_Specific(B), m_Specific(A))))
if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse())) if (Op0->hasOneUse() || IsFreeToInvert(C, C->hasOneUse()))
return BinaryOperator::CreateAnd(Op1, Builder->CreateNot(C)); return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(C));
// (A | B) & ((~A) ^ B) -> (A & B) // (A | B) & ((~A) ^ B) -> (A & B)
// (A | B) & (B ^ (~A)) -> (A & B) // (A | B) & (B ^ (~A)) -> (A & B)
@ -1466,18 +1466,18 @@ Instruction *InstCombiner::visitAnd(BinaryOperator &I) {
if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) { if (LHS && match(Op1, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X)) if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldAndOfICmps(LHS, Cmp, I)) if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y)); return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y)) if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldAndOfICmps(LHS, Cmp, I)) if (Value *Res = foldAndOfICmps(LHS, Cmp, I))
return replaceInstUsesWith(I, Builder->CreateAnd(Res, X)); return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
} }
if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) { if (RHS && match(Op0, m_OneUse(m_And(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X)) if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldAndOfICmps(Cmp, RHS, I)) if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
return replaceInstUsesWith(I, Builder->CreateAnd(Res, Y)); return replaceInstUsesWith(I, Builder.CreateAnd(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y)) if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldAndOfICmps(Cmp, RHS, I)) if (Value *Res = foldAndOfICmps(Cmp, RHS, I))
return replaceInstUsesWith(I, Builder->CreateAnd(Res, X)); return replaceInstUsesWith(I, Builder.CreateAnd(Res, X));
} }
} }
@ -1679,9 +1679,9 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
RangeDiff.ugt(LHSC->getValue())) { RangeDiff.ugt(LHSC->getValue())) {
Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC); Value *MaskC = ConstantInt::get(LAddC->getType(), ~DiffC);
Value *NewAnd = Builder->CreateAnd(LAddOpnd, MaskC); Value *NewAnd = Builder.CreateAnd(LAddOpnd, MaskC);
Value *NewAdd = Builder->CreateAdd(NewAnd, MaxAddC); Value *NewAdd = Builder.CreateAdd(NewAnd, MaxAddC);
return (Builder->CreateICmp(LHS->getPredicate(), NewAdd, LHSC)); return Builder.CreateICmp(LHS->getPredicate(), NewAdd, LHSC);
} }
} }
} }
@ -1728,9 +1728,9 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
A = LHS->getOperand(1); A = LHS->getOperand(1);
} }
if (A && B) if (A && B)
return Builder->CreateICmp( return Builder.CreateICmp(
ICmpInst::ICMP_UGE, ICmpInst::ICMP_UGE,
Builder->CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A); Builder.CreateAdd(B, ConstantInt::getSigned(B->getType(), -1)), A);
} }
// E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
@ -1751,8 +1751,8 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
if (LHSC == RHSC && PredL == PredR) { if (LHSC == RHSC && PredL == PredR) {
// (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0) // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) { if (PredL == ICmpInst::ICMP_NE && LHSC->isZero()) {
Value *NewOr = Builder->CreateOr(LHS0, RHS0); Value *NewOr = Builder.CreateOr(LHS0, RHS0);
return Builder->CreateICmp(PredL, NewOr, LHSC); return Builder.CreateICmp(PredL, NewOr, LHSC);
} }
} }
@ -1762,7 +1762,7 @@ Value *InstCombiner::foldOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
ConstantInt *AddC; ConstantInt *AddC;
if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC)))) if (match(LHS0, m_Add(m_Specific(RHS0), m_ConstantInt(AddC))))
if (RHSC->getValue() + AddC->getValue() == LHSC->getValue()) if (RHSC->getValue() + AddC->getValue() == LHSC->getValue())
return Builder->CreateICmpULE(LHS0, LHSC); return Builder.CreateICmpULE(LHS0, LHSC);
} }
// From here on, we only handle: // From here on, we only handle:
@ -1878,18 +1878,18 @@ Value *InstCombiner::foldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
// If either of the constants are nans, then the whole thing returns // If either of the constants are nans, then the whole thing returns
// true. // true.
if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN()) if (LHSC->getValueAPF().isNaN() || RHSC->getValueAPF().isNaN())
return Builder->getTrue(); return Builder.getTrue();
// Otherwise, no need to compare the two constants, compare the // Otherwise, no need to compare the two constants, compare the
// rest. // rest.
return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0)); return Builder.CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
} }
// Handle vector zeros. This occurs because the canonical form of // Handle vector zeros. This occurs because the canonical form of
// "fcmp uno x,x" is "fcmp uno x, 0". // "fcmp uno x,x" is "fcmp uno x, 0".
if (isa<ConstantAggregateZero>(LHS->getOperand(1)) && if (isa<ConstantAggregateZero>(LHS->getOperand(1)) &&
isa<ConstantAggregateZero>(RHS->getOperand(1))) isa<ConstantAggregateZero>(RHS->getOperand(1)))
return Builder->CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0)); return Builder.CreateFCmpUNO(LHS->getOperand(0), RHS->getOperand(0));
return nullptr; return nullptr;
} }
@ -1908,7 +1908,7 @@ Value *InstCombiner::foldOrOfFCmps(FCmpInst *LHS, FCmpInst *RHS) {
/// when the XOR of the two constants is "all ones" (-1). /// when the XOR of the two constants is "all ones" (-1).
static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op, static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C, Value *A, Value *B, Value *C,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C); ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
if (!CI1) return nullptr; if (!CI1) return nullptr;
@ -1920,7 +1920,7 @@ static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
if (!Xor.isAllOnesValue()) return nullptr; if (!Xor.isAllOnesValue()) return nullptr;
if (V1 == A || V1 == B) { if (V1 == A || V1 == B) {
Value *NewOp = Builder->CreateAnd((V1 == A) ? B : A, CI1); Value *NewOp = Builder.CreateAnd((V1 == A) ? B : A, CI1);
return BinaryOperator::CreateOr(NewOp, V1); return BinaryOperator::CreateOr(NewOp, V1);
} }
@ -1938,7 +1938,7 @@ static Instruction *FoldOrWithConstants(BinaryOperator &I, Value *Op,
/// when the XOR of the two constants is "all ones" (-1). /// when the XOR of the two constants is "all ones" (-1).
static Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op, static Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op,
Value *A, Value *B, Value *C, Value *A, Value *B, Value *C,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
ConstantInt *CI1 = dyn_cast<ConstantInt>(C); ConstantInt *CI1 = dyn_cast<ConstantInt>(C);
if (!CI1) if (!CI1)
return nullptr; return nullptr;
@ -1953,7 +1953,7 @@ static Instruction *FoldXorWithConstants(BinaryOperator &I, Value *Op,
return nullptr; return nullptr;
if (V1 == A || V1 == B) { if (V1 == A || V1 == B) {
Value *NewOp = Builder->CreateAnd(V1 == A ? B : A, CI1); Value *NewOp = Builder.CreateAnd(V1 == A ? B : A, CI1);
return BinaryOperator::CreateXor(NewOp, V1); return BinaryOperator::CreateXor(NewOp, V1);
} }
@ -1979,7 +1979,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return &I; return &I;
// Do this before using distributive laws to catch simple and/or/not patterns. // Do this before using distributive laws to catch simple and/or/not patterns.
if (Instruction *Xor = foldOrToXor(I, *Builder)) if (Instruction *Xor = foldOrToXor(I, Builder))
return Xor; return Xor;
// (A&B)|(A&C) -> A&(B|C) etc // (A&B)|(A&C) -> A&(B|C) etc
@ -2003,7 +2003,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// (X^C)|Y -> (X|Y)^C iff Y&C == 0 // (X^C)|Y -> (X|Y)^C iff Y&C == 0
if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) && if (match(Op0, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
MaskedValueIsZero(Op1, *C, 0, &I)) { MaskedValueIsZero(Op1, *C, 0, &I)) {
Value *NOr = Builder->CreateOr(A, Op1); Value *NOr = Builder.CreateOr(A, Op1);
NOr->takeName(Op0); NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr, return BinaryOperator::CreateXor(NOr,
ConstantInt::get(NOr->getType(), *C)); ConstantInt::get(NOr->getType(), *C));
@ -2012,7 +2012,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// Y|(X^C) -> (X|Y)^C iff Y&C == 0 // Y|(X^C) -> (X|Y)^C iff Y&C == 0
if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) && if (match(Op1, m_OneUse(m_Xor(m_Value(A), m_APInt(C)))) &&
MaskedValueIsZero(Op0, *C, 0, &I)) { MaskedValueIsZero(Op0, *C, 0, &I)) {
Value *NOr = Builder->CreateOr(A, Op0); Value *NOr = Builder.CreateOr(A, Op0);
NOr->takeName(Op0); NOr->takeName(Op0);
return BinaryOperator::CreateXor(NOr, return BinaryOperator::CreateXor(NOr,
ConstantInt::get(NOr->getType(), *C)); ConstantInt::get(NOr->getType(), *C));
@ -2050,7 +2050,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(V2 == B && (V2 == B &&
MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V) MaskedValueIsZero(V1, ~C1->getValue(), 0, &I)))) // (N|V)
return BinaryOperator::CreateAnd(A, return BinaryOperator::CreateAnd(A,
Builder->getInt(C1->getValue()|C2->getValue())); Builder.getInt(C1->getValue()|C2->getValue()));
// Or commutes, try both ways. // Or commutes, try both ways.
if (match(B, m_Or(m_Value(V1), m_Value(V2))) && if (match(B, m_Or(m_Value(V1), m_Value(V2))) &&
((V1 == A && ((V1 == A &&
@ -2058,7 +2058,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(V2 == A && (V2 == A &&
MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V) MaskedValueIsZero(V1, ~C2->getValue(), 0, &I)))) // (N|V)
return BinaryOperator::CreateAnd(B, return BinaryOperator::CreateAnd(B,
Builder->getInt(C1->getValue()|C2->getValue())); Builder.getInt(C1->getValue()|C2->getValue()));
// ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2) // ((V|C3)&C1) | ((V|C4)&C2) --> (V|C3|C4)&(C1|C2)
// iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0. // iff (C1&C2) == 0 and (C3&~C1) == 0 and (C4&~C2) == 0.
@ -2067,9 +2067,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
(C3->getValue() & ~C1->getValue()).isNullValue() && (C3->getValue() & ~C1->getValue()).isNullValue() &&
match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) && match(B, m_Or(m_Specific(V1), m_ConstantInt(C4))) &&
(C4->getValue() & ~C2->getValue()).isNullValue()) { (C4->getValue() & ~C2->getValue()).isNullValue()) {
V2 = Builder->CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield"); V2 = Builder.CreateOr(V1, ConstantExpr::getOr(C3, C4), "bitfield");
return BinaryOperator::CreateAnd(V2, return BinaryOperator::CreateAnd(V2,
Builder->getInt(C1->getValue()|C2->getValue())); Builder.getInt(C1->getValue()|C2->getValue()));
} }
} }
} }
@ -2079,21 +2079,21 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// 'or' that it is replacing. // 'or' that it is replacing.
if (Op0->hasOneUse() || Op1->hasOneUse()) { if (Op0->hasOneUse() || Op1->hasOneUse()) {
// (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants. // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
if (Value *V = matchSelectFromAndOr(A, C, B, D, *Builder)) if (Value *V = matchSelectFromAndOr(A, C, B, D, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(A, C, D, B, *Builder)) if (Value *V = matchSelectFromAndOr(A, C, D, B, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(C, A, B, D, *Builder)) if (Value *V = matchSelectFromAndOr(C, A, B, D, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(C, A, D, B, *Builder)) if (Value *V = matchSelectFromAndOr(C, A, D, B, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(B, D, A, C, *Builder)) if (Value *V = matchSelectFromAndOr(B, D, A, C, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(B, D, C, A, *Builder)) if (Value *V = matchSelectFromAndOr(B, D, C, A, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(D, B, A, C, *Builder)) if (Value *V = matchSelectFromAndOr(D, B, A, C, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Value *V = matchSelectFromAndOr(D, B, C, A, *Builder)) if (Value *V = matchSelectFromAndOr(D, B, C, A, Builder))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
} }
@ -2131,9 +2131,9 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// ((B | C) & A) | B -> B | (A & C) // ((B | C) & A) | B -> B | (A & C)
if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A)))) if (match(Op0, m_And(m_Or(m_Specific(Op1), m_Value(C)), m_Value(A))))
return BinaryOperator::CreateOr(Op1, Builder->CreateAnd(A, C)); return BinaryOperator::CreateOr(Op1, Builder.CreateAnd(A, C));
if (Instruction *DeMorgan = matchDeMorgansLaws(I, *Builder)) if (Instruction *DeMorgan = matchDeMorgansLaws(I, Builder))
return DeMorgan; return DeMorgan;
// Canonicalize xor to the RHS. // Canonicalize xor to the RHS.
@ -2155,11 +2155,11 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
return BinaryOperator::CreateOr(A, B); return BinaryOperator::CreateOr(A, B);
if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) { if (Op1->hasOneUse() && match(A, m_Not(m_Specific(Op0)))) {
Value *Not = Builder->CreateNot(B, B->getName()+".not"); Value *Not = Builder.CreateNot(B, B->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0); return BinaryOperator::CreateOr(Not, Op0);
} }
if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) { if (Op1->hasOneUse() && match(B, m_Not(m_Specific(Op0)))) {
Value *Not = Builder->CreateNot(A, A->getName()+".not"); Value *Not = Builder.CreateNot(A, A->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0); return BinaryOperator::CreateOr(Not, Op0);
} }
} }
@ -2173,7 +2173,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
B->getOpcode() == Instruction::Xor)) { B->getOpcode() == Instruction::Xor)) {
Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) : Value *NotOp = Op0 == B->getOperand(0) ? B->getOperand(1) :
B->getOperand(0); B->getOperand(0);
Value *Not = Builder->CreateNot(NotOp, NotOp->getName()+".not"); Value *Not = Builder.CreateNot(NotOp, NotOp->getName() + ".not");
return BinaryOperator::CreateOr(Not, Op0); return BinaryOperator::CreateOr(Not, Op0);
} }
@ -2186,7 +2186,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
// xor was canonicalized to Op1 above. // xor was canonicalized to Op1 above.
if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) && if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
match(Op0, m_c_And(m_Specific(A), m_Specific(B)))) match(Op0, m_c_And(m_Specific(A), m_Specific(B))))
return BinaryOperator::CreateXor(Builder->CreateNot(A), B); return BinaryOperator::CreateXor(Builder.CreateNot(A), B);
if (SwappedForXor) if (SwappedForXor)
std::swap(Op0, Op1); std::swap(Op0, Op1);
@ -2204,18 +2204,18 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { if (LHS && match(Op1, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X)) if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
return replaceInstUsesWith(I, Builder->CreateOr(Res, Y)); return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y)) if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldOrOfICmps(LHS, Cmp, I)) if (Value *Res = foldOrOfICmps(LHS, Cmp, I))
return replaceInstUsesWith(I, Builder->CreateOr(Res, X)); return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
} }
if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) { if (RHS && match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
if (auto *Cmp = dyn_cast<ICmpInst>(X)) if (auto *Cmp = dyn_cast<ICmpInst>(X))
if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
return replaceInstUsesWith(I, Builder->CreateOr(Res, Y)); return replaceInstUsesWith(I, Builder.CreateOr(Res, Y));
if (auto *Cmp = dyn_cast<ICmpInst>(Y)) if (auto *Cmp = dyn_cast<ICmpInst>(Y))
if (Value *Res = foldOrOfICmps(Cmp, RHS, I)) if (Value *Res = foldOrOfICmps(Cmp, RHS, I))
return replaceInstUsesWith(I, Builder->CreateOr(Res, X)); return replaceInstUsesWith(I, Builder.CreateOr(Res, X));
} }
} }
@ -2244,7 +2244,7 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
ConstantInt *C1; ConstantInt *C1;
if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) && if (Op0->hasOneUse() && !isa<ConstantInt>(Op1) &&
match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) { match(Op0, m_Or(m_Value(A), m_ConstantInt(C1)))) {
Value *Inner = Builder->CreateOr(A, Op1); Value *Inner = Builder.CreateOr(A, Op1);
Inner->takeName(Op0); Inner->takeName(Op0);
return BinaryOperator::CreateOr(Inner, C1); return BinaryOperator::CreateOr(Inner, C1);
} }
@ -2257,8 +2257,8 @@ Instruction *InstCombiner::visitOr(BinaryOperator &I) {
if (Op0->hasOneUse() && Op1->hasOneUse() && if (Op0->hasOneUse() && Op1->hasOneUse() &&
match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) && match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) { match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
Value *orTrue = Builder->CreateOr(A, C); Value *orTrue = Builder.CreateOr(A, C);
Value *orFalse = Builder->CreateOr(B, D); Value *orFalse = Builder.CreateOr(B, D);
return SelectInst::Create(X, orTrue, orFalse); return SelectInst::Create(X, orTrue, orFalse);
} }
} }
@ -2363,12 +2363,12 @@ Value *InstCombiner::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS) {
if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) { if (OrICmp == LHS && AndICmp == RHS && RHS->hasOneUse()) {
// (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS
RHS->setPredicate(RHS->getInversePredicate()); RHS->setPredicate(RHS->getInversePredicate());
return Builder->CreateAnd(LHS, RHS); return Builder.CreateAnd(LHS, RHS);
} }
if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) { if (OrICmp == RHS && AndICmp == LHS && LHS->hasOneUse()) {
// !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS
LHS->setPredicate(LHS->getInversePredicate()); LHS->setPredicate(LHS->getInversePredicate());
return Builder->CreateAnd(LHS, RHS); return Builder.CreateAnd(LHS, RHS);
} }
} }
} }
@ -2389,7 +2389,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I))) if (Value *V = SimplifyXorInst(Op0, Op1, SQ.getWithInstruction(&I)))
return replaceInstUsesWith(I, V); return replaceInstUsesWith(I, V);
if (Instruction *NewXor = foldXorToXor(I, *Builder)) if (Instruction *NewXor = foldXorToXor(I, Builder))
return NewXor; return NewXor;
// (A&B)^(A&C) -> A&(B^C) etc // (A&B)^(A&C) -> A&(B^C) etc
@ -2412,13 +2412,13 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
// ~(~X & Y) --> (X | ~Y) // ~(~X & Y) --> (X | ~Y)
// ~(Y & ~X) --> (X | ~Y) // ~(Y & ~X) --> (X | ~Y)
if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) { if (match(&I, m_Not(m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y)))))) {
Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not"); Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
return BinaryOperator::CreateOr(X, NotY); return BinaryOperator::CreateOr(X, NotY);
} }
// ~(~X | Y) --> (X & ~Y) // ~(~X | Y) --> (X & ~Y)
// ~(Y | ~X) --> (X & ~Y) // ~(Y | ~X) --> (X & ~Y)
if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) { if (match(&I, m_Not(m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y)))))) {
Value *NotY = Builder->CreateNot(Y, Y->getName() + ".not"); Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
return BinaryOperator::CreateAnd(X, NotY); return BinaryOperator::CreateAnd(X, NotY);
} }
@ -2434,8 +2434,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
NotVal->getOperand(0)->hasOneUse()) && NotVal->getOperand(0)->hasOneUse()) &&
IsFreeToInvert(NotVal->getOperand(1), IsFreeToInvert(NotVal->getOperand(1),
NotVal->getOperand(1)->hasOneUse())) { NotVal->getOperand(1)->hasOneUse())) {
Value *NotX = Builder->CreateNot(NotVal->getOperand(0), "notlhs"); Value *NotX = Builder.CreateNot(NotVal->getOperand(0), "notlhs");
Value *NotY = Builder->CreateNot(NotVal->getOperand(1), "notrhs"); Value *NotY = Builder.CreateNot(NotVal->getOperand(1), "notrhs");
if (NotVal->getOpcode() == Instruction::And) if (NotVal->getOpcode() == Instruction::And)
return BinaryOperator::CreateOr(NotX, NotY); return BinaryOperator::CreateOr(NotX, NotY);
return BinaryOperator::CreateAnd(NotX, NotY); return BinaryOperator::CreateAnd(NotX, NotY);
@ -2478,8 +2478,8 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (CI->hasOneUse() && Op0C->hasOneUse()) { if (CI->hasOneUse() && Op0C->hasOneUse()) {
Instruction::CastOps Opcode = Op0C->getOpcode(); Instruction::CastOps Opcode = Op0C->getOpcode();
if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) && if ((Opcode == Instruction::ZExt || Opcode == Instruction::SExt) &&
(RHSC == ConstantExpr::getCast(Opcode, Builder->getTrue(), (RHSC == ConstantExpr::getCast(Opcode, Builder.getTrue(),
Op0C->getDestTy()))) { Op0C->getDestTy()))) {
CI->setPredicate(CI->getInversePredicate()); CI->setPredicate(CI->getInversePredicate());
return CastInst::Create(Opcode, CI, Op0C->getType()); return CastInst::Create(Opcode, CI, Op0C->getType());
} }
@ -2505,7 +2505,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
Op0I->getOperand(0)); Op0I->getOperand(0));
} else if (RHSC->getValue().isSignMask()) { } else if (RHSC->getValue().isSignMask()) {
// (X + C) ^ signmask -> (X + C + signmask) // (X + C) ^ signmask -> (X + C + signmask)
Constant *C = Builder->getInt(RHSC->getValue() + Op0CI->getValue()); Constant *C = Builder.getInt(RHSC->getValue() + Op0CI->getValue());
return BinaryOperator::CreateAdd(Op0I->getOperand(0), C); return BinaryOperator::CreateAdd(Op0I->getOperand(0), C);
} }
@ -2538,7 +2538,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
APInt FoldConst = C1->getValue().lshr(C2->getValue()); APInt FoldConst = C1->getValue().lshr(C2->getValue());
FoldConst ^= C3->getValue(); FoldConst ^= C3->getValue();
// Prepare the two operands. // Prepare the two operands.
Value *Opnd0 = Builder->CreateLShr(E1->getOperand(0), C2); Value *Opnd0 = Builder.CreateLShr(E1->getOperand(0), C2);
Opnd0->takeName(Op0I); Opnd0->takeName(Op0I);
cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc()); cast<Instruction>(Opnd0)->setDebugLoc(I.getDebugLoc());
Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst); Value *FoldVal = ConstantInt::get(Opnd0->getType(), FoldConst);
@ -2583,14 +2583,14 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
if (A == Op1) // (B|A)^B == (A|B)^B if (A == Op1) // (B|A)^B == (A|B)^B
std::swap(A, B); std::swap(A, B);
if (B == Op1) // (A|B)^B == A & ~B if (B == Op1) // (A|B)^B == A & ~B
return BinaryOperator::CreateAnd(A, Builder->CreateNot(Op1)); return BinaryOperator::CreateAnd(A, Builder.CreateNot(Op1));
} else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) { } else if (match(Op0, m_OneUse(m_And(m_Value(A), m_Value(B))))) {
if (A == Op1) // (A&B)^A -> (B&A)^A if (A == Op1) // (A&B)^A -> (B&A)^A
std::swap(A, B); std::swap(A, B);
const APInt *C; const APInt *C;
if (B == Op1 && // (B&A)^A == ~B & A if (B == Op1 && // (B&A)^A == ~B & A
!match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C !match(Op1, m_APInt(C))) { // Canonical form is (B&C)^C
return BinaryOperator::CreateAnd(Builder->CreateNot(A), Op1); return BinaryOperator::CreateAnd(Builder.CreateNot(A), Op1);
} }
} }
} }
@ -2602,20 +2602,20 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
match(Op1, m_Or(m_Value(A), m_Value(B)))) { match(Op1, m_Or(m_Value(A), m_Value(B)))) {
if (D == A) if (D == A)
return BinaryOperator::CreateXor( return BinaryOperator::CreateXor(
Builder->CreateAnd(Builder->CreateNot(A), B), C); Builder.CreateAnd(Builder.CreateNot(A), B), C);
if (D == B) if (D == B)
return BinaryOperator::CreateXor( return BinaryOperator::CreateXor(
Builder->CreateAnd(Builder->CreateNot(B), A), C); Builder.CreateAnd(Builder.CreateNot(B), A), C);
} }
// (A | B)^(A ^ C) -> ((~A) & B) ^ C // (A | B)^(A ^ C) -> ((~A) & B) ^ C
if (match(Op0, m_Or(m_Value(A), m_Value(B))) && if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
match(Op1, m_Xor(m_Value(D), m_Value(C)))) { match(Op1, m_Xor(m_Value(D), m_Value(C)))) {
if (D == A) if (D == A)
return BinaryOperator::CreateXor( return BinaryOperator::CreateXor(
Builder->CreateAnd(Builder->CreateNot(A), B), C); Builder.CreateAnd(Builder.CreateNot(A), B), C);
if (D == B) if (D == B)
return BinaryOperator::CreateXor( return BinaryOperator::CreateXor(
Builder->CreateAnd(Builder->CreateNot(B), A), C); Builder.CreateAnd(Builder.CreateNot(B), A), C);
} }
// (A & B) ^ (A ^ B) -> (A | B) // (A & B) ^ (A ^ B) -> (A | B)
if (match(Op0, m_And(m_Value(A), m_Value(B))) && if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
@ -2632,7 +2632,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
Value *A, *B; Value *A, *B;
if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) && if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Not(m_Specific(A)))) match(Op1, m_Not(m_Specific(A))))
return BinaryOperator::CreateNot(Builder->CreateAnd(A, B)); return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0))) if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1))) if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))

View File

@ -128,23 +128,23 @@ Instruction *InstCombiner::SimplifyElementUnorderedAtomicMemCpy(
Type::getIntNPtrTy(AMI->getContext(), ElementSizeInBits, Type::getIntNPtrTy(AMI->getContext(), ElementSizeInBits,
Src->getType()->getPointerAddressSpace()); Src->getType()->getPointerAddressSpace());
Value *SrcCasted = Builder->CreatePointerCast(Src, ElementPointerType, Value *SrcCasted = Builder.CreatePointerCast(Src, ElementPointerType,
"memcpy_unfold.src_casted"); "memcpy_unfold.src_casted");
Value *DstCasted = Builder->CreatePointerCast(Dst, ElementPointerType, Value *DstCasted = Builder.CreatePointerCast(Dst, ElementPointerType,
"memcpy_unfold.dst_casted"); "memcpy_unfold.dst_casted");
for (uint64_t i = 0; i < NumElements; ++i) { for (uint64_t i = 0; i < NumElements; ++i) {
// Get current element addresses // Get current element addresses
ConstantInt *ElementIdxCI = ConstantInt *ElementIdxCI =
ConstantInt::get(AMI->getContext(), APInt(64, i)); ConstantInt::get(AMI->getContext(), APInt(64, i));
Value *SrcElementAddr = Value *SrcElementAddr =
Builder->CreateGEP(SrcCasted, ElementIdxCI, "memcpy_unfold.src_addr"); Builder.CreateGEP(SrcCasted, ElementIdxCI, "memcpy_unfold.src_addr");
Value *DstElementAddr = Value *DstElementAddr =
Builder->CreateGEP(DstCasted, ElementIdxCI, "memcpy_unfold.dst_addr"); Builder.CreateGEP(DstCasted, ElementIdxCI, "memcpy_unfold.dst_addr");
// Load from the source. Transfer alignment information and mark load as // Load from the source. Transfer alignment information and mark load as
// unordered atomic. // unordered atomic.
LoadInst *Load = Builder->CreateLoad(SrcElementAddr, "memcpy_unfold.val"); LoadInst *Load = Builder.CreateLoad(SrcElementAddr, "memcpy_unfold.val");
Load->setOrdering(AtomicOrdering::Unordered); Load->setOrdering(AtomicOrdering::Unordered);
// We know alignment of the first element. It is also guaranteed by the // We know alignment of the first element. It is also guaranteed by the
// verifier that element size is less or equal than first element // verifier that element size is less or equal than first element
@ -157,7 +157,7 @@ Instruction *InstCombiner::SimplifyElementUnorderedAtomicMemCpy(
Load->setDebugLoc(AMI->getDebugLoc()); Load->setDebugLoc(AMI->getDebugLoc());
// Store loaded value via unordered atomic store. // Store loaded value via unordered atomic store.
StoreInst *Store = Builder->CreateStore(Load, DstElementAddr); StoreInst *Store = Builder.CreateStore(Load, DstElementAddr);
Store->setOrdering(AtomicOrdering::Unordered); Store->setOrdering(AtomicOrdering::Unordered);
Store->setAlignment(i == 0 ? AMI->getParamAlignment(0) Store->setAlignment(i == 0 ? AMI->getParamAlignment(0)
: ElementSizeInBytes); : ElementSizeInBytes);
@ -227,9 +227,9 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
SrcAlign = std::max(SrcAlign, CopyAlign); SrcAlign = std::max(SrcAlign, CopyAlign);
DstAlign = std::max(DstAlign, CopyAlign); DstAlign = std::max(DstAlign, CopyAlign);
Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy); Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy); Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile()); LoadInst *L = Builder.CreateLoad(Src, MI->isVolatile());
L->setAlignment(SrcAlign); L->setAlignment(SrcAlign);
if (CopyMD) if (CopyMD)
L->setMetadata(LLVMContext::MD_tbaa, CopyMD); L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
@ -238,7 +238,7 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
if (LoopMemParallelMD) if (LoopMemParallelMD)
L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD); L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile()); StoreInst *S = Builder.CreateStore(L, Dest, MI->isVolatile());
S->setAlignment(DstAlign); S->setAlignment(DstAlign);
if (CopyMD) if (CopyMD)
S->setMetadata(LLVMContext::MD_tbaa, CopyMD); S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
@ -274,15 +274,15 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
Value *Dest = MI->getDest(); Value *Dest = MI->getDest();
unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace(); unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp); Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
Dest = Builder->CreateBitCast(Dest, NewDstPtrTy); Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
// Alignment 0 is identity for alignment 1 for memset, but not store. // Alignment 0 is identity for alignment 1 for memset, but not store.
if (Alignment == 0) Alignment = 1; if (Alignment == 0) Alignment = 1;
// Extract the fill value and store. // Extract the fill value and store.
uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL; uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest, StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
MI->isVolatile()); MI->isVolatile());
S->setAlignment(Alignment); S->setAlignment(Alignment);
// Set the size of the copy to 0, it will be deleted on the next iteration. // Set the size of the copy to 0, it will be deleted on the next iteration.
@ -1402,7 +1402,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II, isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
&IC.getDominatorTree())) { &IC.getDominatorTree())) {
if (!match(II.getArgOperand(1), m_One())) { if (!match(II.getArgOperand(1), m_One())) {
II.setOperand(1, IC.Builder->getTrue()); II.setOperand(1, IC.Builder.getTrue());
return &II; return &II;
} }
} }
@ -1475,7 +1475,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// the LLVM intrinsic definition for the pointer argument. // the LLVM intrinsic definition for the pointer argument.
unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace(); unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace); PointerType *VecPtrTy = PointerType::get(II.getType(), AddrSpace);
Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec"); Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
// Second, convert the x86 XMM integer vector mask to a vector of bools based // Second, convert the x86 XMM integer vector mask to a vector of bools based
// on each element's most significant bit (the sign bit). // on each element's most significant bit (the sign bit).
@ -1483,7 +1483,7 @@ static Instruction *simplifyX86MaskedLoad(IntrinsicInst &II, InstCombiner &IC) {
// The pass-through vector for an x86 masked load is a zero vector. // The pass-through vector for an x86 masked load is a zero vector.
CallInst *NewMaskedLoad = CallInst *NewMaskedLoad =
IC.Builder->CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec); IC.Builder.CreateMaskedLoad(PtrCast, 1, BoolMask, ZeroVec);
return IC.replaceInstUsesWith(II, NewMaskedLoad); return IC.replaceInstUsesWith(II, NewMaskedLoad);
} }
@ -1518,13 +1518,13 @@ static bool simplifyX86MaskedStore(IntrinsicInst &II, InstCombiner &IC) {
// the LLVM intrinsic definition for the pointer argument. // the LLVM intrinsic definition for the pointer argument.
unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace(); unsigned AddrSpace = cast<PointerType>(Ptr->getType())->getAddressSpace();
PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace); PointerType *VecPtrTy = PointerType::get(Vec->getType(), AddrSpace);
Value *PtrCast = IC.Builder->CreateBitCast(Ptr, VecPtrTy, "castvec"); Value *PtrCast = IC.Builder.CreateBitCast(Ptr, VecPtrTy, "castvec");
// Second, convert the x86 XMM integer vector mask to a vector of bools based // Second, convert the x86 XMM integer vector mask to a vector of bools based
// on each element's most significant bit (the sign bit). // on each element's most significant bit (the sign bit).
Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask); Constant *BoolMask = getNegativeIsTrueBoolVec(ConstMask);
IC.Builder->CreateMaskedStore(Vec, PtrCast, 1, BoolMask); IC.Builder.CreateMaskedStore(Vec, PtrCast, 1, BoolMask);
// 'Replace uses' doesn't work for stores. Erase the original masked store. // 'Replace uses' doesn't work for stores. Erase the original masked store.
IC.eraseInstFromFunction(II); IC.eraseInstFromFunction(II);
@ -1973,7 +1973,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
unsigned C = X->getType()->getPrimitiveSizeInBits() - unsigned C = X->getType()->getPrimitiveSizeInBits() -
IIOperand->getType()->getPrimitiveSizeInBits(); IIOperand->getType()->getPrimitiveSizeInBits();
Value *CV = ConstantInt::get(X->getType(), C); Value *CV = ConstantInt::get(X->getType(), C);
Value *V = Builder->CreateLShr(X, CV); Value *V = Builder.CreateLShr(X, CV);
return new TruncInst(V, IIOperand->getType()); return new TruncInst(V, IIOperand->getType());
} }
break; break;
@ -1991,7 +1991,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
} }
case Intrinsic::masked_load: case Intrinsic::masked_load:
if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, *Builder)) if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II, Builder))
return replaceInstUsesWith(CI, SimplifiedMaskedOp); return replaceInstUsesWith(CI, SimplifiedMaskedOp);
break; break;
case Intrinsic::masked_store: case Intrinsic::masked_store:
@ -2073,11 +2073,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::fmuladd: { case Intrinsic::fmuladd: {
// Canonicalize fast fmuladd to the separate fmul + fadd. // Canonicalize fast fmuladd to the separate fmul + fadd.
if (II->hasUnsafeAlgebra()) { if (II->hasUnsafeAlgebra()) {
BuilderTy::FastMathFlagGuard Guard(*Builder); BuilderTy::FastMathFlagGuard Guard(Builder);
Builder->setFastMathFlags(II->getFastMathFlags()); Builder.setFastMathFlags(II->getFastMathFlags());
Value *Mul = Builder->CreateFMul(II->getArgOperand(0), Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
II->getArgOperand(1)); II->getArgOperand(1));
Value *Add = Builder->CreateFAdd(Mul, II->getArgOperand(2)); Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
Add->takeName(II); Add->takeName(II);
return replaceInstUsesWith(*II, Add); return replaceInstUsesWith(*II, Add);
} }
@ -2128,8 +2128,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Constant *LHS, *RHS; Constant *LHS, *RHS;
if (match(II->getArgOperand(0), if (match(II->getArgOperand(0),
m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) { m_Select(m_Value(Cond), m_Constant(LHS), m_Constant(RHS)))) {
CallInst *Call0 = Builder->CreateCall(II->getCalledFunction(), {LHS}); CallInst *Call0 = Builder.CreateCall(II->getCalledFunction(), {LHS});
CallInst *Call1 = Builder->CreateCall(II->getCalledFunction(), {RHS}); CallInst *Call1 = Builder.CreateCall(II->getCalledFunction(), {RHS});
return SelectInst::Create(Cond, Call0, Call1); return SelectInst::Create(Cond, Call0, Call1);
} }
@ -2147,7 +2147,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// fabs (fpext x) -> fpext (fabs x) // fabs (fpext x) -> fpext (fabs x)
Value *F = Intrinsic::getDeclaration(II->getModule(), II->getIntrinsicID(), Value *F = Intrinsic::getDeclaration(II->getModule(), II->getIntrinsicID(),
{ ExtSrc->getType() }); { ExtSrc->getType() });
CallInst *NewFabs = Builder->CreateCall(F, ExtSrc); CallInst *NewFabs = Builder.CreateCall(F, ExtSrc);
NewFabs->copyFastMathFlags(II); NewFabs->copyFastMathFlags(II);
NewFabs->takeName(II); NewFabs->takeName(II);
return new FPExtInst(NewFabs, II->getType()); return new FPExtInst(NewFabs, II->getType());
@ -2174,7 +2174,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC lvx -> load if the pointer is known aligned. // Turn PPC lvx -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC, if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
&DT) >= 16) { &DT) >= 16) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType())); PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr); return new LoadInst(Ptr);
} }
@ -2182,8 +2182,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_vsx_lxvw4x: case Intrinsic::ppc_vsx_lxvw4x:
case Intrinsic::ppc_vsx_lxvd2x: { case Intrinsic::ppc_vsx_lxvd2x: {
// Turn PPC VSX loads into normal loads. // Turn PPC VSX loads into normal loads.
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType())); PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr, Twine(""), false, 1); return new LoadInst(Ptr, Twine(""), false, 1);
} }
case Intrinsic::ppc_altivec_stvx: case Intrinsic::ppc_altivec_stvx:
@ -2193,7 +2193,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 16) { &DT) >= 16) {
Type *OpPtrTy = Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType()); PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr); return new StoreInst(II->getArgOperand(0), Ptr);
} }
break; break;
@ -2201,18 +2201,18 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::ppc_vsx_stxvd2x: { case Intrinsic::ppc_vsx_stxvd2x: {
// Turn PPC VSX stores into normal stores. // Turn PPC VSX stores into normal stores.
Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType()); Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr, false, 1); return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
} }
case Intrinsic::ppc_qpx_qvlfs: case Intrinsic::ppc_qpx_qvlfs:
// Turn PPC QPX qvlfs -> load if the pointer is known aligned. // Turn PPC QPX qvlfs -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC, if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, II, &AC,
&DT) >= 16) { &DT) >= 16) {
Type *VTy = VectorType::get(Builder->getFloatTy(), Type *VTy = VectorType::get(Builder.getFloatTy(),
II->getType()->getVectorNumElements()); II->getType()->getVectorNumElements());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(VTy)); PointerType::getUnqual(VTy));
Value *Load = Builder->CreateLoad(Ptr); Value *Load = Builder.CreateLoad(Ptr);
return new FPExtInst(Load, II->getType()); return new FPExtInst(Load, II->getType());
} }
break; break;
@ -2220,7 +2220,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC QPX qvlfd -> load if the pointer is known aligned. // Turn PPC QPX qvlfd -> load if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC, if (getOrEnforceKnownAlignment(II->getArgOperand(0), 32, DL, II, &AC,
&DT) >= 32) { &DT) >= 32) {
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
PointerType::getUnqual(II->getType())); PointerType::getUnqual(II->getType()));
return new LoadInst(Ptr); return new LoadInst(Ptr);
} }
@ -2229,11 +2229,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Turn PPC QPX qvstfs -> store if the pointer is known aligned. // Turn PPC QPX qvstfs -> store if the pointer is known aligned.
if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC, if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, II, &AC,
&DT) >= 16) { &DT) >= 16) {
Type *VTy = VectorType::get(Builder->getFloatTy(), Type *VTy = VectorType::get(Builder.getFloatTy(),
II->getArgOperand(0)->getType()->getVectorNumElements()); II->getArgOperand(0)->getType()->getVectorNumElements());
Value *TOp = Builder->CreateFPTrunc(II->getArgOperand(0), VTy); Value *TOp = Builder.CreateFPTrunc(II->getArgOperand(0), VTy);
Type *OpPtrTy = PointerType::getUnqual(VTy); Type *OpPtrTy = PointerType::getUnqual(VTy);
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(TOp, Ptr); return new StoreInst(TOp, Ptr);
} }
break; break;
@ -2243,7 +2243,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
&DT) >= 32) { &DT) >= 32) {
Type *OpPtrTy = Type *OpPtrTy =
PointerType::getUnqual(II->getArgOperand(0)->getType()); PointerType::getUnqual(II->getArgOperand(0)->getType());
Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy); Value *Ptr = Builder.CreateBitCast(II->getArgOperand(1), OpPtrTy);
return new StoreInst(II->getArgOperand(0), Ptr); return new StoreInst(II->getArgOperand(0), Ptr);
} }
break; break;
@ -2272,15 +2272,15 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
SmallVector<uint32_t, 8> SubVecMask; SmallVector<uint32_t, 8> SubVecMask;
for (unsigned i = 0; i != RetWidth; ++i) for (unsigned i = 0; i != RetWidth; ++i)
SubVecMask.push_back((int)i); SubVecMask.push_back((int)i);
VectorHalfAsShorts = Builder->CreateShuffleVector( VectorHalfAsShorts = Builder.CreateShuffleVector(
Arg, UndefValue::get(ArgType), SubVecMask); Arg, UndefValue::get(ArgType), SubVecMask);
} }
auto VectorHalfType = auto VectorHalfType =
VectorType::get(Type::getHalfTy(II->getContext()), RetWidth); VectorType::get(Type::getHalfTy(II->getContext()), RetWidth);
auto VectorHalfs = auto VectorHalfs =
Builder->CreateBitCast(VectorHalfAsShorts, VectorHalfType); Builder.CreateBitCast(VectorHalfAsShorts, VectorHalfType);
auto VectorFloats = Builder->CreateFPExt(VectorHalfs, RetType); auto VectorFloats = Builder.CreateFPExt(VectorHalfs, RetType);
return replaceInstUsesWith(*II, VectorFloats); return replaceInstUsesWith(*II, VectorFloats);
} }
@ -2437,25 +2437,25 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: llvm_unreachable("Case stmts out of sync!"); default: llvm_unreachable("Case stmts out of sync!");
case Intrinsic::x86_avx512_mask_add_ps_512: case Intrinsic::x86_avx512_mask_add_ps_512:
case Intrinsic::x86_avx512_mask_add_pd_512: case Intrinsic::x86_avx512_mask_add_pd_512:
V = Builder->CreateFAdd(Arg0, Arg1); V = Builder.CreateFAdd(Arg0, Arg1);
break; break;
case Intrinsic::x86_avx512_mask_sub_ps_512: case Intrinsic::x86_avx512_mask_sub_ps_512:
case Intrinsic::x86_avx512_mask_sub_pd_512: case Intrinsic::x86_avx512_mask_sub_pd_512:
V = Builder->CreateFSub(Arg0, Arg1); V = Builder.CreateFSub(Arg0, Arg1);
break; break;
case Intrinsic::x86_avx512_mask_mul_ps_512: case Intrinsic::x86_avx512_mask_mul_ps_512:
case Intrinsic::x86_avx512_mask_mul_pd_512: case Intrinsic::x86_avx512_mask_mul_pd_512:
V = Builder->CreateFMul(Arg0, Arg1); V = Builder.CreateFMul(Arg0, Arg1);
break; break;
case Intrinsic::x86_avx512_mask_div_ps_512: case Intrinsic::x86_avx512_mask_div_ps_512:
case Intrinsic::x86_avx512_mask_div_pd_512: case Intrinsic::x86_avx512_mask_div_pd_512:
V = Builder->CreateFDiv(Arg0, Arg1); V = Builder.CreateFDiv(Arg0, Arg1);
break; break;
} }
// Create a select for the masking. // Create a select for the masking.
V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2), V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
*Builder); Builder);
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
} }
} }
@ -2476,27 +2476,27 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Extract the element as scalars. // Extract the element as scalars.
Value *Arg0 = II->getArgOperand(0); Value *Arg0 = II->getArgOperand(0);
Value *Arg1 = II->getArgOperand(1); Value *Arg1 = II->getArgOperand(1);
Value *LHS = Builder->CreateExtractElement(Arg0, (uint64_t)0); Value *LHS = Builder.CreateExtractElement(Arg0, (uint64_t)0);
Value *RHS = Builder->CreateExtractElement(Arg1, (uint64_t)0); Value *RHS = Builder.CreateExtractElement(Arg1, (uint64_t)0);
Value *V; Value *V;
switch (II->getIntrinsicID()) { switch (II->getIntrinsicID()) {
default: llvm_unreachable("Case stmts out of sync!"); default: llvm_unreachable("Case stmts out of sync!");
case Intrinsic::x86_avx512_mask_add_ss_round: case Intrinsic::x86_avx512_mask_add_ss_round:
case Intrinsic::x86_avx512_mask_add_sd_round: case Intrinsic::x86_avx512_mask_add_sd_round:
V = Builder->CreateFAdd(LHS, RHS); V = Builder.CreateFAdd(LHS, RHS);
break; break;
case Intrinsic::x86_avx512_mask_sub_ss_round: case Intrinsic::x86_avx512_mask_sub_ss_round:
case Intrinsic::x86_avx512_mask_sub_sd_round: case Intrinsic::x86_avx512_mask_sub_sd_round:
V = Builder->CreateFSub(LHS, RHS); V = Builder.CreateFSub(LHS, RHS);
break; break;
case Intrinsic::x86_avx512_mask_mul_ss_round: case Intrinsic::x86_avx512_mask_mul_ss_round:
case Intrinsic::x86_avx512_mask_mul_sd_round: case Intrinsic::x86_avx512_mask_mul_sd_round:
V = Builder->CreateFMul(LHS, RHS); V = Builder.CreateFMul(LHS, RHS);
break; break;
case Intrinsic::x86_avx512_mask_div_ss_round: case Intrinsic::x86_avx512_mask_div_ss_round:
case Intrinsic::x86_avx512_mask_div_sd_round: case Intrinsic::x86_avx512_mask_div_sd_round:
V = Builder->CreateFDiv(LHS, RHS); V = Builder.CreateFDiv(LHS, RHS);
break; break;
} }
@ -2506,18 +2506,18 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// We don't need a select if we know the mask bit is a 1. // We don't need a select if we know the mask bit is a 1.
if (!C || !C->getValue()[0]) { if (!C || !C->getValue()[0]) {
// Cast the mask to an i1 vector and then extract the lowest element. // Cast the mask to an i1 vector and then extract the lowest element.
auto *MaskTy = VectorType::get(Builder->getInt1Ty(), auto *MaskTy = VectorType::get(Builder.getInt1Ty(),
cast<IntegerType>(Mask->getType())->getBitWidth()); cast<IntegerType>(Mask->getType())->getBitWidth());
Mask = Builder->CreateBitCast(Mask, MaskTy); Mask = Builder.CreateBitCast(Mask, MaskTy);
Mask = Builder->CreateExtractElement(Mask, (uint64_t)0); Mask = Builder.CreateExtractElement(Mask, (uint64_t)0);
// Extract the lowest element from the passthru operand. // Extract the lowest element from the passthru operand.
Value *Passthru = Builder->CreateExtractElement(II->getArgOperand(2), Value *Passthru = Builder.CreateExtractElement(II->getArgOperand(2),
(uint64_t)0); (uint64_t)0);
V = Builder->CreateSelect(Mask, V, Passthru); V = Builder.CreateSelect(Mask, V, Passthru);
} }
// Insert the result back into the original argument 0. // Insert the result back into the original argument 0.
V = Builder->CreateInsertElement(Arg0, V, (uint64_t)0); V = Builder.CreateInsertElement(Arg0, V, (uint64_t)0);
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
} }
@ -2598,7 +2598,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_pslli_d_512: case Intrinsic::x86_avx512_pslli_d_512:
case Intrinsic::x86_avx512_pslli_q_512: case Intrinsic::x86_avx512_pslli_q_512:
case Intrinsic::x86_avx512_pslli_w_512: case Intrinsic::x86_avx512_pslli_w_512:
if (Value *V = simplifyX86immShift(*II, *Builder)) if (Value *V = simplifyX86immShift(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -2629,7 +2629,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_psll_d_512: case Intrinsic::x86_avx512_psll_d_512:
case Intrinsic::x86_avx512_psll_q_512: case Intrinsic::x86_avx512_psll_q_512:
case Intrinsic::x86_avx512_psll_w_512: { case Intrinsic::x86_avx512_psll_w_512: {
if (Value *V = simplifyX86immShift(*II, *Builder)) if (Value *V = simplifyX86immShift(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
// SSE2/AVX2 uses only the first 64-bits of the 128-bit vector // SSE2/AVX2 uses only the first 64-bits of the 128-bit vector
@ -2673,7 +2673,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_psrlv_w_128: case Intrinsic::x86_avx512_psrlv_w_128:
case Intrinsic::x86_avx512_psrlv_w_256: case Intrinsic::x86_avx512_psrlv_w_256:
case Intrinsic::x86_avx512_psrlv_w_512: case Intrinsic::x86_avx512_psrlv_w_512:
if (Value *V = simplifyX86varShift(*II, *Builder)) if (Value *V = simplifyX86varShift(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -2683,7 +2683,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx2_pmulu_dq: case Intrinsic::x86_avx2_pmulu_dq:
case Intrinsic::x86_avx512_pmul_dq_512: case Intrinsic::x86_avx512_pmul_dq_512:
case Intrinsic::x86_avx512_pmulu_dq_512: { case Intrinsic::x86_avx512_pmulu_dq_512: {
if (Value *V = simplifyX86muldq(*II, *Builder)) if (Value *V = simplifyX86muldq(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
unsigned VWidth = II->getType()->getVectorNumElements(); unsigned VWidth = II->getType()->getVectorNumElements();
@ -2756,7 +2756,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
} }
case Intrinsic::x86_sse41_insertps: case Intrinsic::x86_sse41_insertps:
if (Value *V = simplifyX86insertps(*II, *Builder)) if (Value *V = simplifyX86insertps(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -2779,7 +2779,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
: nullptr; : nullptr;
// Attempt to simplify to a constant, shuffle vector or EXTRQI call. // Attempt to simplify to a constant, shuffle vector or EXTRQI call.
if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder)) if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
// EXTRQ only uses the lowest 64-bits of the first 128-bit vector // EXTRQ only uses the lowest 64-bits of the first 128-bit vector
@ -2811,7 +2811,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2)); ConstantInt *CIIndex = dyn_cast<ConstantInt>(II->getArgOperand(2));
// Attempt to simplify to a constant or shuffle vector. // Attempt to simplify to a constant or shuffle vector.
if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, *Builder)) if (Value *V = simplifyX86extrq(*II, Op0, CILength, CIIndex, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
// EXTRQI only uses the lowest 64-bits of the first 128-bit vector // EXTRQI only uses the lowest 64-bits of the first 128-bit vector
@ -2843,7 +2843,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
const APInt &V11 = CI11->getValue(); const APInt &V11 = CI11->getValue();
APInt Len = V11.zextOrTrunc(6); APInt Len = V11.zextOrTrunc(6);
APInt Idx = V11.lshr(8).zextOrTrunc(6); APInt Idx = V11.lshr(8).zextOrTrunc(6);
if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder)) if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
} }
@ -2876,7 +2876,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (CILength && CIIndex) { if (CILength && CIIndex) {
APInt Len = CILength->getValue().zextOrTrunc(6); APInt Len = CILength->getValue().zextOrTrunc(6);
APInt Idx = CIIndex->getValue().zextOrTrunc(6); APInt Idx = CIIndex->getValue().zextOrTrunc(6);
if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, *Builder)) if (Value *V = simplifyX86insertq(*II, Op0, Op1, Len, Idx, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
} }
@ -2930,7 +2930,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_ssse3_pshuf_b_128: case Intrinsic::x86_ssse3_pshuf_b_128:
case Intrinsic::x86_avx2_pshuf_b: case Intrinsic::x86_avx2_pshuf_b:
case Intrinsic::x86_avx512_pshuf_b_512: case Intrinsic::x86_avx512_pshuf_b_512:
if (Value *V = simplifyX86pshufb(*II, *Builder)) if (Value *V = simplifyX86pshufb(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -2940,13 +2940,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx_vpermilvar_pd: case Intrinsic::x86_avx_vpermilvar_pd:
case Intrinsic::x86_avx_vpermilvar_pd_256: case Intrinsic::x86_avx_vpermilvar_pd_256:
case Intrinsic::x86_avx512_vpermilvar_pd_512: case Intrinsic::x86_avx512_vpermilvar_pd_512:
if (Value *V = simplifyX86vpermilvar(*II, *Builder)) if (Value *V = simplifyX86vpermilvar(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
case Intrinsic::x86_avx2_permd: case Intrinsic::x86_avx2_permd:
case Intrinsic::x86_avx2_permps: case Intrinsic::x86_avx2_permps:
if (Value *V = simplifyX86vpermv(*II, *Builder)) if (Value *V = simplifyX86vpermv(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -2964,10 +2964,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx512_mask_permvar_sf_512: case Intrinsic::x86_avx512_mask_permvar_sf_512:
case Intrinsic::x86_avx512_mask_permvar_si_256: case Intrinsic::x86_avx512_mask_permvar_si_256:
case Intrinsic::x86_avx512_mask_permvar_si_512: case Intrinsic::x86_avx512_mask_permvar_si_512:
if (Value *V = simplifyX86vpermv(*II, *Builder)) { if (Value *V = simplifyX86vpermv(*II, Builder)) {
// We simplified the permuting, now create a select for the masking. // We simplified the permuting, now create a select for the masking.
V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2), V = emitX86MaskSelect(II->getArgOperand(3), V, II->getArgOperand(2),
*Builder); Builder);
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
} }
break; break;
@ -2976,7 +2976,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx_vperm2f128_ps_256: case Intrinsic::x86_avx_vperm2f128_ps_256:
case Intrinsic::x86_avx_vperm2f128_si_256: case Intrinsic::x86_avx_vperm2f128_si_256:
case Intrinsic::x86_avx2_vperm2i128: case Intrinsic::x86_avx2_vperm2i128:
if (Value *V = simplifyX86vperm2(*II, *Builder)) if (Value *V = simplifyX86vperm2(*II, Builder))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -3009,7 +3009,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_xop_vpcomd: case Intrinsic::x86_xop_vpcomd:
case Intrinsic::x86_xop_vpcomq: case Intrinsic::x86_xop_vpcomq:
case Intrinsic::x86_xop_vpcomw: case Intrinsic::x86_xop_vpcomw:
if (Value *V = simplifyX86vpcom(*II, *Builder, true)) if (Value *V = simplifyX86vpcom(*II, Builder, true))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -3017,7 +3017,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_xop_vpcomud: case Intrinsic::x86_xop_vpcomud:
case Intrinsic::x86_xop_vpcomuq: case Intrinsic::x86_xop_vpcomuq:
case Intrinsic::x86_xop_vpcomuw: case Intrinsic::x86_xop_vpcomuw:
if (Value *V = simplifyX86vpcom(*II, *Builder, false)) if (Value *V = simplifyX86vpcom(*II, Builder, false))
return replaceInstUsesWith(*II, V); return replaceInstUsesWith(*II, V);
break; break;
@ -3044,10 +3044,10 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (AllEltsOk) { if (AllEltsOk) {
// Cast the input vectors to byte vectors. // Cast the input vectors to byte vectors.
Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0), Value *Op0 = Builder.CreateBitCast(II->getArgOperand(0),
Mask->getType()); Mask->getType());
Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1), Value *Op1 = Builder.CreateBitCast(II->getArgOperand(1),
Mask->getType()); Mask->getType());
Value *Result = UndefValue::get(Op0->getType()); Value *Result = UndefValue::get(Op0->getType());
// Only extract each element once. // Only extract each element once.
@ -3067,13 +3067,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0; Value *Op0ToUse = (DL.isLittleEndian()) ? Op1 : Op0;
Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1; Value *Op1ToUse = (DL.isLittleEndian()) ? Op0 : Op1;
ExtractedElts[Idx] = ExtractedElts[Idx] =
Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse, Builder.CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
Builder->getInt32(Idx&15)); Builder.getInt32(Idx&15));
} }
// Insert this value into the result vector. // Insert this value into the result vector.
Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx], Result = Builder.CreateInsertElement(Result, ExtractedElts[Idx],
Builder->getInt32(i)); Builder.getInt32(i));
} }
return CastInst::Create(Instruction::BitCast, Result, CI.getType()); return CastInst::Create(Instruction::BitCast, Result, CI.getType());
} }
@ -3238,7 +3238,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (Mask == (S_NAN | Q_NAN)) { if (Mask == (S_NAN | Q_NAN)) {
// Equivalent of isnan. Replace with standard fcmp. // Equivalent of isnan. Replace with standard fcmp.
Value *FCmp = Builder->CreateFCmpUNO(Src0, Src0); Value *FCmp = Builder.CreateFCmpUNO(Src0, Src0);
FCmp->takeName(II); FCmp->takeName(II);
return replaceInstUsesWith(*II, FCmp); return replaceInstUsesWith(*II, FCmp);
} }
@ -3250,7 +3250,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// Clamp mask to used bits // Clamp mask to used bits
if ((Mask & FullMask) != Mask) { if ((Mask & FullMask) != Mask) {
CallInst *NewCall = Builder->CreateCall(II->getCalledFunction(), CallInst *NewCall = Builder.CreateCall(II->getCalledFunction(),
{ Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) } { Src0, ConstantInt::get(Src1->getType(), Mask & FullMask) }
); );
@ -3343,13 +3343,12 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// TODO: Also emit sub if only width is constant. // TODO: Also emit sub if only width is constant.
if (!CWidth && COffset && Offset == 0) { if (!CWidth && COffset && Offset == 0) {
Constant *KSize = ConstantInt::get(COffset->getType(), IntSize); Constant *KSize = ConstantInt::get(COffset->getType(), IntSize);
Value *ShiftVal = Builder->CreateSub(KSize, II->getArgOperand(2)); Value *ShiftVal = Builder.CreateSub(KSize, II->getArgOperand(2));
ShiftVal = Builder->CreateZExt(ShiftVal, II->getType()); ShiftVal = Builder.CreateZExt(ShiftVal, II->getType());
Value *Shl = Builder->CreateShl(Src, ShiftVal); Value *Shl = Builder.CreateShl(Src, ShiftVal);
Value *RightShift = Signed ? Value *RightShift = Signed ? Builder.CreateAShr(Shl, ShiftVal)
Builder->CreateAShr(Shl, ShiftVal) : : Builder.CreateLShr(Shl, ShiftVal);
Builder->CreateLShr(Shl, ShiftVal);
RightShift->takeName(II); RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift); return replaceInstUsesWith(*II, RightShift);
} }
@ -3360,17 +3359,15 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// TODO: This allows folding to undef when the hardware has specific // TODO: This allows folding to undef when the hardware has specific
// behavior? // behavior?
if (Offset + Width < IntSize) { if (Offset + Width < IntSize) {
Value *Shl = Builder->CreateShl(Src, IntSize - Offset - Width); Value *Shl = Builder.CreateShl(Src, IntSize - Offset - Width);
Value *RightShift = Signed ? Value *RightShift = Signed ? Builder.CreateAShr(Shl, IntSize - Width)
Builder->CreateAShr(Shl, IntSize - Width) : : Builder.CreateLShr(Shl, IntSize - Width);
Builder->CreateLShr(Shl, IntSize - Width);
RightShift->takeName(II); RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift); return replaceInstUsesWith(*II, RightShift);
} }
Value *RightShift = Signed ? Value *RightShift = Signed ? Builder.CreateAShr(Src, Offset)
Builder->CreateAShr(Src, Offset) : : Builder.CreateLShr(Src, Offset);
Builder->CreateLShr(Src, Offset);
RightShift->takeName(II); RightShift->takeName(II);
return replaceInstUsesWith(*II, RightShift); return replaceInstUsesWith(*II, RightShift);
@ -3439,7 +3436,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
} }
if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) { if (match(Src2, m_NaN()) || isa<UndefValue>(Src2)) {
CallInst *NewCall = Builder->CreateMinNum(Src0, Src1); CallInst *NewCall = Builder.CreateMinNum(Src0, Src1);
NewCall->copyFastMathFlags(II); NewCall->copyFastMathFlags(II);
NewCall->takeName(II); NewCall->takeName(II);
return replaceInstUsesWith(*II, NewCall); return replaceInstUsesWith(*II, NewCall);
@ -3451,7 +3448,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(), APFloat Result = fmed3AMDGCN(C0->getValueAPF(), C1->getValueAPF(),
C2->getValueAPF()); C2->getValueAPF());
return replaceInstUsesWith(*II, return replaceInstUsesWith(*II,
ConstantFP::get(Builder->getContext(), Result)); ConstantFP::get(Builder.getContext(), Result));
} }
} }
} }
@ -3494,7 +3491,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")}; Metadata *MDArgs[] = {MDString::get(II->getContext(), "exec")};
MDNode *MD = MDNode::get(II->getContext(), MDArgs); MDNode *MD = MDNode::get(II->getContext(), MDArgs);
Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)}; Value *Args[] = {MetadataAsValue::get(II->getContext(), MD)};
CallInst *NewCall = Builder->CreateCall(NewF, Args); CallInst *NewCall = Builder.CreateCall(NewF, Args);
NewCall->addAttribute(AttributeList::FunctionIndex, NewCall->addAttribute(AttributeList::FunctionIndex,
Attribute::Convergent); Attribute::Convergent);
NewCall->takeName(II); NewCall->takeName(II);
@ -3556,7 +3553,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
SrcLHS->getType()); SrcLHS->getType());
Value *Args[] = { SrcLHS, SrcRHS, Value *Args[] = { SrcLHS, SrcRHS,
ConstantInt::get(CC->getType(), SrcPred) }; ConstantInt::get(CC->getType(), SrcPred) };
CallInst *NewCall = Builder->CreateCall(NewF, Args); CallInst *NewCall = Builder.CreateCall(NewF, Args);
NewCall->takeName(II); NewCall->takeName(II);
return replaceInstUsesWith(*II, NewCall); return replaceInstUsesWith(*II, NewCall);
} }
@ -3633,16 +3630,14 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// the InstCombineIRInserter object. // the InstCombineIRInserter object.
Value *AssumeIntrinsic = II->getCalledValue(), *A, *B; Value *AssumeIntrinsic = II->getCalledValue(), *A, *B;
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) { if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
Builder->CreateCall(AssumeIntrinsic, A, II->getName()); Builder.CreateCall(AssumeIntrinsic, A, II->getName());
Builder->CreateCall(AssumeIntrinsic, B, II->getName()); Builder.CreateCall(AssumeIntrinsic, B, II->getName());
return eraseInstFromFunction(*II); return eraseInstFromFunction(*II);
} }
// assume(!(a || b)) -> assume(!a); assume(!b); // assume(!(a || b)) -> assume(!a); assume(!b);
if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) { if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A), Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(A), II->getName());
II->getName()); Builder.CreateCall(AssumeIntrinsic, Builder.CreateNot(B), II->getName());
Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
II->getName());
return eraseInstFromFunction(*II); return eraseInstFromFunction(*II);
} }
@ -3726,7 +3721,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return eraseInstFromFunction(*NextInst); return eraseInstFromFunction(*NextInst);
// Otherwise canonicalize guard(a); guard(b) -> guard(a & b). // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
II->setArgOperand(0, Builder->CreateAnd(CurrCond, NextCond)); II->setArgOperand(0, Builder.CreateAnd(CurrCond, NextCond));
return eraseInstFromFunction(*NextInst); return eraseInstFromFunction(*NextInst);
} }
break; break;
@ -4163,7 +4158,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
Value *NewArg = *AI; Value *NewArg = *AI;
if ((*AI)->getType() != ParamTy) if ((*AI)->getType() != ParamTy)
NewArg = Builder->CreateBitOrPointerCast(*AI, ParamTy); NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
Args.push_back(NewArg); Args.push_back(NewArg);
// Add any parameter attributes. // Add any parameter attributes.
@ -4189,7 +4184,7 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
// Must promote to pass through va_arg area! // Must promote to pass through va_arg area!
Instruction::CastOps opcode = Instruction::CastOps opcode =
CastInst::getCastOpcode(*AI, false, PTy, false); CastInst::getCastOpcode(*AI, false, PTy, false);
NewArg = Builder->CreateCast(opcode, *AI, PTy); NewArg = Builder.CreateCast(opcode, *AI, PTy);
} }
Args.push_back(NewArg); Args.push_back(NewArg);
@ -4215,10 +4210,10 @@ bool InstCombiner::transformConstExprCastCall(CallSite CS) {
CallSite NewCS; CallSite NewCS;
if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) { if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
NewCS = Builder->CreateInvoke(Callee, II->getNormalDest(), NewCS = Builder.CreateInvoke(Callee, II->getNormalDest(),
II->getUnwindDest(), Args, OpBundles); II->getUnwindDest(), Args, OpBundles);
} else { } else {
NewCS = Builder->CreateCall(Callee, Args, OpBundles); NewCS = Builder.CreateCall(Callee, Args, OpBundles);
cast<CallInst>(NewCS.getInstruction()) cast<CallInst>(NewCS.getInstruction())
->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind()); ->setTailCallKind(cast<CallInst>(Caller)->getTailCallKind());
} }
@ -4328,7 +4323,7 @@ InstCombiner::transformCallThroughTrampoline(CallSite CS,
// Add the chain argument and attributes. // Add the chain argument and attributes.
Value *NestVal = Tramp->getArgOperand(2); Value *NestVal = Tramp->getArgOperand(2);
if (NestVal->getType() != NestTy) if (NestVal->getType() != NestTy)
NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest"); NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
NewArgs.push_back(NestVal); NewArgs.push_back(NestVal);
NewArgAttrs.push_back(NestAttr); NewArgAttrs.push_back(NestAttr);
} }

View File

@ -84,7 +84,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
AllocaInst &AI) { AllocaInst &AI) {
PointerType *PTy = cast<PointerType>(CI.getType()); PointerType *PTy = cast<PointerType>(CI.getType());
BuilderTy AllocaBuilder(*Builder); BuilderTy AllocaBuilder(Builder);
AllocaBuilder.SetInsertPoint(&AI); AllocaBuilder.SetInsertPoint(&AI);
// Get the type really allocated and the type casted to. // Get the type really allocated and the type casted to.
@ -433,14 +433,14 @@ static Instruction *foldVecTruncToExtElt(TruncInst &Trunc, InstCombiner &IC) {
unsigned NumVecElts = VecWidth / DestWidth; unsigned NumVecElts = VecWidth / DestWidth;
if (VecType->getElementType() != DestType) { if (VecType->getElementType() != DestType) {
VecType = VectorType::get(DestType, NumVecElts); VecType = VectorType::get(DestType, NumVecElts);
VecInput = IC.Builder->CreateBitCast(VecInput, VecType, "bc"); VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
} }
unsigned Elt = ShiftAmount / DestWidth; unsigned Elt = ShiftAmount / DestWidth;
if (IC.getDataLayout().isBigEndian()) if (IC.getDataLayout().isBigEndian())
Elt = NumVecElts - 1 - Elt; Elt = NumVecElts - 1 - Elt;
return ExtractElementInst::Create(VecInput, IC.Builder->getInt32(Elt)); return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
} }
/// Try to narrow the width of bitwise logic instructions with constants. /// Try to narrow the width of bitwise logic instructions with constants.
@ -459,7 +459,7 @@ Instruction *InstCombiner::shrinkBitwiseLogic(TruncInst &Trunc) {
// trunc (logic X, C) --> logic (trunc X, C') // trunc (logic X, C) --> logic (trunc X, C')
Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy); Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
Value *NarrowOp0 = Builder->CreateTrunc(LogicOp->getOperand(0), DestTy); Value *NarrowOp0 = Builder.CreateTrunc(LogicOp->getOperand(0), DestTy);
return BinaryOperator::Create(LogicOp->getOpcode(), NarrowOp0, NarrowC); return BinaryOperator::Create(LogicOp->getOpcode(), NarrowOp0, NarrowC);
} }
@ -553,7 +553,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector. // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
if (DestTy->getScalarSizeInBits() == 1) { if (DestTy->getScalarSizeInBits() == 1) {
Constant *One = ConstantInt::get(SrcTy, 1); Constant *One = ConstantInt::get(SrcTy, 1);
Src = Builder->CreateAnd(Src, One); Src = Builder.CreateAnd(Src, One);
Value *Zero = Constant::getNullValue(Src->getType()); Value *Zero = Constant::getNullValue(Src->getType());
return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero); return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
} }
@ -579,7 +579,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// Since we're doing an lshr and a zero extend, and know that the shift // Since we're doing an lshr and a zero extend, and know that the shift
// amount is smaller than ASize, it is always safe to do the shift in A's // amount is smaller than ASize, it is always safe to do the shift in A's
// type, then zero extend or truncate to the result. // type, then zero extend or truncate to the result.
Value *Shift = Builder->CreateLShr(A, Cst->getZExtValue()); Value *Shift = Builder.CreateLShr(A, Cst->getZExtValue());
Shift->takeName(Src); Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, DestTy, false); return CastInst::CreateIntegerCast(Shift, DestTy, false);
} }
@ -609,7 +609,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(), return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
std::min(ShiftAmt, ASize - 1))); std::min(ShiftAmt, ASize - 1)));
if (SExt->hasOneUse()) { if (SExt->hasOneUse()) {
Value *Shift = Builder->CreateAShr(A, std::min(ShiftAmt, ASize-1)); Value *Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
Shift->takeName(Src); Shift->takeName(Src);
return CastInst::CreateIntegerCast(Shift, CI.getType(), true); return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
} }
@ -619,10 +619,10 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
if (Instruction *I = shrinkBitwiseLogic(CI)) if (Instruction *I = shrinkBitwiseLogic(CI))
return I; return I;
if (Instruction *I = shrinkSplatShuffle(CI, *Builder)) if (Instruction *I = shrinkSplatShuffle(CI, Builder))
return I; return I;
if (Instruction *I = shrinkInsertElt(CI, *Builder)) if (Instruction *I = shrinkInsertElt(CI, Builder))
return I; return I;
if (Src->hasOneUse() && isa<IntegerType>(SrcTy) && if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
@ -635,7 +635,7 @@ Instruction *InstCombiner::visitTrunc(TruncInst &CI) {
// FoldShiftByConstant and is the extend in reg pattern. // FoldShiftByConstant and is the extend in reg pattern.
const unsigned DestSize = DestTy->getScalarSizeInBits(); const unsigned DestSize = DestTy->getScalarSizeInBits();
if (Cst->getValue().ult(DestSize)) { if (Cst->getValue().ult(DestSize)) {
Value *NewTrunc = Builder->CreateTrunc(A, DestTy, A->getName() + ".tr"); Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
return BinaryOperator::Create( return BinaryOperator::Create(
Instruction::Shl, NewTrunc, Instruction::Shl, NewTrunc,
@ -667,13 +667,13 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
Value *In = ICI->getOperand(0); Value *In = ICI->getOperand(0);
Value *Sh = ConstantInt::get(In->getType(), Value *Sh = ConstantInt::get(In->getType(),
In->getType()->getScalarSizeInBits() - 1); In->getType()->getScalarSizeInBits() - 1);
In = Builder->CreateLShr(In, Sh, In->getName() + ".lobit"); In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
if (In->getType() != CI.getType()) if (In->getType() != CI.getType())
In = Builder->CreateIntCast(In, CI.getType(), false/*ZExt*/); In = Builder.CreateIntCast(In, CI.getType(), false /*ZExt*/);
if (ICI->getPredicate() == ICmpInst::ICMP_SGT) { if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
Constant *One = ConstantInt::get(In->getType(), 1); Constant *One = ConstantInt::get(In->getType(), 1);
In = Builder->CreateXor(In, One, In->getName() + ".not"); In = Builder.CreateXor(In, One, In->getName() + ".not");
} }
return replaceInstUsesWith(CI, In); return replaceInstUsesWith(CI, In);
@ -712,19 +712,19 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
if (ShAmt) { if (ShAmt) {
// Perform a logical shr by shiftamt. // Perform a logical shr by shiftamt.
// Insert the shift to put the result in the low bit. // Insert the shift to put the result in the low bit.
In = Builder->CreateLShr(In, ConstantInt::get(In->getType(), ShAmt), In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
In->getName() + ".lobit"); In->getName() + ".lobit");
} }
if (!Op1CV.isNullValue() == isNE) { // Toggle the low bit. if (!Op1CV.isNullValue() == isNE) { // Toggle the low bit.
Constant *One = ConstantInt::get(In->getType(), 1); Constant *One = ConstantInt::get(In->getType(), 1);
In = Builder->CreateXor(In, One); In = Builder.CreateXor(In, One);
} }
if (CI.getType() == In->getType()) if (CI.getType() == In->getType())
return replaceInstUsesWith(CI, In); return replaceInstUsesWith(CI, In);
Value *IntCast = Builder->CreateIntCast(In, CI.getType(), false); Value *IntCast = Builder.CreateIntCast(In, CI.getType(), false);
return replaceInstUsesWith(CI, IntCast); return replaceInstUsesWith(CI, IntCast);
} }
} }
@ -747,19 +747,19 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
if (UnknownBit.countPopulation() == 1) { if (UnknownBit.countPopulation() == 1) {
if (!DoTransform) return ICI; if (!DoTransform) return ICI;
Value *Result = Builder->CreateXor(LHS, RHS); Value *Result = Builder.CreateXor(LHS, RHS);
// Mask off any bits that are set and won't be shifted away. // Mask off any bits that are set and won't be shifted away.
if (KnownLHS.One.uge(UnknownBit)) if (KnownLHS.One.uge(UnknownBit))
Result = Builder->CreateAnd(Result, Result = Builder.CreateAnd(Result,
ConstantInt::get(ITy, UnknownBit)); ConstantInt::get(ITy, UnknownBit));
// Shift the bit we're testing down to the lsb. // Shift the bit we're testing down to the lsb.
Result = Builder->CreateLShr( Result = Builder.CreateLShr(
Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros())); Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
if (ICI->getPredicate() == ICmpInst::ICMP_EQ) if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
Result = Builder->CreateXor(Result, ConstantInt::get(ITy, 1)); Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
Result->takeName(ICI); Result->takeName(ICI);
return replaceInstUsesWith(CI, Result); return replaceInstUsesWith(CI, Result);
} }
@ -959,7 +959,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
if (SrcSize < DstSize) { if (SrcSize < DstSize) {
APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize)); APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
Constant *AndConst = ConstantInt::get(A->getType(), AndValue); Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
Value *And = Builder->CreateAnd(A, AndConst, CSrc->getName()+".mask"); Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
return new ZExtInst(And, CI.getType()); return new ZExtInst(And, CI.getType());
} }
@ -969,7 +969,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
AndValue)); AndValue));
} }
if (SrcSize > DstSize) { if (SrcSize > DstSize) {
Value *Trunc = Builder->CreateTrunc(A, CI.getType()); Value *Trunc = Builder.CreateTrunc(A, CI.getType());
APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize)); APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
return BinaryOperator::CreateAnd(Trunc, return BinaryOperator::CreateAnd(Trunc,
ConstantInt::get(Trunc->getType(), ConstantInt::get(Trunc->getType(),
@ -991,8 +991,8 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
(transformZExtICmp(LHS, CI, false) || (transformZExtICmp(LHS, CI, false) ||
transformZExtICmp(RHS, CI, false))) { transformZExtICmp(RHS, CI, false))) {
// zext (or icmp, icmp) -> or (zext icmp), (zext icmp) // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
Value *LCast = Builder->CreateZExt(LHS, CI.getType(), LHS->getName()); Value *LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
Value *RCast = Builder->CreateZExt(RHS, CI.getType(), RHS->getName()); Value *RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast); BinaryOperator *Or = BinaryOperator::Create(Instruction::Or, LCast, RCast);
// Perform the elimination. // Perform the elimination.
@ -1019,7 +1019,7 @@ Instruction *InstCombiner::visitZExt(ZExtInst &CI) {
match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) && match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
X->getType() == CI.getType()) { X->getType() == CI.getType()) {
Constant *ZC = ConstantExpr::getZExt(C, CI.getType()); Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
return BinaryOperator::CreateXor(Builder->CreateAnd(X, ZC), ZC); return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
} }
return nullptr; return nullptr;
@ -1042,12 +1042,12 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
Value *Sh = ConstantInt::get(Op0->getType(), Value *Sh = ConstantInt::get(Op0->getType(),
Op0->getType()->getScalarSizeInBits()-1); Op0->getType()->getScalarSizeInBits()-1);
Value *In = Builder->CreateAShr(Op0, Sh, Op0->getName()+".lobit"); Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
if (In->getType() != CI.getType()) if (In->getType() != CI.getType())
In = Builder->CreateIntCast(In, CI.getType(), true/*SExt*/); In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
if (Pred == ICmpInst::ICMP_SGT) if (Pred == ICmpInst::ICMP_SGT)
In = Builder->CreateNot(In, In->getName()+".not"); In = Builder.CreateNot(In, In->getName() + ".not");
return replaceInstUsesWith(CI, In); return replaceInstUsesWith(CI, In);
} }
} }
@ -1078,26 +1078,26 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
unsigned ShiftAmt = KnownZeroMask.countTrailingZeros(); unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
// Perform a right shift to place the desired bit in the LSB. // Perform a right shift to place the desired bit in the LSB.
if (ShiftAmt) if (ShiftAmt)
In = Builder->CreateLShr(In, In = Builder.CreateLShr(In,
ConstantInt::get(In->getType(), ShiftAmt)); ConstantInt::get(In->getType(), ShiftAmt));
// At this point "In" is either 1 or 0. Subtract 1 to turn // At this point "In" is either 1 or 0. Subtract 1 to turn
// {1, 0} -> {0, -1}. // {1, 0} -> {0, -1}.
In = Builder->CreateAdd(In, In = Builder.CreateAdd(In,
ConstantInt::getAllOnesValue(In->getType()), ConstantInt::getAllOnesValue(In->getType()),
"sext"); "sext");
} else { } else {
// sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
// sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
unsigned ShiftAmt = KnownZeroMask.countLeadingZeros(); unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
// Perform a left shift to place the desired bit in the MSB. // Perform a left shift to place the desired bit in the MSB.
if (ShiftAmt) if (ShiftAmt)
In = Builder->CreateShl(In, In = Builder.CreateShl(In,
ConstantInt::get(In->getType(), ShiftAmt)); ConstantInt::get(In->getType(), ShiftAmt));
// Distribute the bit over the whole bit width. // Distribute the bit over the whole bit width.
In = Builder->CreateAShr(In, ConstantInt::get(In->getType(), In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
KnownZeroMask.getBitWidth() - 1), "sext"); KnownZeroMask.getBitWidth() - 1), "sext");
} }
if (CI.getType() == In->getType()) if (CI.getType() == In->getType())
@ -1190,7 +1190,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// instead. // instead.
KnownBits Known = computeKnownBits(Src, 0, &CI); KnownBits Known = computeKnownBits(Src, 0, &CI);
if (Known.isNonNegative()) { if (Known.isNonNegative()) {
Value *ZExt = Builder->CreateZExt(Src, DestTy); Value *ZExt = Builder.CreateZExt(Src, DestTy);
return replaceInstUsesWith(CI, ZExt); return replaceInstUsesWith(CI, ZExt);
} }
@ -1216,7 +1216,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
// We need to emit a shl + ashr to do the sign extend. // We need to emit a shl + ashr to do the sign extend.
Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize); Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
return BinaryOperator::CreateAShr(Builder->CreateShl(Res, ShAmt, "sext"), return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
ShAmt); ShAmt);
} }
@ -1228,7 +1228,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
unsigned SrcBitSize = SrcTy->getScalarSizeInBits(); unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
unsigned DestBitSize = DestTy->getScalarSizeInBits(); unsigned DestBitSize = DestTy->getScalarSizeInBits();
Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize); Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
return BinaryOperator::CreateAShr(Builder->CreateShl(X, ShAmt), ShAmt); return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
} }
if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src)) if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
@ -1257,7 +1257,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
unsigned SrcDstSize = CI.getType()->getScalarSizeInBits(); unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize; unsigned ShAmt = CA->getZExtValue()+SrcDstSize-MidSize;
Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt); Constant *ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
A = Builder->CreateShl(A, ShAmtV, CI.getName()); A = Builder.CreateShl(A, ShAmtV, CI.getName());
return BinaryOperator::CreateAShr(A, ShAmtV); return BinaryOperator::CreateAShr(A, ShAmtV);
} }
@ -1346,9 +1346,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// case of interest here is (float)((double)float + float)). // case of interest here is (float)((double)float + float)).
if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) { if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType()) if (LHSOrig->getType() != CI.getType())
LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType()); LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType()) if (RHSOrig->getType() != CI.getType())
RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType()); RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI = Instruction *RI =
BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig); BinaryOperator::Create(OpI->getOpcode(), LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI); RI->copyFastMathFlags(OpI);
@ -1363,9 +1363,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// in the destination format if it can represent both sources. // in the destination format if it can represent both sources.
if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) { if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType()) if (LHSOrig->getType() != CI.getType())
LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType()); LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType()) if (RHSOrig->getType() != CI.getType())
RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType()); RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI = Instruction *RI =
BinaryOperator::CreateFMul(LHSOrig, RHSOrig); BinaryOperator::CreateFMul(LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI); RI->copyFastMathFlags(OpI);
@ -1381,9 +1381,9 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// TODO: Tighten bound via rigorous analysis of the unbalanced case. // TODO: Tighten bound via rigorous analysis of the unbalanced case.
if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) { if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
if (LHSOrig->getType() != CI.getType()) if (LHSOrig->getType() != CI.getType())
LHSOrig = Builder->CreateFPExt(LHSOrig, CI.getType()); LHSOrig = Builder.CreateFPExt(LHSOrig, CI.getType());
if (RHSOrig->getType() != CI.getType()) if (RHSOrig->getType() != CI.getType())
RHSOrig = Builder->CreateFPExt(RHSOrig, CI.getType()); RHSOrig = Builder.CreateFPExt(RHSOrig, CI.getType());
Instruction *RI = Instruction *RI =
BinaryOperator::CreateFDiv(LHSOrig, RHSOrig); BinaryOperator::CreateFDiv(LHSOrig, RHSOrig);
RI->copyFastMathFlags(OpI); RI->copyFastMathFlags(OpI);
@ -1398,11 +1398,11 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
if (SrcWidth == OpWidth) if (SrcWidth == OpWidth)
break; break;
if (LHSWidth < SrcWidth) if (LHSWidth < SrcWidth)
LHSOrig = Builder->CreateFPExt(LHSOrig, RHSOrig->getType()); LHSOrig = Builder.CreateFPExt(LHSOrig, RHSOrig->getType());
else if (RHSWidth <= SrcWidth) else if (RHSWidth <= SrcWidth)
RHSOrig = Builder->CreateFPExt(RHSOrig, LHSOrig->getType()); RHSOrig = Builder.CreateFPExt(RHSOrig, LHSOrig->getType());
if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) { if (LHSOrig != OpI->getOperand(0) || RHSOrig != OpI->getOperand(1)) {
Value *ExactResult = Builder->CreateFRem(LHSOrig, RHSOrig); Value *ExactResult = Builder.CreateFRem(LHSOrig, RHSOrig);
if (Instruction *RI = dyn_cast<Instruction>(ExactResult)) if (Instruction *RI = dyn_cast<Instruction>(ExactResult))
RI->copyFastMathFlags(OpI); RI->copyFastMathFlags(OpI);
return CastInst::CreateFPCast(ExactResult, CI.getType()); return CastInst::CreateFPCast(ExactResult, CI.getType());
@ -1411,8 +1411,8 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// (fptrunc (fneg x)) -> (fneg (fptrunc x)) // (fptrunc (fneg x)) -> (fneg (fptrunc x))
if (BinaryOperator::isFNeg(OpI)) { if (BinaryOperator::isFNeg(OpI)) {
Value *InnerTrunc = Builder->CreateFPTrunc(OpI->getOperand(1), Value *InnerTrunc = Builder.CreateFPTrunc(OpI->getOperand(1),
CI.getType()); CI.getType());
Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc); Instruction *RI = BinaryOperator::CreateFNeg(InnerTrunc);
RI->copyFastMathFlags(OpI); RI->copyFastMathFlags(OpI);
return RI; return RI;
@ -1431,10 +1431,8 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
(isa<ConstantFP>(SI->getOperand(1)) || (isa<ConstantFP>(SI->getOperand(1)) ||
isa<ConstantFP>(SI->getOperand(2))) && isa<ConstantFP>(SI->getOperand(2))) &&
matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) { matchSelectPattern(SI, LHS, RHS).Flavor == SPF_UNKNOWN) {
Value *LHSTrunc = Builder->CreateFPTrunc(SI->getOperand(1), Value *LHSTrunc = Builder.CreateFPTrunc(SI->getOperand(1), CI.getType());
CI.getType()); Value *RHSTrunc = Builder.CreateFPTrunc(SI->getOperand(2), CI.getType());
Value *RHSTrunc = Builder->CreateFPTrunc(SI->getOperand(2),
CI.getType());
return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc); return SelectInst::Create(SI->getOperand(0), LHSTrunc, RHSTrunc);
} }
@ -1464,7 +1462,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
// Do unary FP operation on smaller type. // Do unary FP operation on smaller type.
// (fptrunc (fabs x)) -> (fabs (fptrunc x)) // (fptrunc (fabs x)) -> (fabs (fptrunc x))
Value *InnerTrunc = Builder->CreateFPTrunc(Src, CI.getType()); Value *InnerTrunc = Builder.CreateFPTrunc(Src, CI.getType());
Type *IntrinsicType[] = { CI.getType() }; Type *IntrinsicType[] = { CI.getType() };
Function *Overload = Intrinsic::getDeclaration( Function *Overload = Intrinsic::getDeclaration(
CI.getModule(), II->getIntrinsicID(), IntrinsicType); CI.getModule(), II->getIntrinsicID(), IntrinsicType);
@ -1481,7 +1479,7 @@ Instruction *InstCombiner::visitFPTrunc(FPTruncInst &CI) {
} }
} }
if (Instruction *I = shrinkInsertElt(CI, *Builder)) if (Instruction *I = shrinkInsertElt(CI, Builder))
return I; return I;
return nullptr; return nullptr;
@ -1576,7 +1574,7 @@ Instruction *InstCombiner::visitIntToPtr(IntToPtrInst &CI) {
if (CI.getType()->isVectorTy()) // Handle vectors of pointers. if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements()); Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
Value *P = Builder->CreateZExtOrTrunc(CI.getOperand(0), Ty); Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
return new IntToPtrInst(P, CI.getType()); return new IntToPtrInst(P, CI.getType());
} }
@ -1626,7 +1624,7 @@ Instruction *InstCombiner::visitPtrToInt(PtrToIntInst &CI) {
if (Ty->isVectorTy()) // Handle vectors of pointers. if (Ty->isVectorTy()) // Handle vectors of pointers.
PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements()); PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
Value *P = Builder->CreatePtrToInt(CI.getOperand(0), PtrTy); Value *P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false); return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
} }
@ -1652,7 +1650,7 @@ static Instruction *optimizeVectorResize(Value *InVal, VectorType *DestTy,
return nullptr; return nullptr;
SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements()); SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
InVal = IC.Builder->CreateBitCast(InVal, SrcTy); InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
} }
// Now that the element types match, get the shuffle mask and RHS of the // Now that the element types match, get the shuffle mask and RHS of the
@ -1832,8 +1830,8 @@ static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
for (unsigned i = 0, e = Elements.size(); i != e; ++i) { for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
if (!Elements[i]) continue; // Unset element. if (!Elements[i]) continue; // Unset element.
Result = IC.Builder->CreateInsertElement(Result, Elements[i], Result = IC.Builder.CreateInsertElement(Result, Elements[i],
IC.Builder->getInt32(i)); IC.Builder.getInt32(i));
} }
return Result; return Result;
@ -1858,8 +1856,8 @@ static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements(); unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
auto *NewVecType = VectorType::get(DestType, NumElts); auto *NewVecType = VectorType::get(DestType, NumElts);
auto *NewBC = IC.Builder->CreateBitCast(ExtElt->getVectorOperand(), auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
NewVecType, "bc"); NewVecType, "bc");
return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand()); return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
} }
@ -2031,8 +2029,8 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
// For each old PHI node, create a corresponding new PHI node with a type A. // For each old PHI node, create a corresponding new PHI node with a type A.
SmallDenseMap<PHINode *, PHINode *> NewPNodes; SmallDenseMap<PHINode *, PHINode *> NewPNodes;
for (auto *OldPN : OldPhiNodes) { for (auto *OldPN : OldPhiNodes) {
Builder->SetInsertPoint(OldPN); Builder.SetInsertPoint(OldPN);
PHINode *NewPN = Builder->CreatePHI(DestTy, OldPN->getNumOperands()); PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
NewPNodes[OldPN] = NewPN; NewPNodes[OldPN] = NewPN;
} }
@ -2045,8 +2043,8 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
if (auto *C = dyn_cast<Constant>(V)) { if (auto *C = dyn_cast<Constant>(V)) {
NewV = ConstantExpr::getBitCast(C, DestTy); NewV = ConstantExpr::getBitCast(C, DestTy);
} else if (auto *LI = dyn_cast<LoadInst>(V)) { } else if (auto *LI = dyn_cast<LoadInst>(V)) {
Builder->SetInsertPoint(LI->getNextNode()); Builder.SetInsertPoint(LI->getNextNode());
NewV = Builder->CreateBitCast(LI, DestTy); NewV = Builder.CreateBitCast(LI, DestTy);
Worklist.Add(LI); Worklist.Add(LI);
} else if (auto *BCI = dyn_cast<BitCastInst>(V)) { } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
NewV = BCI->getOperand(0); NewV = BCI->getOperand(0);
@ -2062,9 +2060,9 @@ Instruction *InstCombiner::optimizeBitCastFromPhi(CastInst &CI, PHINode *PN) {
for (User *U : PN->users()) { for (User *U : PN->users()) {
auto *SI = dyn_cast<StoreInst>(U); auto *SI = dyn_cast<StoreInst>(U);
if (SI && SI->isSimple() && SI->getOperand(0) == PN) { if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
Builder->SetInsertPoint(SI); Builder.SetInsertPoint(SI);
auto *NewBC = auto *NewBC =
cast<BitCastInst>(Builder->CreateBitCast(NewPNodes[PN], SrcTy)); cast<BitCastInst>(Builder.CreateBitCast(NewPNodes[PN], SrcTy));
SI->setOperand(0, NewBC); SI->setOperand(0, NewBC);
Worklist.Add(SI); Worklist.Add(SI);
assert(hasStoreUsersOnly(*NewBC)); assert(hasStoreUsersOnly(*NewBC));
@ -2119,14 +2117,14 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// If we found a path from the src to dest, create the getelementptr now. // If we found a path from the src to dest, create the getelementptr now.
if (SrcElTy == DstElTy) { if (SrcElTy == DstElTy) {
SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder->getInt32(0)); SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
return GetElementPtrInst::CreateInBounds(Src, Idxs); return GetElementPtrInst::CreateInBounds(Src, Idxs);
} }
} }
if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) { if (VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) { if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
Value *Elem = Builder->CreateBitCast(Src, DestVTy->getElementType()); Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
return InsertElementInst::Create(UndefValue::get(DestTy), Elem, return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
// FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast) // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
@ -2159,7 +2157,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
// scalar-scalar cast. // scalar-scalar cast.
if (!DestTy->isVectorTy()) { if (!DestTy->isVectorTy()) {
Value *Elem = Value *Elem =
Builder->CreateExtractElement(Src, Builder.CreateExtractElement(Src,
Constant::getNullValue(Type::getInt32Ty(CI.getContext()))); Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
return CastInst::Create(Instruction::BitCast, Elem, DestTy); return CastInst::Create(Instruction::BitCast, Elem, DestTy);
} }
@ -2188,8 +2186,8 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
Tmp->getOperand(0)->getType() == DestTy) || Tmp->getOperand(0)->getType() == DestTy) ||
((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) && ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
Tmp->getOperand(0)->getType() == DestTy)) { Tmp->getOperand(0)->getType() == DestTy)) {
Value *LHS = Builder->CreateBitCast(SVI->getOperand(0), DestTy); Value *LHS = Builder.CreateBitCast(SVI->getOperand(0), DestTy);
Value *RHS = Builder->CreateBitCast(SVI->getOperand(1), DestTy); Value *RHS = Builder.CreateBitCast(SVI->getOperand(1), DestTy);
// Return a new shuffle vector. Use the same element ID's, as we // Return a new shuffle vector. Use the same element ID's, as we
// know the vector types match #elts. // know the vector types match #elts.
return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2)); return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
@ -2205,10 +2203,10 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
if (Instruction *I = canonicalizeBitCastExtElt(CI, *this)) if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
return I; return I;
if (Instruction *I = foldBitCastBitwiseLogic(CI, *Builder)) if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
return I; return I;
if (Instruction *I = foldBitCastSelect(CI, *Builder)) if (Instruction *I = foldBitCastSelect(CI, Builder))
return I; return I;
if (SrcTy->isPointerTy()) if (SrcTy->isPointerTy())
@ -2232,7 +2230,7 @@ Instruction *InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
MidTy = VectorType::get(MidTy, VT->getNumElements()); MidTy = VectorType::get(MidTy, VT->getNumElements());
} }
Value *NewBitCast = Builder->CreateBitCast(Src, MidTy); Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
return new AddrSpaceCastInst(NewBitCast, CI.getType()); return new AddrSpaceCastInst(NewBitCast, CI.getType());
} }

View File

@ -392,7 +392,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
Type *IntPtrTy = DL.getIntPtrType(GEP->getType()); Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
unsigned PtrSize = IntPtrTy->getIntegerBitWidth(); unsigned PtrSize = IntPtrTy->getIntegerBitWidth();
if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize) if (Idx->getType()->getPrimitiveSizeInBits() > PtrSize)
Idx = Builder->CreateTrunc(Idx, IntPtrTy); Idx = Builder.CreateTrunc(Idx, IntPtrTy);
} }
// If the comparison is only true for one or two elements, emit direct // If the comparison is only true for one or two elements, emit direct
@ -400,7 +400,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
if (SecondTrueElement != Overdefined) { if (SecondTrueElement != Overdefined) {
// None true -> false. // None true -> false.
if (FirstTrueElement == Undefined) if (FirstTrueElement == Undefined)
return replaceInstUsesWith(ICI, Builder->getFalse()); return replaceInstUsesWith(ICI, Builder.getFalse());
Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement); Value *FirstTrueIdx = ConstantInt::get(Idx->getType(), FirstTrueElement);
@ -409,9 +409,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx); return new ICmpInst(ICmpInst::ICMP_EQ, Idx, FirstTrueIdx);
// True for two elements -> 'i == 47 | i == 72'. // True for two elements -> 'i == 47 | i == 72'.
Value *C1 = Builder->CreateICmpEQ(Idx, FirstTrueIdx); Value *C1 = Builder.CreateICmpEQ(Idx, FirstTrueIdx);
Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement); Value *SecondTrueIdx = ConstantInt::get(Idx->getType(), SecondTrueElement);
Value *C2 = Builder->CreateICmpEQ(Idx, SecondTrueIdx); Value *C2 = Builder.CreateICmpEQ(Idx, SecondTrueIdx);
return BinaryOperator::CreateOr(C1, C2); return BinaryOperator::CreateOr(C1, C2);
} }
@ -420,7 +420,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
if (SecondFalseElement != Overdefined) { if (SecondFalseElement != Overdefined) {
// None false -> true. // None false -> true.
if (FirstFalseElement == Undefined) if (FirstFalseElement == Undefined)
return replaceInstUsesWith(ICI, Builder->getTrue()); return replaceInstUsesWith(ICI, Builder.getTrue());
Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement); Value *FirstFalseIdx = ConstantInt::get(Idx->getType(), FirstFalseElement);
@ -429,9 +429,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx); return new ICmpInst(ICmpInst::ICMP_NE, Idx, FirstFalseIdx);
// False for two elements -> 'i != 47 & i != 72'. // False for two elements -> 'i != 47 & i != 72'.
Value *C1 = Builder->CreateICmpNE(Idx, FirstFalseIdx); Value *C1 = Builder.CreateICmpNE(Idx, FirstFalseIdx);
Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement); Value *SecondFalseIdx = ConstantInt::get(Idx->getType(),SecondFalseElement);
Value *C2 = Builder->CreateICmpNE(Idx, SecondFalseIdx); Value *C2 = Builder.CreateICmpNE(Idx, SecondFalseIdx);
return BinaryOperator::CreateAnd(C1, C2); return BinaryOperator::CreateAnd(C1, C2);
} }
@ -443,7 +443,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
// Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1). // Generate (i-FirstTrue) <u (TrueRangeEnd-FirstTrue+1).
if (FirstTrueElement) { if (FirstTrueElement) {
Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement); Value *Offs = ConstantInt::get(Idx->getType(), -FirstTrueElement);
Idx = Builder->CreateAdd(Idx, Offs); Idx = Builder.CreateAdd(Idx, Offs);
} }
Value *End = ConstantInt::get(Idx->getType(), Value *End = ConstantInt::get(Idx->getType(),
@ -457,7 +457,7 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
// Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse). // Generate (i-FirstFalse) >u (FalseRangeEnd-FirstFalse).
if (FirstFalseElement) { if (FirstFalseElement) {
Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement); Value *Offs = ConstantInt::get(Idx->getType(), -FirstFalseElement);
Idx = Builder->CreateAdd(Idx, Offs); Idx = Builder.CreateAdd(Idx, Offs);
} }
Value *End = ConstantInt::get(Idx->getType(), Value *End = ConstantInt::get(Idx->getType(),
@ -481,9 +481,9 @@ Instruction *InstCombiner::foldCmpLoadFromIndexedGlobal(GetElementPtrInst *GEP,
Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount); Ty = DL.getSmallestLegalIntType(Init->getContext(), ArrayElementCount);
if (Ty) { if (Ty) {
Value *V = Builder->CreateIntCast(Idx, Ty, false); Value *V = Builder.CreateIntCast(Idx, Ty, false);
V = Builder->CreateLShr(ConstantInt::get(Ty, MagicBitvector), V); V = Builder.CreateLShr(ConstantInt::get(Ty, MagicBitvector), V);
V = Builder->CreateAnd(ConstantInt::get(Ty, 1), V); V = Builder.CreateAnd(ConstantInt::get(Ty, 1), V);
return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0)); return new ICmpInst(ICmpInst::ICMP_NE, V, ConstantInt::get(Ty, 0));
} }
} }
@ -566,7 +566,7 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
// we don't need to bother extending: the extension won't affect where the // we don't need to bother extending: the extension won't affect where the
// computation crosses zero. // computation crosses zero.
if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) { if (VariableIdx->getType()->getPrimitiveSizeInBits() > IntPtrWidth) {
VariableIdx = IC.Builder->CreateTrunc(VariableIdx, IntPtrTy); VariableIdx = IC.Builder.CreateTrunc(VariableIdx, IntPtrTy);
} }
return VariableIdx; return VariableIdx;
} }
@ -588,10 +588,10 @@ static Value *evaluateGEPOffsetExpression(User *GEP, InstCombiner &IC,
// Okay, we can do this evaluation. Start by converting the index to intptr. // Okay, we can do this evaluation. Start by converting the index to intptr.
if (VariableIdx->getType() != IntPtrTy) if (VariableIdx->getType() != IntPtrTy)
VariableIdx = IC.Builder->CreateIntCast(VariableIdx, IntPtrTy, VariableIdx = IC.Builder.CreateIntCast(VariableIdx, IntPtrTy,
true /*Signed*/); true /*Signed*/);
Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs); Constant *OffsetVal = ConstantInt::get(IntPtrTy, NewOffs);
return IC.Builder->CreateAdd(VariableIdx, OffsetVal, "offset"); return IC.Builder.CreateAdd(VariableIdx, OffsetVal, "offset");
} }
/// Returns true if we can rewrite Start as a GEP with pointer Base /// Returns true if we can rewrite Start as a GEP with pointer Base
@ -981,13 +981,13 @@ Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (LHSIndexTy != RHSIndexTy) { if (LHSIndexTy != RHSIndexTy) {
if (LHSIndexTy->getPrimitiveSizeInBits() < if (LHSIndexTy->getPrimitiveSizeInBits() <
RHSIndexTy->getPrimitiveSizeInBits()) { RHSIndexTy->getPrimitiveSizeInBits()) {
ROffset = Builder->CreateTrunc(ROffset, LHSIndexTy); ROffset = Builder.CreateTrunc(ROffset, LHSIndexTy);
} else } else
LOffset = Builder->CreateTrunc(LOffset, RHSIndexTy); LOffset = Builder.CreateTrunc(LOffset, RHSIndexTy);
} }
Value *Cmp = Builder->CreateICmp(ICmpInst::getSignedPredicate(Cond), Value *Cmp = Builder.CreateICmp(ICmpInst::getSignedPredicate(Cond),
LOffset, ROffset); LOffset, ROffset);
return replaceInstUsesWith(I, Cmp); return replaceInstUsesWith(I, Cmp);
} }
@ -1026,7 +1026,7 @@ Instruction *InstCombiner::foldGEPICmp(GEPOperator *GEPLHS, Value *RHS,
if (NumDifferences == 0) // SAME GEP? if (NumDifferences == 0) // SAME GEP?
return replaceInstUsesWith(I, // No comparison is needed here. return replaceInstUsesWith(I, // No comparison is needed here.
Builder->getInt1(ICmpInst::isTrueWhenEqual(Cond))); Builder.getInt1(ICmpInst::isTrueWhenEqual(Cond)));
else if (NumDifferences == 1 && GEPsInBounds) { else if (NumDifferences == 1 && GEPsInBounds) {
Value *LHSV = GEPLHS->getOperand(DiffOperand); Value *LHSV = GEPLHS->getOperand(DiffOperand);
@ -1174,7 +1174,7 @@ Instruction *InstCombiner::foldICmpAddOpConst(Instruction &ICI,
// (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128 // (X+ -1) >s X --> X <s (MAXSINT-(-1-1)) --> X == -128
assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE); assert(Pred == ICmpInst::ICMP_SGT || Pred == ICmpInst::ICMP_SGE);
Constant *C = Builder->getInt(CI->getValue()-1); Constant *C = Builder.getInt(CI->getValue() - 1);
return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C)); return new ICmpInst(ICmpInst::ICMP_SLT, X, ConstantExpr::getSub(SMax, C));
} }
@ -1347,17 +1347,17 @@ static Instruction *processUGT_ADDCST_ADD(ICmpInst &I, Value *A, Value *B,
Value *F = Intrinsic::getDeclaration(I.getModule(), Value *F = Intrinsic::getDeclaration(I.getModule(),
Intrinsic::sadd_with_overflow, NewType); Intrinsic::sadd_with_overflow, NewType);
InstCombiner::BuilderTy *Builder = IC.Builder; InstCombiner::BuilderTy &Builder = IC.Builder;
// Put the new code above the original add, in case there are any uses of the // Put the new code above the original add, in case there are any uses of the
// add between the add and the compare. // add between the add and the compare.
Builder->SetInsertPoint(OrigAdd); Builder.SetInsertPoint(OrigAdd);
Value *TruncA = Builder->CreateTrunc(A, NewType, A->getName() + ".trunc"); Value *TruncA = Builder.CreateTrunc(A, NewType, A->getName() + ".trunc");
Value *TruncB = Builder->CreateTrunc(B, NewType, B->getName() + ".trunc"); Value *TruncB = Builder.CreateTrunc(B, NewType, B->getName() + ".trunc");
CallInst *Call = Builder->CreateCall(F, {TruncA, TruncB}, "sadd"); CallInst *Call = Builder.CreateCall(F, {TruncA, TruncB}, "sadd");
Value *Add = Builder->CreateExtractValue(Call, 0, "sadd.result"); Value *Add = Builder.CreateExtractValue(Call, 0, "sadd.result");
Value *ZExt = Builder->CreateZExt(Add, OrigAdd->getType()); Value *ZExt = Builder.CreateZExt(Add, OrigAdd->getType());
// The inner add was the result of the narrow add, zero extended to the // The inner add was the result of the narrow add, zero extended to the
// wider type. Replace it with the result computed by the intrinsic. // wider type. Replace it with the result computed by the intrinsic.
@ -1434,9 +1434,9 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
ConstantRange Intersection = DominatingCR.intersectWith(CR); ConstantRange Intersection = DominatingCR.intersectWith(CR);
ConstantRange Difference = DominatingCR.difference(CR); ConstantRange Difference = DominatingCR.difference(CR);
if (Intersection.isEmptySet()) if (Intersection.isEmptySet())
return replaceInstUsesWith(Cmp, Builder->getFalse()); return replaceInstUsesWith(Cmp, Builder.getFalse());
if (Difference.isEmptySet()) if (Difference.isEmptySet())
return replaceInstUsesWith(Cmp, Builder->getTrue()); return replaceInstUsesWith(Cmp, Builder.getTrue());
// If this is a normal comparison, it demands all bits. If it is a sign // If this is a normal comparison, it demands all bits. If it is a sign
// bit comparison, it only demands the sign bit. // bit comparison, it only demands the sign bit.
@ -1452,9 +1452,9 @@ Instruction *InstCombiner::foldICmpWithConstant(ICmpInst &Cmp) {
return nullptr; return nullptr;
if (auto *AI = Intersection.getSingleElement()) if (auto *AI = Intersection.getSingleElement())
return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder->getInt(*AI)); return new ICmpInst(ICmpInst::ICMP_EQ, X, Builder.getInt(*AI));
if (auto *AD = Difference.getSingleElement()) if (auto *AD = Difference.getSingleElement())
return new ICmpInst(ICmpInst::ICMP_NE, X, Builder->getInt(*AD)); return new ICmpInst(ICmpInst::ICMP_NE, X, Builder.getInt(*AD));
} }
return nullptr; return nullptr;
@ -1628,11 +1628,11 @@ Instruction *InstCombiner::foldICmpAndShift(ICmpInst &Cmp, BinaryOperator *And,
!Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) { !Shift->isArithmeticShift() && !isa<Constant>(Shift->getOperand(0))) {
// Compute C2 << Y. // Compute C2 << Y.
Value *NewShift = Value *NewShift =
IsShl ? Builder->CreateLShr(And->getOperand(1), Shift->getOperand(1)) IsShl ? Builder.CreateLShr(And->getOperand(1), Shift->getOperand(1))
: Builder->CreateShl(And->getOperand(1), Shift->getOperand(1)); : Builder.CreateShl(And->getOperand(1), Shift->getOperand(1));
// Compute X & (C2 << Y). // Compute X & (C2 << Y).
Value *NewAnd = Builder->CreateAnd(Shift->getOperand(0), NewShift); Value *NewAnd = Builder.CreateAnd(Shift->getOperand(0), NewShift);
Cmp.setOperand(0, NewAnd); Cmp.setOperand(0, NewAnd);
return &Cmp; return &Cmp;
} }
@ -1670,7 +1670,7 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
unsigned WideScalarBits = WideType->getScalarSizeInBits(); unsigned WideScalarBits = WideType->getScalarSizeInBits();
Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits)); Constant *ZextC1 = ConstantInt::get(WideType, C1->zext(WideScalarBits));
Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits)); Constant *ZextC2 = ConstantInt::get(WideType, C2->zext(WideScalarBits));
Value *NewAnd = Builder->CreateAnd(W, ZextC2, And->getName()); Value *NewAnd = Builder.CreateAnd(W, ZextC2, And->getName());
return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1); return new ICmpInst(Cmp.getPredicate(), NewAnd, ZextC1);
} }
} }
@ -1704,12 +1704,12 @@ Instruction *InstCombiner::foldICmpAndConstConst(ICmpInst &Cmp,
NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One); NewOr = ConstantExpr::getOr(ConstantExpr::getNUWShl(One, C), One);
} else { } else {
if (UsesRemoved >= 3) if (UsesRemoved >= 3)
NewOr = Builder->CreateOr(Builder->CreateShl(One, B, LShr->getName(), NewOr = Builder.CreateOr(Builder.CreateShl(One, B, LShr->getName(),
/*HasNUW=*/true), /*HasNUW=*/true),
One, Or->getName()); One, Or->getName());
} }
if (NewOr) { if (NewOr) {
Value *NewAnd = Builder->CreateAnd(A, NewOr, And->getName()); Value *NewAnd = Builder.CreateAnd(A, NewOr, And->getName());
Cmp.setOperand(0, NewAnd); Cmp.setOperand(0, NewAnd);
return &Cmp; return &Cmp;
} }
@ -1772,7 +1772,7 @@ Instruction *InstCombiner::foldICmpAndConstant(ICmpInst &Cmp,
Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1); Type *NTy = IntegerType::get(Cmp.getContext(), ExactLogBase2 + 1);
if (And->getType()->isVectorTy()) if (And->getType()->isVectorTy())
NTy = VectorType::get(NTy, And->getType()->getVectorNumElements()); NTy = VectorType::get(NTy, And->getType()->getVectorNumElements());
Value *Trunc = Builder->CreateTrunc(X, NTy); Value *Trunc = Builder.CreateTrunc(X, NTy);
auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE auto NewPred = Cmp.getPredicate() == CmpInst::ICMP_EQ ? CmpInst::ICMP_SGE
: CmpInst::ICMP_SLT; : CmpInst::ICMP_SLT;
return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy)); return new ICmpInst(NewPred, Trunc, Constant::getNullValue(NTy));
@ -1811,9 +1811,9 @@ Instruction *InstCombiner::foldICmpOrConstant(ICmpInst &Cmp, BinaryOperator *Or,
// Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0 // Simplify icmp eq (or (ptrtoint P), (ptrtoint Q)), 0
// -> and (icmp eq P, null), (icmp eq Q, null). // -> and (icmp eq P, null), (icmp eq Q, null).
Value *CmpP = Value *CmpP =
Builder->CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType())); Builder.CreateICmp(Pred, P, ConstantInt::getNullValue(P->getType()));
Value *CmpQ = Value *CmpQ =
Builder->CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType())); Builder.CreateICmp(Pred, Q, ConstantInt::getNullValue(Q->getType()));
auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And auto LogicOpc = Pred == ICmpInst::Predicate::ICMP_EQ ? Instruction::And
: Instruction::Or; : Instruction::Or;
return BinaryOperator::Create(LogicOpc, CmpP, CmpQ); return BinaryOperator::Create(LogicOpc, CmpP, CmpQ);
@ -1993,7 +1993,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
Constant *Mask = ConstantInt::get( Constant *Mask = ConstantInt::get(
ShType, ShType,
APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue())); APInt::getLowBitsSet(TypeBits, TypeBits - ShiftAmt->getZExtValue()));
Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask"); Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt)); Constant *LShrC = ConstantInt::get(ShType, C->lshr(*ShiftAmt));
return new ICmpInst(Pred, And, LShrC); return new ICmpInst(Pred, And, LShrC);
} }
@ -2005,7 +2005,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
Constant *Mask = ConstantInt::get( Constant *Mask = ConstantInt::get(
ShType, ShType,
APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1)); APInt::getOneBitSet(TypeBits, TypeBits - ShiftAmt->getZExtValue() - 1));
Value *And = Builder->CreateAnd(X, Mask, Shl->getName() + ".mask"); Value *And = Builder.CreateAnd(X, Mask, Shl->getName() + ".mask");
return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ, return new ICmpInst(TrueIfSigned ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ,
And, Constant::getNullValue(ShType)); And, Constant::getNullValue(ShType));
} }
@ -2024,7 +2024,7 @@ Instruction *InstCombiner::foldICmpShlConstant(ICmpInst &Cmp,
TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements()); TruncTy = VectorType::get(TruncTy, ShType->getVectorNumElements());
Constant *NewC = Constant *NewC =
ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt)); ConstantInt::get(TruncTy, C->ashr(*ShiftAmt).trunc(TypeBits - Amt));
return new ICmpInst(Pred, Builder->CreateTrunc(X, TruncTy), NewC); return new ICmpInst(Pred, Builder.CreateTrunc(X, TruncTy), NewC);
} }
return nullptr; return nullptr;
@ -2076,8 +2076,8 @@ Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
Constant *DivCst = ConstantInt::get( Constant *DivCst = ConstantInt::get(
Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal)); Shr->getType(), APInt::getOneBitSet(TypeBits, ShAmtVal));
Value *Tmp = IsAShr ? Builder->CreateSDiv(X, DivCst, "", Shr->isExact()) Value *Tmp = IsAShr ? Builder.CreateSDiv(X, DivCst, "", Shr->isExact())
: Builder->CreateUDiv(X, DivCst, "", Shr->isExact()); : Builder.CreateUDiv(X, DivCst, "", Shr->isExact());
Cmp.setOperand(0, Tmp); Cmp.setOperand(0, Tmp);
@ -2115,7 +2115,7 @@ Instruction *InstCombiner::foldICmpShrConstant(ICmpInst &Cmp,
// Otherwise strength reduce the shift into an 'and'. // Otherwise strength reduce the shift into an 'and'.
APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal)); APInt Val(APInt::getHighBitsSet(TypeBits, TypeBits - ShAmtVal));
Constant *Mask = ConstantInt::get(Shr->getType(), Val); Constant *Mask = ConstantInt::get(Shr->getType(), Val);
Value *And = Builder->CreateAnd(X, Mask, Shr->getName() + ".mask"); Value *And = Builder.CreateAnd(X, Mask, Shr->getName() + ".mask");
return new ICmpInst(Pred, And, ShiftedCmpRHS); return new ICmpInst(Pred, And, ShiftedCmpRHS);
} }
@ -2279,7 +2279,7 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
default: llvm_unreachable("Unhandled icmp opcode!"); default: llvm_unreachable("Unhandled icmp opcode!");
case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_EQ:
if (LoOverflow && HiOverflow) if (LoOverflow && HiOverflow)
return replaceInstUsesWith(Cmp, Builder->getFalse()); return replaceInstUsesWith(Cmp, Builder.getFalse());
if (HiOverflow) if (HiOverflow)
return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE : return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SGE :
ICmpInst::ICMP_UGE, X, LoBound); ICmpInst::ICMP_UGE, X, LoBound);
@ -2291,7 +2291,7 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
HiBound->getUniqueInteger(), DivIsSigned, true)); HiBound->getUniqueInteger(), DivIsSigned, true));
case ICmpInst::ICMP_NE: case ICmpInst::ICMP_NE:
if (LoOverflow && HiOverflow) if (LoOverflow && HiOverflow)
return replaceInstUsesWith(Cmp, Builder->getTrue()); return replaceInstUsesWith(Cmp, Builder.getTrue());
if (HiOverflow) if (HiOverflow)
return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT : return new ICmpInst(DivIsSigned ? ICmpInst::ICMP_SLT :
ICmpInst::ICMP_ULT, X, LoBound); ICmpInst::ICMP_ULT, X, LoBound);
@ -2305,16 +2305,16 @@ Instruction *InstCombiner::foldICmpDivConstant(ICmpInst &Cmp,
case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLT:
if (LoOverflow == +1) // Low bound is greater than input range. if (LoOverflow == +1) // Low bound is greater than input range.
return replaceInstUsesWith(Cmp, Builder->getTrue()); return replaceInstUsesWith(Cmp, Builder.getTrue());
if (LoOverflow == -1) // Low bound is less than input range. if (LoOverflow == -1) // Low bound is less than input range.
return replaceInstUsesWith(Cmp, Builder->getFalse()); return replaceInstUsesWith(Cmp, Builder.getFalse());
return new ICmpInst(Pred, X, LoBound); return new ICmpInst(Pred, X, LoBound);
case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGT:
if (HiOverflow == +1) // High bound greater than input range. if (HiOverflow == +1) // High bound greater than input range.
return replaceInstUsesWith(Cmp, Builder->getFalse()); return replaceInstUsesWith(Cmp, Builder.getFalse());
if (HiOverflow == -1) // High bound less than input range. if (HiOverflow == -1) // High bound less than input range.
return replaceInstUsesWith(Cmp, Builder->getTrue()); return replaceInstUsesWith(Cmp, Builder.getTrue());
if (Pred == ICmpInst::ICMP_UGT) if (Pred == ICmpInst::ICMP_UGT)
return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound); return new ICmpInst(ICmpInst::ICMP_UGE, X, HiBound);
return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound); return new ICmpInst(ICmpInst::ICMP_SGE, X, HiBound);
@ -2361,12 +2361,12 @@ Instruction *InstCombiner::foldICmpSubConstant(ICmpInst &Cmp,
// iff (C2 & (C - 1)) == C - 1 and C is a power of 2 // iff (C2 & (C - 1)) == C - 1 and C is a power of 2
if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() &&
(*C2 & (*C - 1)) == (*C - 1)) (*C2 & (*C - 1)) == (*C - 1))
return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateOr(Y, *C - 1), X); return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateOr(Y, *C - 1), X);
// C2 - Y >u C -> (Y | C) != C2 // C2 - Y >u C -> (Y | C) != C2
// iff C2 & C == C and C + 1 is a power of 2 // iff C2 & C == C and C + 1 is a power of 2
if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C) if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == *C)
return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateOr(Y, *C), X); return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateOr(Y, *C), X);
return nullptr; return nullptr;
} }
@ -2422,14 +2422,14 @@ Instruction *InstCombiner::foldICmpAddConstant(ICmpInst &Cmp,
// iff C & (C2-1) == 0 // iff C & (C2-1) == 0
// C2 is a power of 2 // C2 is a power of 2
if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0) if (Pred == ICmpInst::ICMP_ULT && C->isPowerOf2() && (*C2 & (*C - 1)) == 0)
return new ICmpInst(ICmpInst::ICMP_EQ, Builder->CreateAnd(X, -(*C)), return new ICmpInst(ICmpInst::ICMP_EQ, Builder.CreateAnd(X, -(*C)),
ConstantExpr::getNeg(cast<Constant>(Y))); ConstantExpr::getNeg(cast<Constant>(Y)));
// X+C >u C2 -> (X & ~C2) != C // X+C >u C2 -> (X & ~C2) != C
// iff C & C2 == 0 // iff C & C2 == 0
// C2+1 is a power of 2 // C2+1 is a power of 2
if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0) if (Pred == ICmpInst::ICMP_UGT && (*C + 1).isPowerOf2() && (*C2 & *C) == 0)
return new ICmpInst(ICmpInst::ICMP_NE, Builder->CreateAnd(X, ~(*C)), return new ICmpInst(ICmpInst::ICMP_NE, Builder.CreateAnd(X, ~(*C)),
ConstantExpr::getNeg(cast<Constant>(Y))); ConstantExpr::getNeg(cast<Constant>(Y)));
return nullptr; return nullptr;
@ -2493,13 +2493,13 @@ Instruction *InstCombiner::foldICmpSelectConstant(ICmpInst &Cmp,
// When none of the three constants satisfy the predicate for the RHS (C), // When none of the three constants satisfy the predicate for the RHS (C),
// the entire original Cmp can be simplified to a false. // the entire original Cmp can be simplified to a false.
Value *Cond = Builder->getFalse(); Value *Cond = Builder.getFalse();
if (TrueWhenLessThan) if (TrueWhenLessThan)
Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS)); Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SLT, OrigLHS, OrigRHS));
if (TrueWhenEqual) if (TrueWhenEqual)
Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS)); Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_EQ, OrigLHS, OrigRHS));
if (TrueWhenGreaterThan) if (TrueWhenGreaterThan)
Cond = Builder->CreateOr(Cond, Builder->CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS)); Cond = Builder.CreateOr(Cond, Builder.CreateICmp(ICmpInst::ICMP_SGT, OrigLHS, OrigRHS));
return replaceInstUsesWith(Cmp, Cond); return replaceInstUsesWith(Cmp, Cond);
} }
@ -2615,7 +2615,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
if (C->isNullValue() && BO->hasOneUse()) { if (C->isNullValue() && BO->hasOneUse()) {
const APInt *BOC; const APInt *BOC;
if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) { if (match(BOp1, m_APInt(BOC)) && BOC->sgt(1) && BOC->isPowerOf2()) {
Value *NewRem = Builder->CreateURem(BOp0, BOp1, BO->getName()); Value *NewRem = Builder.CreateURem(BOp0, BOp1, BO->getName());
return new ICmpInst(Pred, NewRem, return new ICmpInst(Pred, NewRem,
Constant::getNullValue(BO->getType())); Constant::getNullValue(BO->getType()));
} }
@ -2637,7 +2637,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
if (Value *NegVal = dyn_castNegVal(BOp0)) if (Value *NegVal = dyn_castNegVal(BOp0))
return new ICmpInst(Pred, NegVal, BOp1); return new ICmpInst(Pred, NegVal, BOp1);
if (BO->hasOneUse()) { if (BO->hasOneUse()) {
Value *Neg = Builder->CreateNeg(BOp1); Value *Neg = Builder.CreateNeg(BOp1);
Neg->takeName(BO); Neg->takeName(BO);
return new ICmpInst(Pred, BOp0, Neg); return new ICmpInst(Pred, BOp0, Neg);
} }
@ -2676,7 +2676,7 @@ Instruction *InstCombiner::foldICmpBinOpEqualityWithConstant(ICmpInst &Cmp,
// Replace (X | C) == -1 with (X & ~C) == ~C. // Replace (X | C) == -1 with (X & ~C) == ~C.
// This removes the -1 constant. // This removes the -1 constant.
Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1)); Constant *NotBOC = ConstantExpr::getNot(cast<Constant>(BOp1));
Value *And = Builder->CreateAnd(BOp0, NotBOC); Value *And = Builder.CreateAnd(BOp0, NotBOC);
return new ICmpInst(Pred, And, NotBOC); return new ICmpInst(Pred, And, NotBOC);
} }
break; break;
@ -2845,11 +2845,11 @@ Instruction *InstCombiner::foldICmpInstWithConstantNotInt(ICmpInst &I) {
} }
if (Transform) { if (Transform) {
if (!Op1) if (!Op1)
Op1 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC, Op1 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(1), RHSC,
I.getName()); I.getName());
if (!Op2) if (!Op2)
Op2 = Builder->CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC, Op2 = Builder.CreateICmp(I.getPredicate(), LHSI->getOperand(2), RHSC,
I.getName()); I.getName());
return SelectInst::Create(LHSI->getOperand(0), Op1, Op2); return SelectInst::Create(LHSI->getOperand(0), Op1, Op2);
} }
break; break;
@ -3033,12 +3033,12 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
APInt AP1Abs = C1->getValue().abs(); APInt AP1Abs = C1->getValue().abs();
APInt AP2Abs = C2->getValue().abs(); APInt AP2Abs = C2->getValue().abs();
if (AP1Abs.uge(AP2Abs)) { if (AP1Abs.uge(AP2Abs)) {
ConstantInt *C3 = Builder->getInt(AP1 - AP2); ConstantInt *C3 = Builder.getInt(AP1 - AP2);
Value *NewAdd = Builder->CreateNSWAdd(A, C3); Value *NewAdd = Builder.CreateNSWAdd(A, C3);
return new ICmpInst(Pred, NewAdd, C); return new ICmpInst(Pred, NewAdd, C);
} else { } else {
ConstantInt *C3 = Builder->getInt(AP2 - AP1); ConstantInt *C3 = Builder.getInt(AP2 - AP1);
Value *NewAdd = Builder->CreateNSWAdd(C, C3); Value *NewAdd = Builder.CreateNSWAdd(C, C3);
return new ICmpInst(Pred, A, NewAdd); return new ICmpInst(Pred, A, NewAdd);
} }
} }
@ -3161,8 +3161,8 @@ Instruction *InstCombiner::foldICmpBinOp(ICmpInst &I) {
Constant *Mask = ConstantInt::get( Constant *Mask = ConstantInt::get(
BO0->getType(), BO0->getType(),
APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs)); APInt::getLowBitsSet(C->getBitWidth(), C->getBitWidth() - TZs));
Value *And1 = Builder->CreateAnd(BO0->getOperand(0), Mask); Value *And1 = Builder.CreateAnd(BO0->getOperand(0), Mask);
Value *And2 = Builder->CreateAnd(BO1->getOperand(0), Mask); Value *And2 = Builder.CreateAnd(BO1->getOperand(0), Mask);
return new ICmpInst(Pred, And1, And2); return new ICmpInst(Pred, And1, And2);
} }
// If there are no trailing zeros in the multiplier, just eliminate // If there are no trailing zeros in the multiplier, just eliminate
@ -3319,8 +3319,8 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
ConstantInt *C1, *C2; ConstantInt *C1, *C2;
if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) && if (match(B, m_ConstantInt(C1)) && match(D, m_ConstantInt(C2)) &&
Op1->hasOneUse()) { Op1->hasOneUse()) {
Constant *NC = Builder->getInt(C1->getValue() ^ C2->getValue()); Constant *NC = Builder.getInt(C1->getValue() ^ C2->getValue());
Value *Xor = Builder->CreateXor(C, NC); Value *Xor = Builder.CreateXor(C, NC);
return new ICmpInst(Pred, A, Xor); return new ICmpInst(Pred, A, Xor);
} }
@ -3366,8 +3366,8 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
} }
if (X) { // Build (X^Y) & Z if (X) { // Build (X^Y) & Z
Op1 = Builder->CreateXor(X, Y); Op1 = Builder.CreateXor(X, Y);
Op1 = Builder->CreateAnd(Op1, Z); Op1 = Builder.CreateAnd(Op1, Z);
I.setOperand(0, Op1); I.setOperand(0, Op1);
I.setOperand(1, Constant::getNullValue(Op1->getType())); I.setOperand(1, Constant::getNullValue(Op1->getType()));
return &I; return &I;
@ -3384,7 +3384,7 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
APInt Pow2 = Cst1->getValue() + 1; APInt Pow2 = Cst1->getValue() + 1;
if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) && if (Pow2.isPowerOf2() && isa<IntegerType>(A->getType()) &&
Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth()) Pow2.logBase2() == cast<IntegerType>(A->getType())->getBitWidth())
return new ICmpInst(Pred, A, Builder->CreateTrunc(B, A->getType())); return new ICmpInst(Pred, A, Builder.CreateTrunc(B, A->getType()));
} }
// (A >> C) == (B >> C) --> (A^B) u< (1 << C) // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
@ -3398,9 +3398,9 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
if (ShAmt < TypeBits && ShAmt != 0) { if (ShAmt < TypeBits && ShAmt != 0) {
ICmpInst::Predicate NewPred = ICmpInst::Predicate NewPred =
Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT; Pred == ICmpInst::ICMP_NE ? ICmpInst::ICMP_UGE : ICmpInst::ICMP_ULT;
Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted"); Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt); APInt CmpVal = APInt::getOneBitSet(TypeBits, ShAmt);
return new ICmpInst(NewPred, Xor, Builder->getInt(CmpVal)); return new ICmpInst(NewPred, Xor, Builder.getInt(CmpVal));
} }
} }
@ -3410,9 +3410,9 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
unsigned TypeBits = Cst1->getBitWidth(); unsigned TypeBits = Cst1->getBitWidth();
unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits); unsigned ShAmt = (unsigned)Cst1->getLimitedValue(TypeBits);
if (ShAmt < TypeBits && ShAmt != 0) { if (ShAmt < TypeBits && ShAmt != 0) {
Value *Xor = Builder->CreateXor(A, B, I.getName() + ".unshifted"); Value *Xor = Builder.CreateXor(A, B, I.getName() + ".unshifted");
APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt); APInt AndVal = APInt::getLowBitsSet(TypeBits, TypeBits - ShAmt);
Value *And = Builder->CreateAnd(Xor, Builder->getInt(AndVal), Value *And = Builder.CreateAnd(Xor, Builder.getInt(AndVal),
I.getName() + ".mask"); I.getName() + ".mask");
return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType())); return new ICmpInst(Pred, And, Constant::getNullValue(Cst1->getType()));
} }
@ -3437,8 +3437,8 @@ Instruction *InstCombiner::foldICmpEquality(ICmpInst &I) {
APInt CmpV = Cst1->getValue().zext(ASize); APInt CmpV = Cst1->getValue().zext(ASize);
CmpV <<= ShAmt; CmpV <<= ShAmt;
Value *Mask = Builder->CreateAnd(A, Builder->getInt(MaskV)); Value *Mask = Builder.CreateAnd(A, Builder.getInt(MaskV));
return new ICmpInst(Pred, Mask, Builder->getInt(CmpV)); return new ICmpInst(Pred, Mask, Builder.getInt(CmpV));
} }
} }
@ -3475,7 +3475,7 @@ Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
RHSOp = RHSC->getOperand(0); RHSOp = RHSC->getOperand(0);
// If the pointer types don't match, insert a bitcast. // If the pointer types don't match, insert a bitcast.
if (LHSCIOp->getType() != RHSOp->getType()) if (LHSCIOp->getType() != RHSOp->getType())
RHSOp = Builder->CreateBitCast(RHSOp, LHSCIOp->getType()); RHSOp = Builder.CreateBitCast(RHSOp, LHSCIOp->getType());
} }
} else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) { } else if (auto *RHSC = dyn_cast<Constant>(ICmp.getOperand(1))) {
RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy); RHSOp = ConstantExpr::getIntToPtr(RHSC, SrcTy);
@ -3559,7 +3559,7 @@ Instruction *InstCombiner::foldICmpWithCastAndCast(ICmpInst &ICmp) {
// We're performing an unsigned comp with a sign extended value. // We're performing an unsigned comp with a sign extended value.
// This is true if the input is >= 0. [aka >s -1] // This is true if the input is >= 0. [aka >s -1]
Constant *NegOne = Constant::getAllOnesValue(SrcTy); Constant *NegOne = Constant::getAllOnesValue(SrcTy);
Value *Result = Builder->CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName()); Value *Result = Builder.CreateICmpSGT(LHSCIOp, NegOne, ICmp.getName());
// Finally, return the value computed. // Finally, return the value computed.
if (ICmp.getPredicate() == ICmpInst::ICMP_ULT) if (ICmp.getPredicate() == ICmpInst::ICMP_ULT)
@ -3587,7 +3587,7 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
// may be pointing to the compare. We want to insert the new instructions // may be pointing to the compare. We want to insert the new instructions
// before the add in case there are uses of the add between the add and the // before the add in case there are uses of the add between the add and the
// compare. // compare.
Builder->SetInsertPoint(&OrigI); Builder.SetInsertPoint(&OrigI);
switch (OCF) { switch (OCF) {
case OCF_INVALID: case OCF_INVALID:
@ -3596,11 +3596,11 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_UNSIGNED_ADD: { case OCF_UNSIGNED_ADD: {
OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI); OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, &OrigI);
if (OR == OverflowResult::NeverOverflows) if (OR == OverflowResult::NeverOverflows)
return SetResult(Builder->CreateNUWAdd(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNUWAdd(LHS, RHS), Builder.getFalse(),
true); true);
if (OR == OverflowResult::AlwaysOverflows) if (OR == OverflowResult::AlwaysOverflows)
return SetResult(Builder->CreateAdd(LHS, RHS), Builder->getTrue(), true); return SetResult(Builder.CreateAdd(LHS, RHS), Builder.getTrue(), true);
// Fall through uadd into sadd // Fall through uadd into sadd
LLVM_FALLTHROUGH; LLVM_FALLTHROUGH;
@ -3608,13 +3608,13 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_SIGNED_ADD: { case OCF_SIGNED_ADD: {
// X + 0 -> {X, false} // X + 0 -> {X, false}
if (match(RHS, m_Zero())) if (match(RHS, m_Zero()))
return SetResult(LHS, Builder->getFalse(), false); return SetResult(LHS, Builder.getFalse(), false);
// We can strength reduce this signed add into a regular add if we can prove // We can strength reduce this signed add into a regular add if we can prove
// that it will never overflow. // that it will never overflow.
if (OCF == OCF_SIGNED_ADD) if (OCF == OCF_SIGNED_ADD)
if (willNotOverflowSignedAdd(LHS, RHS, OrigI)) if (willNotOverflowSignedAdd(LHS, RHS, OrigI))
return SetResult(Builder->CreateNSWAdd(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNSWAdd(LHS, RHS), Builder.getFalse(),
true); true);
break; break;
} }
@ -3623,15 +3623,15 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_SIGNED_SUB: { case OCF_SIGNED_SUB: {
// X - 0 -> {X, false} // X - 0 -> {X, false}
if (match(RHS, m_Zero())) if (match(RHS, m_Zero()))
return SetResult(LHS, Builder->getFalse(), false); return SetResult(LHS, Builder.getFalse(), false);
if (OCF == OCF_SIGNED_SUB) { if (OCF == OCF_SIGNED_SUB) {
if (willNotOverflowSignedSub(LHS, RHS, OrigI)) if (willNotOverflowSignedSub(LHS, RHS, OrigI))
return SetResult(Builder->CreateNSWSub(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNSWSub(LHS, RHS), Builder.getFalse(),
true); true);
} else { } else {
if (willNotOverflowUnsignedSub(LHS, RHS, OrigI)) if (willNotOverflowUnsignedSub(LHS, RHS, OrigI))
return SetResult(Builder->CreateNUWSub(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNUWSub(LHS, RHS), Builder.getFalse(),
true); true);
} }
break; break;
@ -3640,28 +3640,28 @@ bool InstCombiner::OptimizeOverflowCheck(OverflowCheckFlavor OCF, Value *LHS,
case OCF_UNSIGNED_MUL: { case OCF_UNSIGNED_MUL: {
OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI); OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, &OrigI);
if (OR == OverflowResult::NeverOverflows) if (OR == OverflowResult::NeverOverflows)
return SetResult(Builder->CreateNUWMul(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNUWMul(LHS, RHS), Builder.getFalse(),
true); true);
if (OR == OverflowResult::AlwaysOverflows) if (OR == OverflowResult::AlwaysOverflows)
return SetResult(Builder->CreateMul(LHS, RHS), Builder->getTrue(), true); return SetResult(Builder.CreateMul(LHS, RHS), Builder.getTrue(), true);
LLVM_FALLTHROUGH; LLVM_FALLTHROUGH;
} }
case OCF_SIGNED_MUL: case OCF_SIGNED_MUL:
// X * undef -> undef // X * undef -> undef
if (isa<UndefValue>(RHS)) if (isa<UndefValue>(RHS))
return SetResult(RHS, UndefValue::get(Builder->getInt1Ty()), false); return SetResult(RHS, UndefValue::get(Builder.getInt1Ty()), false);
// X * 0 -> {0, false} // X * 0 -> {0, false}
if (match(RHS, m_Zero())) if (match(RHS, m_Zero()))
return SetResult(RHS, Builder->getFalse(), false); return SetResult(RHS, Builder.getFalse(), false);
// X * 1 -> {X, false} // X * 1 -> {X, false}
if (match(RHS, m_One())) if (match(RHS, m_One()))
return SetResult(LHS, Builder->getFalse(), false); return SetResult(LHS, Builder.getFalse(), false);
if (OCF == OCF_SIGNED_MUL) if (OCF == OCF_SIGNED_MUL)
if (willNotOverflowSignedMul(LHS, RHS, OrigI)) if (willNotOverflowSignedMul(LHS, RHS, OrigI))
return SetResult(Builder->CreateNSWMul(LHS, RHS), Builder->getFalse(), return SetResult(Builder.CreateNSWMul(LHS, RHS), Builder.getFalse(),
true); true);
break; break;
} }
@ -3826,25 +3826,25 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
return nullptr; return nullptr;
} }
InstCombiner::BuilderTy *Builder = IC.Builder; InstCombiner::BuilderTy &Builder = IC.Builder;
Builder->SetInsertPoint(MulInstr); Builder.SetInsertPoint(MulInstr);
// Replace: mul(zext A, zext B) --> mul.with.overflow(A, B) // Replace: mul(zext A, zext B) --> mul.with.overflow(A, B)
Value *MulA = A, *MulB = B; Value *MulA = A, *MulB = B;
if (WidthA < MulWidth) if (WidthA < MulWidth)
MulA = Builder->CreateZExt(A, MulType); MulA = Builder.CreateZExt(A, MulType);
if (WidthB < MulWidth) if (WidthB < MulWidth)
MulB = Builder->CreateZExt(B, MulType); MulB = Builder.CreateZExt(B, MulType);
Value *F = Intrinsic::getDeclaration(I.getModule(), Value *F = Intrinsic::getDeclaration(I.getModule(),
Intrinsic::umul_with_overflow, MulType); Intrinsic::umul_with_overflow, MulType);
CallInst *Call = Builder->CreateCall(F, {MulA, MulB}, "umul"); CallInst *Call = Builder.CreateCall(F, {MulA, MulB}, "umul");
IC.Worklist.Add(MulInstr); IC.Worklist.Add(MulInstr);
// If there are uses of mul result other than the comparison, we know that // If there are uses of mul result other than the comparison, we know that
// they are truncation or binary AND. Change them to use result of // they are truncation or binary AND. Change them to use result of
// mul.with.overflow and adjust properly mask/size. // mul.with.overflow and adjust properly mask/size.
if (MulVal->hasNUsesOrMore(2)) { if (MulVal->hasNUsesOrMore(2)) {
Value *Mul = Builder->CreateExtractValue(Call, 0, "umul.value"); Value *Mul = Builder.CreateExtractValue(Call, 0, "umul.value");
for (User *U : MulVal->users()) { for (User *U : MulVal->users()) {
if (U == &I || U == OtherVal) if (U == &I || U == OtherVal)
continue; continue;
@ -3858,9 +3858,9 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
// Replace (mul & mask) --> zext (mul.with.overflow & short_mask) // Replace (mul & mask) --> zext (mul.with.overflow & short_mask)
ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1)); ConstantInt *CI = cast<ConstantInt>(BO->getOperand(1));
APInt ShortMask = CI->getValue().trunc(MulWidth); APInt ShortMask = CI->getValue().trunc(MulWidth);
Value *ShortAnd = Builder->CreateAnd(Mul, ShortMask); Value *ShortAnd = Builder.CreateAnd(Mul, ShortMask);
Instruction *Zext = Instruction *Zext =
cast<Instruction>(Builder->CreateZExt(ShortAnd, BO->getType())); cast<Instruction>(Builder.CreateZExt(ShortAnd, BO->getType()));
IC.Worklist.Add(Zext); IC.Worklist.Add(Zext);
IC.replaceInstUsesWith(*BO, Zext); IC.replaceInstUsesWith(*BO, Zext);
} else { } else {
@ -3897,7 +3897,7 @@ static Instruction *processUMulZExtIdiom(ICmpInst &I, Value *MulVal,
llvm_unreachable("Unexpected predicate"); llvm_unreachable("Unexpected predicate");
} }
if (Inverse) { if (Inverse) {
Value *Res = Builder->CreateExtractValue(Call, 1); Value *Res = Builder.CreateExtractValue(Call, 1);
return BinaryOperator::CreateNot(Res); return BinaryOperator::CreateNot(Res);
} }
@ -4252,7 +4252,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) { if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C if (Op1Max == Op0Min + 1) // A <s C -> A == C-1 if min(A)+1 == C
return new ICmpInst(ICmpInst::ICMP_EQ, Op0, return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
Builder->getInt(CI->getValue() - 1)); Builder.getInt(CI->getValue() - 1));
} }
break; break;
case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGT:
@ -4266,7 +4266,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) { if (ConstantInt *CI = dyn_cast<ConstantInt>(Op1)) {
if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C if (Op1Min == Op0Max - 1) // A >s C -> A == C+1 if max(A)-1 == C
return new ICmpInst(ICmpInst::ICMP_EQ, Op0, return new ICmpInst(ICmpInst::ICMP_EQ, Op0,
Builder->getInt(CI->getValue() + 1)); Builder.getInt(CI->getValue() + 1));
} }
break; break;
case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_SGE:
@ -4479,7 +4479,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
} }
if (Op0->getType()->getScalarType()->isIntegerTy(1)) if (Op0->getType()->getScalarType()->isIntegerTy(1))
if (Instruction *Res = canonicalizeICmpBool(I, *Builder)) if (Instruction *Res = canonicalizeICmpBool(I, Builder))
return Res; return Res;
if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I)) if (ICmpInst *NewICmp = canonicalizeCmpWithConstant(I))
@ -4572,7 +4572,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType()); Op1 = ConstantExpr::getBitCast(Op1C, Op0->getType());
} else { } else {
// Otherwise, cast the RHS right before the icmp // Otherwise, cast the RHS right before the icmp
Op1 = Builder->CreateBitCast(Op1, Op0->getType()); Op1 = Builder.CreateBitCast(Op1, Op0->getType());
} }
} }
return new ICmpInst(I.getPredicate(), Op0, Op1); return new ICmpInst(I.getPredicate(), Op0, Op1);
@ -4605,8 +4605,7 @@ Instruction *InstCombiner::visitICmpInst(ICmpInst &I) {
if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) && if (match(Op0, m_And(m_Value(A), m_Not(m_Value(B)))) &&
match(Op1, m_Zero()) && match(Op1, m_Zero()) &&
isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality()) isKnownToBeAPowerOfTwo(A, false, 0, &I) && I.isEquality())
return new ICmpInst(I.getInversePredicate(), return new ICmpInst(I.getInversePredicate(), Builder.CreateAnd(A, B),
Builder->CreateAnd(A, B),
Op1); Op1);
// ~X < ~Y --> Y < X // ~X < ~Y --> Y < X
@ -4706,10 +4705,10 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven); RHSRoundInt.roundToIntegral(APFloat::rmNearestTiesToEven);
if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) { if (RHS.compare(RHSRoundInt) != APFloat::cmpEqual) {
if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ) if (P == FCmpInst::FCMP_OEQ || P == FCmpInst::FCMP_UEQ)
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE); assert(P == FCmpInst::FCMP_ONE || P == FCmpInst::FCMP_UNE);
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
} }
} }
@ -4775,9 +4774,9 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
Pred = ICmpInst::ICMP_NE; Pred = ICmpInst::ICMP_NE;
break; break;
case FCmpInst::FCMP_ORD: case FCmpInst::FCMP_ORD:
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
case FCmpInst::FCMP_UNO: case FCmpInst::FCMP_UNO:
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
} }
// Now we know that the APFloat is a normal number, zero or inf. // Now we know that the APFloat is a normal number, zero or inf.
@ -4795,8 +4794,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0 if (SMax.compare(RHS) == APFloat::cmpLessThan) { // smax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT || if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SLT ||
Pred == ICmpInst::ICMP_SLE) Pred == ICmpInst::ICMP_SLE)
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
} }
} else { } else {
// If the RHS value is > UnsignedMax, fold the comparison. This handles // If the RHS value is > UnsignedMax, fold the comparison. This handles
@ -4807,8 +4806,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0 if (UMax.compare(RHS) == APFloat::cmpLessThan) { // umax < 13123.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT || if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_ULT ||
Pred == ICmpInst::ICMP_ULE) Pred == ICmpInst::ICMP_ULE)
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
} }
} }
@ -4820,8 +4819,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // smin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT || if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_SGT ||
Pred == ICmpInst::ICMP_SGE) Pred == ICmpInst::ICMP_SGE)
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
} }
} else { } else {
// See if the RHS value is < UnsignedMin. // See if the RHS value is < UnsignedMin.
@ -4831,8 +4830,8 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0 if (SMin.compare(RHS) == APFloat::cmpGreaterThan) { // umin > 12312.0
if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT || if (Pred == ICmpInst::ICMP_NE || Pred == ICmpInst::ICMP_UGT ||
Pred == ICmpInst::ICMP_UGE) Pred == ICmpInst::ICMP_UGE)
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
} }
} }
@ -4854,14 +4853,14 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
switch (Pred) { switch (Pred) {
default: llvm_unreachable("Unexpected integer comparison!"); default: llvm_unreachable("Unexpected integer comparison!");
case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true case ICmpInst::ICMP_NE: // (float)int != 4.4 --> true
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false case ICmpInst::ICMP_EQ: // (float)int == 4.4 --> false
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_ULE:
// (float)int <= 4.4 --> int <= 4 // (float)int <= 4.4 --> int <= 4
// (float)int <= -4.4 --> false // (float)int <= -4.4 --> false
if (RHS.isNegative()) if (RHS.isNegative())
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
break; break;
case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SLE:
// (float)int <= 4.4 --> int <= 4 // (float)int <= 4.4 --> int <= 4
@ -4873,7 +4872,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int < -4.4 --> false // (float)int < -4.4 --> false
// (float)int < 4.4 --> int <= 4 // (float)int < 4.4 --> int <= 4
if (RHS.isNegative()) if (RHS.isNegative())
return replaceInstUsesWith(I, Builder->getFalse()); return replaceInstUsesWith(I, Builder.getFalse());
Pred = ICmpInst::ICMP_ULE; Pred = ICmpInst::ICMP_ULE;
break; break;
case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLT:
@ -4886,7 +4885,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int > 4.4 --> int > 4 // (float)int > 4.4 --> int > 4
// (float)int > -4.4 --> true // (float)int > -4.4 --> true
if (RHS.isNegative()) if (RHS.isNegative())
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
break; break;
case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_SGT:
// (float)int > 4.4 --> int > 4 // (float)int > 4.4 --> int > 4
@ -4898,7 +4897,7 @@ Instruction *InstCombiner::foldFCmpIntToFPConst(FCmpInst &I, Instruction *LHSI,
// (float)int >= -4.4 --> true // (float)int >= -4.4 --> true
// (float)int >= 4.4 --> int > 4 // (float)int >= 4.4 --> int > 4
if (RHS.isNegative()) if (RHS.isNegative())
return replaceInstUsesWith(I, Builder->getTrue()); return replaceInstUsesWith(I, Builder.getTrue());
Pred = ICmpInst::ICMP_UGT; Pred = ICmpInst::ICMP_UGT;
break; break;
case ICmpInst::ICMP_SGE: case ICmpInst::ICMP_SGE:

View File

@ -210,7 +210,7 @@ public:
/// \brief An IRBuilder that automatically inserts new instructions into the /// \brief An IRBuilder that automatically inserts new instructions into the
/// worklist. /// worklist.
typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderTy; typedef IRBuilder<TargetFolder, IRBuilderCallbackInserter> BuilderTy;
BuilderTy *Builder; BuilderTy &Builder;
private: private:
// Mode in which we are running the combiner. // Mode in which we are running the combiner.
@ -233,7 +233,7 @@ private:
bool MadeIRChange; bool MadeIRChange;
public: public:
InstCombiner(InstCombineWorklist &Worklist, BuilderTy *Builder, InstCombiner(InstCombineWorklist &Worklist, BuilderTy &Builder,
bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA, bool MinimizeSize, bool ExpensiveCombines, AliasAnalysis *AA,
AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT, AssumptionCache &AC, TargetLibraryInfo &TLI, DominatorTree &DT,
const DataLayout &DL, LoopInfo *LI) const DataLayout &DL, LoopInfo *LI)

View File

@ -189,7 +189,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
return nullptr; return nullptr;
// Canonicalize it. // Canonicalize it.
Value *V = IC.Builder->getInt32(1); Value *V = IC.Builder.getInt32(1);
AI.setOperand(0, V); AI.setOperand(0, V);
return &AI; return &AI;
} }
@ -197,7 +197,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
// Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1 // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) { if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue()); Type *NewTy = ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
AllocaInst *New = IC.Builder->CreateAlloca(NewTy, nullptr, AI.getName()); AllocaInst *New = IC.Builder.CreateAlloca(NewTy, nullptr, AI.getName());
New->setAlignment(AI.getAlignment()); New->setAlignment(AI.getAlignment());
// Scan to the end of the allocation instructions, to skip over a block of // Scan to the end of the allocation instructions, to skip over a block of
@ -229,7 +229,7 @@ static Instruction *simplifyAllocaArraySize(InstCombiner &IC, AllocaInst &AI) {
// any casting is exposed early. // any casting is exposed early.
Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType()); Type *IntPtrTy = IC.getDataLayout().getIntPtrType(AI.getType());
if (AI.getArraySize()->getType() != IntPtrTy) { if (AI.getArraySize()->getType() != IntPtrTy) {
Value *V = IC.Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false); Value *V = IC.Builder.CreateIntCast(AI.getArraySize(), IntPtrTy, false);
AI.setOperand(0, V); AI.setOperand(0, V);
return &AI; return &AI;
} }
@ -458,8 +458,8 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
SmallVector<std::pair<unsigned, MDNode *>, 8> MD; SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
LI.getAllMetadata(MD); LI.getAllMetadata(MD);
LoadInst *NewLoad = IC.Builder->CreateAlignedLoad( LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
IC.Builder->CreateBitCast(Ptr, NewTy->getPointerTo(AS)), IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)),
LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix); LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope()); NewLoad->setAtomic(LI.getOrdering(), LI.getSynchScope());
MDBuilder MDB(NewLoad->getContext()); MDBuilder MDB(NewLoad->getContext());
@ -518,8 +518,8 @@ static StoreInst *combineStoreToNewValue(InstCombiner &IC, StoreInst &SI, Value
SmallVector<std::pair<unsigned, MDNode *>, 8> MD; SmallVector<std::pair<unsigned, MDNode *>, 8> MD;
SI.getAllMetadata(MD); SI.getAllMetadata(MD);
StoreInst *NewStore = IC.Builder->CreateAlignedStore( StoreInst *NewStore = IC.Builder.CreateAlignedStore(
V, IC.Builder->CreateBitCast(Ptr, V->getType()->getPointerTo(AS)), V, IC.Builder.CreateBitCast(Ptr, V->getType()->getPointerTo(AS)),
SI.getAlignment(), SI.isVolatile()); SI.getAlignment(), SI.isVolatile());
NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope()); NewStore->setAtomic(SI.getOrdering(), SI.getSynchScope());
for (const auto &MDPair : MD) { for (const auto &MDPair : MD) {
@ -613,7 +613,7 @@ static Instruction *combineLoadToOperationType(InstCombiner &IC, LoadInst &LI) {
// Replace all the stores with stores of the newly loaded value. // Replace all the stores with stores of the newly loaded value.
for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) { for (auto UI = LI.user_begin(), UE = LI.user_end(); UI != UE;) {
auto *SI = cast<StoreInst>(*UI++); auto *SI = cast<StoreInst>(*UI++);
IC.Builder->SetInsertPoint(SI); IC.Builder.SetInsertPoint(SI);
combineStoreToNewValue(IC, *SI, NewLoad); combineStoreToNewValue(IC, *SI, NewLoad);
IC.eraseInstFromFunction(*SI); IC.eraseInstFromFunction(*SI);
} }
@ -664,7 +664,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
AAMDNodes AAMD; AAMDNodes AAMD;
LI.getAAMetadata(AAMD); LI.getAAMetadata(AAMD);
NewLoad->setAAMetadata(AAMD); NewLoad->setAAMetadata(AAMD);
return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue( return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name)); UndefValue::get(T), NewLoad, 0, Name));
} }
@ -689,15 +689,15 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
Zero, Zero,
ConstantInt::get(IdxType, i), ConstantInt::get(IdxType, i),
}; };
auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
Name + ".elt"); Name + ".elt");
auto EltAlign = MinAlign(Align, SL->getElementOffset(i)); auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
auto *L = IC.Builder->CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack"); auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
// Propagate AA metadata. It'll still be valid on the narrowed load. // Propagate AA metadata. It'll still be valid on the narrowed load.
AAMDNodes AAMD; AAMDNodes AAMD;
LI.getAAMetadata(AAMD); LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD); L->setAAMetadata(AAMD);
V = IC.Builder->CreateInsertValue(V, L, i); V = IC.Builder.CreateInsertValue(V, L, i);
} }
V->setName(Name); V->setName(Name);
@ -712,7 +712,7 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
AAMDNodes AAMD; AAMDNodes AAMD;
LI.getAAMetadata(AAMD); LI.getAAMetadata(AAMD);
NewLoad->setAAMetadata(AAMD); NewLoad->setAAMetadata(AAMD);
return IC.replaceInstUsesWith(LI, IC.Builder->CreateInsertValue( return IC.replaceInstUsesWith(LI, IC.Builder.CreateInsertValue(
UndefValue::get(T), NewLoad, 0, Name)); UndefValue::get(T), NewLoad, 0, Name));
} }
@ -740,14 +740,14 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
Zero, Zero,
ConstantInt::get(IdxType, i), ConstantInt::get(IdxType, i),
}; };
auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices), auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
Name + ".elt"); Name + ".elt");
auto *L = IC.Builder->CreateAlignedLoad(Ptr, MinAlign(Align, Offset), auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
Name + ".unpack"); Name + ".unpack");
AAMDNodes AAMD; AAMDNodes AAMD;
LI.getAAMetadata(AAMD); LI.getAAMetadata(AAMD);
L->setAAMetadata(AAMD); L->setAAMetadata(AAMD);
V = IC.Builder->CreateInsertValue(V, L, i); V = IC.Builder.CreateInsertValue(V, L, i);
Offset += EltSize; Offset += EltSize;
} }
@ -982,8 +982,8 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI); combineMetadataForCSE(cast<LoadInst>(AvailableVal), &LI);
return replaceInstUsesWith( return replaceInstUsesWith(
LI, Builder->CreateBitOrPointerCast(AvailableVal, LI.getType(), LI, Builder.CreateBitOrPointerCast(AvailableVal, LI.getType(),
LI.getName() + ".cast")); LI.getName() + ".cast"));
} }
// None of the following transforms are legal for volatile/ordered atomic // None of the following transforms are legal for volatile/ordered atomic
@ -1019,10 +1019,10 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
unsigned Align = LI.getAlignment(); unsigned Align = LI.getAlignment();
if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) && if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) { isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1), LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
SI->getOperand(1)->getName()+".val"); SI->getOperand(1)->getName()+".val");
LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2), LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
SI->getOperand(2)->getName()+".val"); SI->getOperand(2)->getName()+".val");
assert(LI.isUnordered() && "implied by above"); assert(LI.isUnordered() && "implied by above");
V1->setAlignment(Align); V1->setAlignment(Align);
V1->setAtomic(LI.getOrdering(), LI.getSynchScope()); V1->setAtomic(LI.getOrdering(), LI.getSynchScope());
@ -1172,7 +1172,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// If the struct only have one element, we unpack. // If the struct only have one element, we unpack.
unsigned Count = ST->getNumElements(); unsigned Count = ST->getNumElements();
if (Count == 1) { if (Count == 1) {
V = IC.Builder->CreateExtractValue(V, 0); V = IC.Builder.CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V); combineStoreToNewValue(IC, SI, V);
return true; return true;
} }
@ -1201,12 +1201,11 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
Zero, Zero,
ConstantInt::get(IdxType, i), ConstantInt::get(IdxType, i),
}; };
auto *Ptr = IC.Builder->CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices), auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
AddrName); AddrName);
auto *Val = IC.Builder->CreateExtractValue(V, i, EltName); auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, SL->getElementOffset(i)); auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
llvm::Instruction *NS = llvm::Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD; AAMDNodes AAMD;
SI.getAAMetadata(AAMD); SI.getAAMetadata(AAMD);
NS->setAAMetadata(AAMD); NS->setAAMetadata(AAMD);
@ -1219,7 +1218,7 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
// If the array only have one element, we unpack. // If the array only have one element, we unpack.
auto NumElements = AT->getNumElements(); auto NumElements = AT->getNumElements();
if (NumElements == 1) { if (NumElements == 1) {
V = IC.Builder->CreateExtractValue(V, 0); V = IC.Builder.CreateExtractValue(V, 0);
combineStoreToNewValue(IC, SI, V); combineStoreToNewValue(IC, SI, V);
return true; return true;
} }
@ -1252,11 +1251,11 @@ static bool unpackStoreToAggregate(InstCombiner &IC, StoreInst &SI) {
Zero, Zero,
ConstantInt::get(IdxType, i), ConstantInt::get(IdxType, i),
}; };
auto *Ptr = IC.Builder->CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices), auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
AddrName); AddrName);
auto *Val = IC.Builder->CreateExtractValue(V, i, EltName); auto *Val = IC.Builder.CreateExtractValue(V, i, EltName);
auto EltAlign = MinAlign(Align, Offset); auto EltAlign = MinAlign(Align, Offset);
Instruction *NS = IC.Builder->CreateAlignedStore(Val, Ptr, EltAlign); Instruction *NS = IC.Builder.CreateAlignedStore(Val, Ptr, EltAlign);
AAMDNodes AAMD; AAMDNodes AAMD;
SI.getAAMetadata(AAMD); SI.getAAMetadata(AAMD);
NS->setAAMetadata(AAMD); NS->setAAMetadata(AAMD);

View File

@ -39,8 +39,8 @@ static Value *simplifyValueKnownNonZero(Value *V, InstCombiner &IC,
Value *A = nullptr, *B = nullptr, *One = nullptr; Value *A = nullptr, *B = nullptr, *One = nullptr;
if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) && if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
match(One, m_One())) { match(One, m_One())) {
A = IC.Builder->CreateSub(A, B); A = IC.Builder.CreateSub(A, B);
return IC.Builder->CreateShl(One, A); return IC.Builder.CreateShl(One, A);
} }
// (PowerOfTwo >>u B) --> isExact since shifting out the result would make it // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
@ -250,9 +250,9 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
ConstantInt *C1; ConstantInt *C1;
Value *Sub = nullptr; Value *Sub = nullptr;
if (match(Op0, m_Sub(m_Value(Y), m_Value(X)))) if (match(Op0, m_Sub(m_Value(Y), m_Value(X))))
Sub = Builder->CreateSub(X, Y, "suba"); Sub = Builder.CreateSub(X, Y, "suba");
else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1)))) else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1))))
Sub = Builder->CreateSub(Builder->CreateNeg(C1), Y, "subc"); Sub = Builder.CreateSub(Builder.CreateNeg(C1), Y, "subc");
if (Sub) if (Sub)
return return
BinaryOperator::CreateMul(Sub, BinaryOperator::CreateMul(Sub,
@ -272,11 +272,11 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
Value *X; Value *X;
Constant *C1; Constant *C1;
if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) { if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
Value *Mul = Builder->CreateMul(C1, Op1); Value *Mul = Builder.CreateMul(C1, Op1);
// Only go forward with the transform if C1*CI simplifies to a tidier // Only go forward with the transform if C1*CI simplifies to a tidier
// constant. // constant.
if (!match(Mul, m_Mul(m_Value(), m_Value()))) if (!match(Mul, m_Mul(m_Value(), m_Value())))
return BinaryOperator::CreateAdd(Builder->CreateMul(X, Op1), Mul); return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
} }
} }
} }
@ -318,7 +318,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
: Instruction::SRem; : Instruction::SRem;
Value *Rem = Builder->CreateBinOp(RemOpc, X, DivOp1); Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
if (DivOp1 == Y) if (DivOp1 == Y)
return BinaryOperator::CreateSub(X, Rem); return BinaryOperator::CreateSub(X, Rem);
return BinaryOperator::CreateSub(Rem, X); return BinaryOperator::CreateSub(Rem, X);
@ -368,7 +368,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
} }
if (BoolCast) { if (BoolCast) {
Value *V = Builder->CreateSub(Constant::getNullValue(I.getType()), Value *V = Builder.CreateSub(Constant::getNullValue(I.getType()),
BoolCast); BoolCast);
return BinaryOperator::CreateAnd(V, OtherOp); return BinaryOperator::CreateAnd(V, OtherOp);
} }
@ -386,7 +386,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) { willNotOverflowSignedMul(Op0Conv->getOperand(0), CI, I)) {
// Insert the new, smaller mul. // Insert the new, smaller mul.
Value *NewMul = Value *NewMul =
Builder->CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv"); Builder.CreateNSWMul(Op0Conv->getOperand(0), CI, "mulconv");
return new SExtInst(NewMul, I.getType()); return new SExtInst(NewMul, I.getType());
} }
} }
@ -403,7 +403,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowSignedMul(Op0Conv->getOperand(0), willNotOverflowSignedMul(Op0Conv->getOperand(0),
Op1Conv->getOperand(0), I)) { Op1Conv->getOperand(0), I)) {
// Insert the new integer mul. // Insert the new integer mul.
Value *NewMul = Builder->CreateNSWMul( Value *NewMul = Builder.CreateNSWMul(
Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv"); Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
return new SExtInst(NewMul, I.getType()); return new SExtInst(NewMul, I.getType());
} }
@ -422,7 +422,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) { willNotOverflowUnsignedMul(Op0Conv->getOperand(0), CI, I)) {
// Insert the new, smaller mul. // Insert the new, smaller mul.
Value *NewMul = Value *NewMul =
Builder->CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv"); Builder.CreateNUWMul(Op0Conv->getOperand(0), CI, "mulconv");
return new ZExtInst(NewMul, I.getType()); return new ZExtInst(NewMul, I.getType());
} }
} }
@ -439,7 +439,7 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
willNotOverflowUnsignedMul(Op0Conv->getOperand(0), willNotOverflowUnsignedMul(Op0Conv->getOperand(0),
Op1Conv->getOperand(0), I)) { Op1Conv->getOperand(0), I)) {
// Insert the new integer mul. // Insert the new integer mul.
Value *NewMul = Builder->CreateNUWMul( Value *NewMul = Builder.CreateNUWMul(
Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv"); Op0Conv->getOperand(0), Op1Conv->getOperand(0), "mulconv");
return new ZExtInst(NewMul, I.getType()); return new ZExtInst(NewMul, I.getType());
} }
@ -698,11 +698,11 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
} }
// if pattern detected emit alternate sequence // if pattern detected emit alternate sequence
if (OpX && OpY) { if (OpX && OpY) {
BuilderTy::FastMathFlagGuard Guard(*Builder); BuilderTy::FastMathFlagGuard Guard(Builder);
Builder->setFastMathFlags(Log2->getFastMathFlags()); Builder.setFastMathFlags(Log2->getFastMathFlags());
Log2->setArgOperand(0, OpY); Log2->setArgOperand(0, OpY);
Value *FMulVal = Builder->CreateFMul(OpX, Log2); Value *FMulVal = Builder.CreateFMul(OpX, Log2);
Value *FSub = Builder->CreateFSub(FMulVal, OpX); Value *FSub = Builder.CreateFSub(FMulVal, OpX);
FSub->takeName(&I); FSub->takeName(&I);
return replaceInstUsesWith(I, FSub); return replaceInstUsesWith(I, FSub);
} }
@ -714,23 +714,23 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
for (int i = 0; i < 2; i++) { for (int i = 0; i < 2; i++) {
bool IgnoreZeroSign = I.hasNoSignedZeros(); bool IgnoreZeroSign = I.hasNoSignedZeros();
if (BinaryOperator::isFNeg(Opnd0, IgnoreZeroSign)) { if (BinaryOperator::isFNeg(Opnd0, IgnoreZeroSign)) {
BuilderTy::FastMathFlagGuard Guard(*Builder); BuilderTy::FastMathFlagGuard Guard(Builder);
Builder->setFastMathFlags(I.getFastMathFlags()); Builder.setFastMathFlags(I.getFastMathFlags());
Value *N0 = dyn_castFNegVal(Opnd0, IgnoreZeroSign); Value *N0 = dyn_castFNegVal(Opnd0, IgnoreZeroSign);
Value *N1 = dyn_castFNegVal(Opnd1, IgnoreZeroSign); Value *N1 = dyn_castFNegVal(Opnd1, IgnoreZeroSign);
// -X * -Y => X*Y // -X * -Y => X*Y
if (N1) { if (N1) {
Value *FMul = Builder->CreateFMul(N0, N1); Value *FMul = Builder.CreateFMul(N0, N1);
FMul->takeName(&I); FMul->takeName(&I);
return replaceInstUsesWith(I, FMul); return replaceInstUsesWith(I, FMul);
} }
if (Opnd0->hasOneUse()) { if (Opnd0->hasOneUse()) {
// -X * Y => -(X*Y) (Promote negation as high as possible) // -X * Y => -(X*Y) (Promote negation as high as possible)
Value *T = Builder->CreateFMul(N0, Opnd1); Value *T = Builder.CreateFMul(N0, Opnd1);
Value *Neg = Builder->CreateFNeg(T); Value *Neg = Builder.CreateFNeg(T);
Neg->takeName(&I); Neg->takeName(&I);
return replaceInstUsesWith(I, Neg); return replaceInstUsesWith(I, Neg);
} }
@ -755,10 +755,10 @@ Instruction *InstCombiner::visitFMul(BinaryOperator &I) {
Y = Opnd0_0; Y = Opnd0_0;
if (Y) { if (Y) {
BuilderTy::FastMathFlagGuard Guard(*Builder); BuilderTy::FastMathFlagGuard Guard(Builder);
Builder->setFastMathFlags(I.getFastMathFlags()); Builder.setFastMathFlags(I.getFastMathFlags());
Value *T = Builder->CreateFMul(Opnd1, Opnd1); Value *T = Builder.CreateFMul(Opnd1, Opnd1);
Value *R = Builder->CreateFMul(T, Y); Value *R = Builder.CreateFMul(T, Y);
R->takeName(&I); R->takeName(&I);
return replaceInstUsesWith(I, R); return replaceInstUsesWith(I, R);
} }
@ -824,7 +824,7 @@ bool InstCombiner::SimplifyDivRemOfSelect(BinaryOperator &I) {
*I = SI->getOperand(NonNullOperand); *I = SI->getOperand(NonNullOperand);
Worklist.Add(&*BBI); Worklist.Add(&*BBI);
} else if (*I == SelectCond) { } else if (*I == SelectCond) {
*I = Builder->getInt1(NonNullOperand == 1); *I = Builder.getInt1(NonNullOperand == 1);
Worklist.Add(&*BBI); Worklist.Add(&*BBI);
} }
} }
@ -944,14 +944,13 @@ Instruction *InstCombiner::commonIDivTransforms(BinaryOperator &I) {
// If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
// result is one, if Op1 is -1 then the result is minus one, otherwise // result is one, if Op1 is -1 then the result is minus one, otherwise
// it's zero. // it's zero.
Value *Inc = Builder->CreateAdd(Op1, Op0); Value *Inc = Builder.CreateAdd(Op1, Op0);
Value *Cmp = Builder->CreateICmpULT( Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(I.getType(), 3));
Inc, ConstantInt::get(I.getType(), 3));
return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0)); return SelectInst::Create(Cmp, Op1, ConstantInt::get(I.getType(), 0));
} else { } else {
// If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
// result is one, otherwise it's zero. // result is one, otherwise it's zero.
return new ZExtInst(Builder->CreateICmpEQ(Op1, Op0), I.getType()); return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), I.getType());
} }
} }
@ -1026,7 +1025,7 @@ static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
// X udiv C, where C >= signbit // X udiv C, where C >= signbit
static Instruction *foldUDivNegCst(Value *Op0, Value *Op1, static Instruction *foldUDivNegCst(Value *Op0, Value *Op1,
const BinaryOperator &I, InstCombiner &IC) { const BinaryOperator &I, InstCombiner &IC) {
Value *ICI = IC.Builder->CreateICmpULT(Op0, cast<ConstantInt>(Op1)); Value *ICI = IC.Builder.CreateICmpULT(Op0, cast<ConstantInt>(Op1));
return SelectInst::Create(ICI, Constant::getNullValue(I.getType()), return SelectInst::Create(ICI, Constant::getNullValue(I.getType()),
ConstantInt::get(I.getType(), 1)); ConstantInt::get(I.getType(), 1));
@ -1045,10 +1044,9 @@ static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
if (!match(ShiftLeft, m_Shl(m_APInt(CI), m_Value(N)))) if (!match(ShiftLeft, m_Shl(m_APInt(CI), m_Value(N))))
llvm_unreachable("match should never fail here!"); llvm_unreachable("match should never fail here!");
if (*CI != 1) if (*CI != 1)
N = IC.Builder->CreateAdd(N, N = IC.Builder.CreateAdd(N, ConstantInt::get(N->getType(), CI->logBase2()));
ConstantInt::get(N->getType(), CI->logBase2()));
if (Op1 != ShiftLeft) if (Op1 != ShiftLeft)
N = IC.Builder->CreateZExt(N, Op1->getType()); N = IC.Builder.CreateZExt(N, Op1->getType());
BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N); BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
if (I.isExact()) if (I.isExact())
LShr->setIsExact(); LShr->setIsExact();
@ -1134,7 +1132,7 @@ Instruction *InstCombiner::visitUDiv(BinaryOperator &I) {
if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0)) if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0))
if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy())) if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy()))
return new ZExtInst( return new ZExtInst(
Builder->CreateUDiv(ZOp0->getOperand(0), ZOp1, "div", I.isExact()), Builder.CreateUDiv(ZOp0->getOperand(0), ZOp1, "div", I.isExact()),
I.getType()); I.getType());
// (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...)))) // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
@ -1209,7 +1207,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
Constant *NarrowDivisor = Constant *NarrowDivisor =
ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType()); ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
Value *NarrowOp = Builder->CreateSDiv(Op0Src, NarrowDivisor); Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
return new SExtInst(NarrowOp, Op0->getType()); return new SExtInst(NarrowOp, Op0->getType());
} }
} }
@ -1217,7 +1215,7 @@ Instruction *InstCombiner::visitSDiv(BinaryOperator &I) {
if (Constant *RHS = dyn_cast<Constant>(Op1)) { if (Constant *RHS = dyn_cast<Constant>(Op1)) {
// X/INT_MIN -> X == INT_MIN // X/INT_MIN -> X == INT_MIN
if (RHS->isMinSignedValue()) if (RHS->isMinSignedValue())
return new ZExtInst(Builder->CreateICmpEQ(Op0, Op1), I.getType()); return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), I.getType());
// -X/C --> X/-C provided the negation doesn't overflow. // -X/C --> X/-C provided the negation doesn't overflow.
Value *X; Value *X;
@ -1380,7 +1378,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
// (X/Y) / Z => X / (Y*Z) // (X/Y) / Z => X / (Y*Z)
// //
if (!isa<Constant>(Y) || !isa<Constant>(Op1)) { if (!isa<Constant>(Y) || !isa<Constant>(Op1)) {
NewInst = Builder->CreateFMul(Y, Op1); NewInst = Builder.CreateFMul(Y, Op1);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) { if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
FastMathFlags Flags = I.getFastMathFlags(); FastMathFlags Flags = I.getFastMathFlags();
Flags &= cast<Instruction>(Op0)->getFastMathFlags(); Flags &= cast<Instruction>(Op0)->getFastMathFlags();
@ -1392,7 +1390,7 @@ Instruction *InstCombiner::visitFDiv(BinaryOperator &I) {
// Z / (X/Y) => Z*Y / X // Z / (X/Y) => Z*Y / X
// //
if (!isa<Constant>(Y) || !isa<Constant>(Op0)) { if (!isa<Constant>(Y) || !isa<Constant>(Op0)) {
NewInst = Builder->CreateFMul(Op0, Y); NewInst = Builder.CreateFMul(Op0, Y);
if (Instruction *RI = dyn_cast<Instruction>(NewInst)) { if (Instruction *RI = dyn_cast<Instruction>(NewInst)) {
FastMathFlags Flags = I.getFastMathFlags(); FastMathFlags Flags = I.getFastMathFlags();
Flags &= cast<Instruction>(Op1)->getFastMathFlags(); Flags &= cast<Instruction>(Op1)->getFastMathFlags();
@ -1483,28 +1481,28 @@ Instruction *InstCombiner::visitURem(BinaryOperator &I) {
// (zext A) urem (zext B) --> zext (A urem B) // (zext A) urem (zext B) --> zext (A urem B)
if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0)) if (ZExtInst *ZOp0 = dyn_cast<ZExtInst>(Op0))
if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy())) if (Value *ZOp1 = dyn_castZExtVal(Op1, ZOp0->getSrcTy()))
return new ZExtInst(Builder->CreateURem(ZOp0->getOperand(0), ZOp1), return new ZExtInst(Builder.CreateURem(ZOp0->getOperand(0), ZOp1),
I.getType()); I.getType());
// X urem Y -> X and Y-1, where Y is a power of 2, // X urem Y -> X and Y-1, where Y is a power of 2,
if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) { if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
Constant *N1 = Constant::getAllOnesValue(I.getType()); Constant *N1 = Constant::getAllOnesValue(I.getType());
Value *Add = Builder->CreateAdd(Op1, N1); Value *Add = Builder.CreateAdd(Op1, N1);
return BinaryOperator::CreateAnd(Op0, Add); return BinaryOperator::CreateAnd(Op0, Add);
} }
// 1 urem X -> zext(X != 1) // 1 urem X -> zext(X != 1)
if (match(Op0, m_One())) { if (match(Op0, m_One())) {
Value *Cmp = Builder->CreateICmpNE(Op1, Op0); Value *Cmp = Builder.CreateICmpNE(Op1, Op0);
Value *Ext = Builder->CreateZExt(Cmp, I.getType()); Value *Ext = Builder.CreateZExt(Cmp, I.getType());
return replaceInstUsesWith(I, Ext); return replaceInstUsesWith(I, Ext);
} }
// X urem C -> X < C ? X : X - C, where C >= signbit. // X urem C -> X < C ? X : X - C, where C >= signbit.
const APInt *DivisorC; const APInt *DivisorC;
if (match(Op1, m_APInt(DivisorC)) && DivisorC->isNegative()) { if (match(Op1, m_APInt(DivisorC)) && DivisorC->isNegative()) {
Value *Cmp = Builder->CreateICmpULT(Op0, Op1); Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
Value *Sub = Builder->CreateSub(Op0, Op1); Value *Sub = Builder.CreateSub(Op0, Op1);
return SelectInst::Create(Cmp, Op0, Sub); return SelectInst::Create(Cmp, Op0, Sub);
} }

View File

@ -836,12 +836,12 @@ Instruction *InstCombiner::SliceUpIllegalIntegerPHI(PHINode &FirstPhi) {
} }
// Otherwise, do an extract in the predecessor. // Otherwise, do an extract in the predecessor.
Builder->SetInsertPoint(Pred->getTerminator()); Builder.SetInsertPoint(Pred->getTerminator());
Value *Res = InVal; Value *Res = InVal;
if (Offset) if (Offset)
Res = Builder->CreateLShr(Res, ConstantInt::get(InVal->getType(), Res = Builder.CreateLShr(Res, ConstantInt::get(InVal->getType(),
Offset), "extract"); Offset), "extract");
Res = Builder->CreateTrunc(Res, Ty, "extract.t"); Res = Builder.CreateTrunc(Res, Ty, "extract.t");
PredVal = Res; PredVal = Res;
EltPHI->addIncoming(Res, Pred); EltPHI->addIncoming(Res, Pred);

View File

@ -61,12 +61,12 @@ static CmpInst::Predicate getCmpPredicateForMinMax(SelectPatternFlavor SPF,
} }
} }
static Value *generateMinMaxSelectPattern(InstCombiner::BuilderTy *Builder, static Value *generateMinMaxSelectPattern(InstCombiner::BuilderTy &Builder,
SelectPatternFlavor SPF, Value *A, SelectPatternFlavor SPF, Value *A,
Value *B) { Value *B) {
CmpInst::Predicate Pred = getCmpPredicateForMinMax(SPF); CmpInst::Predicate Pred = getCmpPredicateForMinMax(SPF);
assert(CmpInst::isIntPredicate(Pred)); assert(CmpInst::isIntPredicate(Pred));
return Builder->CreateSelect(Builder->CreateICmp(Pred, A, B), A, B); return Builder.CreateSelect(Builder.CreateICmp(Pred, A, B), A, B);
} }
/// We want to turn code that looks like this: /// We want to turn code that looks like this:
@ -167,8 +167,8 @@ Instruction *InstCombiner::foldSelectOpOp(SelectInst &SI, Instruction *TI,
// Fold this by inserting a select from the input values. // Fold this by inserting a select from the input values.
Value *NewSI = Value *NewSI =
Builder->CreateSelect(SI.getCondition(), TI->getOperand(0), Builder.CreateSelect(SI.getCondition(), TI->getOperand(0),
FI->getOperand(0), SI.getName() + ".v", &SI); FI->getOperand(0), SI.getName() + ".v", &SI);
return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI, return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
TI->getType()); TI->getType());
} }
@ -211,8 +211,8 @@ Instruction *InstCombiner::foldSelectOpOp(SelectInst &SI, Instruction *TI,
} }
// If we reach here, they do have operations in common. // If we reach here, they do have operations in common.
Value *NewSI = Builder->CreateSelect(SI.getCondition(), OtherOpT, OtherOpF, Value *NewSI = Builder.CreateSelect(SI.getCondition(), OtherOpT, OtherOpF,
SI.getName() + ".v", &SI); SI.getName() + ".v", &SI);
Value *Op0 = MatchIsOpZero ? MatchOp : NewSI; Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
Value *Op1 = MatchIsOpZero ? NewSI : MatchOp; Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
return BinaryOperator::Create(BO->getOpcode(), Op0, Op1); return BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
@ -254,7 +254,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
// Avoid creating select between 2 constants unless it's selecting // Avoid creating select between 2 constants unless it's selecting
// between 0, 1 and -1. // between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) { if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
Value *NewSel = Builder->CreateSelect(SI.getCondition(), OOp, C); Value *NewSel = Builder.CreateSelect(SI.getCondition(), OOp, C);
NewSel->takeName(TVI); NewSel->takeName(TVI);
BinaryOperator *TVI_BO = cast<BinaryOperator>(TVI); BinaryOperator *TVI_BO = cast<BinaryOperator>(TVI);
BinaryOperator *BO = BinaryOperator::Create(TVI_BO->getOpcode(), BinaryOperator *BO = BinaryOperator::Create(TVI_BO->getOpcode(),
@ -284,7 +284,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
// Avoid creating select between 2 constants unless it's selecting // Avoid creating select between 2 constants unless it's selecting
// between 0, 1 and -1. // between 0, 1 and -1.
if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) { if (!isa<Constant>(OOp) || isSelect01(C, cast<Constant>(OOp))) {
Value *NewSel = Builder->CreateSelect(SI.getCondition(), C, OOp); Value *NewSel = Builder.CreateSelect(SI.getCondition(), C, OOp);
NewSel->takeName(FVI); NewSel->takeName(FVI);
BinaryOperator *FVI_BO = cast<BinaryOperator>(FVI); BinaryOperator *FVI_BO = cast<BinaryOperator>(FVI);
BinaryOperator *BO = BinaryOperator::Create(FVI_BO->getOpcode(), BinaryOperator *BO = BinaryOperator::Create(FVI_BO->getOpcode(),
@ -315,7 +315,7 @@ Instruction *InstCombiner::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
/// 3. The magnitude of C2 and C1 are flipped /// 3. The magnitude of C2 and C1 are flipped
static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal, static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
Value *FalseVal, Value *FalseVal,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition()); const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !SI.getType()->isIntegerTy()) if (!IC || !SI.getType()->isIntegerTy())
return nullptr; return nullptr;
@ -383,22 +383,22 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
if (NeedAnd) { if (NeedAnd) {
// Insert the AND instruction on the input to the truncate. // Insert the AND instruction on the input to the truncate.
APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log); APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
V = Builder->CreateAnd(V, ConstantInt::get(V->getType(), C1)); V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
} }
if (C2Log > C1Log) { if (C2Log > C1Log) {
V = Builder->CreateZExtOrTrunc(V, Y->getType()); V = Builder.CreateZExtOrTrunc(V, Y->getType());
V = Builder->CreateShl(V, C2Log - C1Log); V = Builder.CreateShl(V, C2Log - C1Log);
} else if (C1Log > C2Log) { } else if (C1Log > C2Log) {
V = Builder->CreateLShr(V, C1Log - C2Log); V = Builder.CreateLShr(V, C1Log - C2Log);
V = Builder->CreateZExtOrTrunc(V, Y->getType()); V = Builder.CreateZExtOrTrunc(V, Y->getType());
} else } else
V = Builder->CreateZExtOrTrunc(V, Y->getType()); V = Builder.CreateZExtOrTrunc(V, Y->getType());
if (NeedXor) if (NeedXor)
V = Builder->CreateXor(V, *C2); V = Builder.CreateXor(V, *C2);
return Builder->CreateOr(V, Y); return Builder.CreateOr(V, Y);
} }
/// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single /// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single
@ -414,7 +414,7 @@ static Value *foldSelectICmpAndOr(const SelectInst &SI, Value *TrueVal,
/// into: /// into:
/// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false) /// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal, static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
ICmpInst::Predicate Pred = ICI->getPredicate(); ICmpInst::Predicate Pred = ICI->getPredicate();
Value *CmpLHS = ICI->getOperand(0); Value *CmpLHS = ICI->getOperand(0);
Value *CmpRHS = ICI->getOperand(1); Value *CmpRHS = ICI->getOperand(1);
@ -449,8 +449,8 @@ static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
IntrinsicInst *NewI = cast<IntrinsicInst>(II->clone()); IntrinsicInst *NewI = cast<IntrinsicInst>(II->clone());
Type *Ty = NewI->getArgOperand(1)->getType(); Type *Ty = NewI->getArgOperand(1)->getType();
NewI->setArgOperand(1, Constant::getNullValue(Ty)); NewI->setArgOperand(1, Constant::getNullValue(Ty));
Builder->Insert(NewI); Builder.Insert(NewI);
return Builder->CreateZExtOrTrunc(NewI, ValueOnZero->getType()); return Builder.CreateZExtOrTrunc(NewI, ValueOnZero->getType());
} }
return nullptr; return nullptr;
@ -597,7 +597,7 @@ canonicalizeMinMaxWithConstant(SelectInst &Sel, ICmpInst &Cmp,
/// Visit a SelectInst that has an ICmpInst as its first operand. /// Visit a SelectInst that has an ICmpInst as its first operand.
Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI, Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
ICmpInst *ICI) { ICmpInst *ICI) {
if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, *Builder)) if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, Builder))
return NewSel; return NewSel;
bool Changed = adjustMinMax(SI, *ICI); bool Changed = adjustMinMax(SI, *ICI);
@ -626,14 +626,14 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
} }
if (C1 && C2) { if (C1 && C2) {
// This shift results in either -1 or 0. // This shift results in either -1 or 0.
Value *AShr = Builder->CreateAShr(CmpLHS, Ty->getBitWidth()-1); Value *AShr = Builder.CreateAShr(CmpLHS, Ty->getBitWidth() - 1);
// Check if we can express the operation with a single or. // Check if we can express the operation with a single or.
if (C2->isMinusOne()) if (C2->isMinusOne())
return replaceInstUsesWith(SI, Builder->CreateOr(AShr, C1)); return replaceInstUsesWith(SI, Builder.CreateOr(AShr, C1));
Value *And = Builder->CreateAnd(AShr, C2->getValue()-C1->getValue()); Value *And = Builder.CreateAnd(AShr, C2->getValue() - C1->getValue());
return replaceInstUsesWith(SI, Builder->CreateAdd(And, C1)); return replaceInstUsesWith(SI, Builder.CreateAdd(And, C1));
} }
} }
} }
@ -684,19 +684,19 @@ Instruction *InstCombiner::foldSelectInstWithICmp(SelectInst &SI,
// (X & Y) == 0 ? X : X ^ Y --> X & ~Y // (X & Y) == 0 ? X : X ^ Y --> X & ~Y
if (TrueWhenUnset && TrueVal == X && if (TrueWhenUnset && TrueVal == X &&
match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C) match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
V = Builder->CreateAnd(X, ~(*Y)); V = Builder.CreateAnd(X, ~(*Y));
// (X & Y) != 0 ? X ^ Y : X --> X & ~Y // (X & Y) != 0 ? X ^ Y : X --> X & ~Y
else if (!TrueWhenUnset && FalseVal == X && else if (!TrueWhenUnset && FalseVal == X &&
match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C) match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
V = Builder->CreateAnd(X, ~(*Y)); V = Builder.CreateAnd(X, ~(*Y));
// (X & Y) == 0 ? X ^ Y : X --> X | Y // (X & Y) == 0 ? X ^ Y : X --> X | Y
else if (TrueWhenUnset && FalseVal == X && else if (TrueWhenUnset && FalseVal == X &&
match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C) match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
V = Builder->CreateOr(X, *Y); V = Builder.CreateOr(X, *Y);
// (X & Y) != 0 ? X : X ^ Y --> X | Y // (X & Y) != 0 ? X : X ^ Y --> X | Y
else if (!TrueWhenUnset && TrueVal == X && else if (!TrueWhenUnset && TrueVal == X &&
match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C) match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
V = Builder->CreateOr(X, *Y); V = Builder.CreateOr(X, *Y);
if (V) if (V)
return replaceInstUsesWith(SI, V); return replaceInstUsesWith(SI, V);
@ -809,8 +809,8 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
(SPF1 == SPF_NABS && SPF2 == SPF_ABS)) { (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
SelectInst *SI = cast<SelectInst>(Inner); SelectInst *SI = cast<SelectInst>(Inner);
Value *NewSI = Value *NewSI =
Builder->CreateSelect(SI->getCondition(), SI->getFalseValue(), Builder.CreateSelect(SI->getCondition(), SI->getFalseValue(),
SI->getTrueValue(), SI->getName(), SI); SI->getTrueValue(), SI->getName(), SI);
return replaceInstUsesWith(Outer, NewSI); return replaceInstUsesWith(Outer, NewSI);
} }
@ -848,15 +848,15 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
IsFreeOrProfitableToInvert(B, NotB, ElidesXor) && IsFreeOrProfitableToInvert(B, NotB, ElidesXor) &&
IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) { IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) {
if (!NotA) if (!NotA)
NotA = Builder->CreateNot(A); NotA = Builder.CreateNot(A);
if (!NotB) if (!NotB)
NotB = Builder->CreateNot(B); NotB = Builder.CreateNot(B);
if (!NotC) if (!NotC)
NotC = Builder->CreateNot(C); NotC = Builder.CreateNot(C);
Value *NewInner = generateMinMaxSelectPattern( Value *NewInner = generateMinMaxSelectPattern(
Builder, getInverseMinMaxSelectPattern(SPF1), NotA, NotB); Builder, getInverseMinMaxSelectPattern(SPF1), NotA, NotB);
Value *NewOuter = Builder->CreateNot(generateMinMaxSelectPattern( Value *NewOuter = Builder.CreateNot(generateMinMaxSelectPattern(
Builder, getInverseMinMaxSelectPattern(SPF2), NewInner, NotC)); Builder, getInverseMinMaxSelectPattern(SPF2), NewInner, NotC));
return replaceInstUsesWith(Outer, NewOuter); return replaceInstUsesWith(Outer, NewOuter);
} }
@ -870,7 +870,7 @@ Instruction *InstCombiner::foldSPFofSPF(Instruction *Inner,
/// 'and'. /// 'and'.
static Value *foldSelectICmpAnd(const SelectInst &SI, APInt TrueVal, static Value *foldSelectICmpAnd(const SelectInst &SI, APInt TrueVal,
APInt FalseVal, APInt FalseVal,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition()); const ICmpInst *IC = dyn_cast<ICmpInst>(SI.getCondition());
if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy()) if (!IC || !IC->isEquality() || !SI.getType()->isIntegerTy())
return nullptr; return nullptr;
@ -917,22 +917,22 @@ static Value *foldSelectICmpAnd(const SelectInst &SI, APInt TrueVal,
if (AndZeros >= ValC.getBitWidth()) if (AndZeros >= ValC.getBitWidth())
return nullptr; return nullptr;
Value *V = Builder->CreateZExtOrTrunc(LHS, SI.getType()); Value *V = Builder.CreateZExtOrTrunc(LHS, SI.getType());
if (ValZeros > AndZeros) if (ValZeros > AndZeros)
V = Builder->CreateShl(V, ValZeros - AndZeros); V = Builder.CreateShl(V, ValZeros - AndZeros);
else if (ValZeros < AndZeros) else if (ValZeros < AndZeros)
V = Builder->CreateLShr(V, AndZeros - ValZeros); V = Builder.CreateLShr(V, AndZeros - ValZeros);
// Okay, now we know that everything is set up, we just don't know whether we // Okay, now we know that everything is set up, we just don't know whether we
// have a icmp_ne or icmp_eq and whether the true or false val is the zero. // have a icmp_ne or icmp_eq and whether the true or false val is the zero.
bool ShouldNotVal = !TrueVal.isNullValue(); bool ShouldNotVal = !TrueVal.isNullValue();
ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE; ShouldNotVal ^= IC->getPredicate() == ICmpInst::ICMP_NE;
if (ShouldNotVal) if (ShouldNotVal)
V = Builder->CreateXor(V, ValC); V = Builder.CreateXor(V, ValC);
// Apply an offset if needed. // Apply an offset if needed.
if (!Offset.isNullValue()) if (!Offset.isNullValue())
V = Builder->CreateAdd(V, ConstantInt::get(V->getType(), Offset)); V = Builder.CreateAdd(V, ConstantInt::get(V->getType(), Offset));
return V; return V;
} }
@ -1042,7 +1042,7 @@ Instruction *InstCombiner::foldSelectExtConst(SelectInst &Sel) {
// select Cond, (ext X), C --> ext(select Cond, X, C') // select Cond, (ext X), C --> ext(select Cond, X, C')
// select Cond, C, (ext X) --> ext(select Cond, C', X) // select Cond, C, (ext X) --> ext(select Cond, C', X)
Value *NewSel = Builder->CreateSelect(Cond, X, TruncCVal, "narrow", &Sel); Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType); return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
} }
@ -1189,7 +1189,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
} }
if (match(TrueVal, m_Zero())) { if (match(TrueVal, m_Zero())) {
// Change: A = select B, false, C --> A = and !B, C // Change: A = select B, false, C --> A = and !B, C
Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName()); Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return BinaryOperator::CreateAnd(NotCond, FalseVal); return BinaryOperator::CreateAnd(NotCond, FalseVal);
} }
if (match(FalseVal, m_Zero())) { if (match(FalseVal, m_Zero())) {
@ -1198,7 +1198,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
} }
if (match(FalseVal, m_One())) { if (match(FalseVal, m_One())) {
// Change: A = select B, C, true --> A = or !B, C // Change: A = select B, C, true --> A = or !B, C
Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName()); Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return BinaryOperator::CreateOr(NotCond, TrueVal); return BinaryOperator::CreateOr(NotCond, TrueVal);
} }
@ -1234,13 +1234,13 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// select C, 0, 1 -> zext !C to int // select C, 0, 1 -> zext !C to int
if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) { if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName()); Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return new ZExtInst(NotCond, SelType); return new ZExtInst(NotCond, SelType);
} }
// select C, 0, -1 -> sext !C to int // select C, 0, -1 -> sext !C to int
if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) { if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
Value *NotCond = Builder->CreateNot(CondVal, "not." + CondVal->getName()); Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
return new SExtInst(NotCond, SelType); return new SExtInst(NotCond, SelType);
} }
} }
@ -1286,10 +1286,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// (X ugt Y) ? X : Y -> (X ole Y) ? Y : X // (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) { if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
FCmpInst::Predicate InvPred = FCI->getInversePredicate(); FCmpInst::Predicate InvPred = FCI->getInversePredicate();
IRBuilder<>::FastMathFlagGuard FMFG(*Builder); IRBuilder<>::FastMathFlagGuard FMFG(Builder);
Builder->setFastMathFlags(FCI->getFastMathFlags()); Builder.setFastMathFlags(FCI->getFastMathFlags());
Value *NewCond = Builder->CreateFCmp(InvPred, TrueVal, FalseVal, Value *NewCond = Builder.CreateFCmp(InvPred, TrueVal, FalseVal,
FCI->getName() + ".inv"); FCI->getName() + ".inv");
return SelectInst::Create(NewCond, FalseVal, TrueVal, return SelectInst::Create(NewCond, FalseVal, TrueVal,
SI.getName() + ".p"); SI.getName() + ".p");
@ -1329,10 +1329,10 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// (X ugt Y) ? X : Y -> (X ole Y) ? X : Y // (X ugt Y) ? X : Y -> (X ole Y) ? X : Y
if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) { if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
FCmpInst::Predicate InvPred = FCI->getInversePredicate(); FCmpInst::Predicate InvPred = FCI->getInversePredicate();
IRBuilder<>::FastMathFlagGuard FMFG(*Builder); IRBuilder<>::FastMathFlagGuard FMFG(Builder);
Builder->setFastMathFlags(FCI->getFastMathFlags()); Builder.setFastMathFlags(FCI->getFastMathFlags());
Value *NewCond = Builder->CreateFCmp(InvPred, FalseVal, TrueVal, Value *NewCond = Builder.CreateFCmp(InvPred, FalseVal, TrueVal,
FCI->getName() + ".inv"); FCI->getName() + ".inv");
return SelectInst::Create(NewCond, FalseVal, TrueVal, return SelectInst::Create(NewCond, FalseVal, TrueVal,
SI.getName() + ".p"); SI.getName() + ".p");
@ -1348,7 +1348,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
if (Instruction *Result = foldSelectInstWithICmp(SI, ICI)) if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
return Result; return Result;
if (Instruction *Add = foldAddSubSelect(SI, *Builder)) if (Instruction *Add = foldAddSubSelect(SI, Builder))
return Add; return Add;
// Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z)) // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
@ -1379,16 +1379,16 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
Value *Cmp; Value *Cmp;
if (CmpInst::isIntPredicate(Pred)) { if (CmpInst::isIntPredicate(Pred)) {
Cmp = Builder->CreateICmp(Pred, LHS, RHS); Cmp = Builder.CreateICmp(Pred, LHS, RHS);
} else { } else {
IRBuilder<>::FastMathFlagGuard FMFG(*Builder); IRBuilder<>::FastMathFlagGuard FMFG(Builder);
auto FMF = cast<FPMathOperator>(SI.getCondition())->getFastMathFlags(); auto FMF = cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
Builder->setFastMathFlags(FMF); Builder.setFastMathFlags(FMF);
Cmp = Builder->CreateFCmp(Pred, LHS, RHS); Cmp = Builder.CreateFCmp(Pred, LHS, RHS);
} }
Value *NewSI = Builder->CreateCast( Value *NewSI = Builder.CreateCast(
CastOp, Builder->CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI), CastOp, Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI),
SelType); SelType);
return replaceInstUsesWith(SI, NewSI); return replaceInstUsesWith(SI, NewSI);
} }
@ -1423,13 +1423,12 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
(SI.hasOneUse() && match(*SI.user_begin(), m_Not(m_Value()))); (SI.hasOneUse() && match(*SI.user_begin(), m_Not(m_Value())));
if (NumberOfNots >= 2) { if (NumberOfNots >= 2) {
Value *NewLHS = Builder->CreateNot(LHS); Value *NewLHS = Builder.CreateNot(LHS);
Value *NewRHS = Builder->CreateNot(RHS); Value *NewRHS = Builder.CreateNot(RHS);
Value *NewCmp = SPF == SPF_SMAX Value *NewCmp = SPF == SPF_SMAX ? Builder.CreateICmpSLT(NewLHS, NewRHS)
? Builder->CreateICmpSLT(NewLHS, NewRHS) : Builder.CreateICmpULT(NewLHS, NewRHS);
: Builder->CreateICmpULT(NewLHS, NewRHS);
Value *NewSI = Value *NewSI =
Builder->CreateNot(Builder->CreateSelect(NewCmp, NewLHS, NewRHS)); Builder.CreateNot(Builder.CreateSelect(NewCmp, NewLHS, NewRHS));
return replaceInstUsesWith(SI, NewSI); return replaceInstUsesWith(SI, NewSI);
} }
} }
@ -1459,7 +1458,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
// We choose this as normal form to enable folding on the And and shortening // We choose this as normal form to enable folding on the And and shortening
// paths for the values (this helps GetUnderlyingObjects() for example). // paths for the values (this helps GetUnderlyingObjects() for example).
if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) { if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
Value *And = Builder->CreateAnd(CondVal, TrueSI->getCondition()); Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
SI.setOperand(0, And); SI.setOperand(0, And);
SI.setOperand(1, TrueSI->getTrueValue()); SI.setOperand(1, TrueSI->getTrueValue());
return &SI; return &SI;
@ -1477,7 +1476,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
} }
// select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b) // select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) { if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
Value *Or = Builder->CreateOr(CondVal, FalseSI->getCondition()); Value *Or = Builder.CreateOr(CondVal, FalseSI->getCondition());
SI.setOperand(0, Or); SI.setOperand(0, Or);
SI.setOperand(2, FalseSI->getFalseValue()); SI.setOperand(2, FalseSI->getFalseValue());
return &SI; return &SI;
@ -1539,7 +1538,7 @@ Instruction *InstCombiner::visitSelectInst(SelectInst &SI) {
return replaceInstUsesWith(SI, FalseVal); return replaceInstUsesWith(SI, FalseVal);
} }
if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, *Builder)) if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
return BitCastSel; return BitCastSel;
return nullptr; return nullptr;

View File

@ -47,7 +47,7 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) && if (isKnownNonNegative(A, DL, 0, &AC, &I, &DT) &&
isKnownNonNegative(C, DL, 0, &AC, &I, &DT)) isKnownNonNegative(C, DL, 0, &AC, &I, &DT))
return BinaryOperator::Create( return BinaryOperator::Create(
I.getOpcode(), Builder->CreateBinOp(I.getOpcode(), Op0, C), A); I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A);
// X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2. // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
// Because shifts by negative values (which could occur if A were negative) // Because shifts by negative values (which could occur if A were negative)
@ -56,8 +56,8 @@ Instruction *InstCombiner::commonShiftTransforms(BinaryOperator &I) {
if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) { if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
// FIXME: Should this get moved into SimplifyDemandedBits by saying we don't // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
// demand the sign bit (and many others) here?? // demand the sign bit (and many others) here??
Value *Rem = Builder->CreateAnd(A, ConstantInt::get(I.getType(), *B-1), Value *Rem = Builder.CreateAnd(A, ConstantInt::get(I.getType(), *B - 1),
Op1->getName()); Op1->getName());
I.setOperand(1, Rem); I.setOperand(1, Rem);
return &I; return &I;
} }
@ -260,9 +260,9 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
// We can always evaluate constants shifted. // We can always evaluate constants shifted.
if (Constant *C = dyn_cast<Constant>(V)) { if (Constant *C = dyn_cast<Constant>(V)) {
if (isLeftShift) if (isLeftShift)
V = IC.Builder->CreateShl(C, NumBits); V = IC.Builder.CreateShl(C, NumBits);
else else
V = IC.Builder->CreateLShr(C, NumBits); V = IC.Builder.CreateLShr(C, NumBits);
// If we got a constantexpr back, try to simplify it with TD info. // If we got a constantexpr back, try to simplify it with TD info.
if (auto *C = dyn_cast<Constant>(V)) if (auto *C = dyn_cast<Constant>(V))
if (auto *FoldedC = if (auto *FoldedC =
@ -289,7 +289,7 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
case Instruction::Shl: case Instruction::Shl:
case Instruction::LShr: case Instruction::LShr:
return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift, return foldShiftedShift(cast<BinaryOperator>(I), NumBits, isLeftShift,
*(IC.Builder)); IC.Builder);
case Instruction::Select: case Instruction::Select:
I->setOperand( I->setOperand(
@ -353,7 +353,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
Constant *ShAmt = Constant *ShAmt =
ConstantExpr::getZExt(cast<Constant>(Op1), TrOp->getType()); ConstantExpr::getZExt(cast<Constant>(Op1), TrOp->getType());
// (shift2 (shift1 & 0x00FF), c2) // (shift2 (shift1 & 0x00FF), c2)
Value *NSh = Builder->CreateBinOp(I.getOpcode(), TrOp, ShAmt,I.getName()); Value *NSh = Builder.CreateBinOp(I.getOpcode(), TrOp, ShAmt, I.getName());
// For logical shifts, the truncation has the effect of making the high // For logical shifts, the truncation has the effect of making the high
// part of the register be zeros. Emulate this by inserting an AND to // part of the register be zeros. Emulate this by inserting an AND to
@ -375,9 +375,9 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
} }
// shift1 & 0x00FF // shift1 & 0x00FF
Value *And = Builder->CreateAnd(NSh, Value *And = Builder.CreateAnd(NSh,
ConstantInt::get(I.getContext(), MaskV), ConstantInt::get(I.getContext(), MaskV),
TI->getName()); TI->getName());
// Return the value truncated to the interesting size. // Return the value truncated to the interesting size.
return new TruncInst(And, I.getType()); return new TruncInst(And, I.getType());
@ -401,10 +401,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
match(Op0BO->getOperand(1), m_Shr(m_Value(V1), match(Op0BO->getOperand(1), m_Shr(m_Value(V1),
m_Specific(Op1)))) { m_Specific(Op1)))) {
Value *YS = // (Y << C) Value *YS = // (Y << C)
Builder->CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName()); Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
// (X + (Y << C)) // (X + (Y << C))
Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), YS, V1, Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), YS, V1,
Op0BO->getOperand(1)->getName()); Op0BO->getOperand(1)->getName());
unsigned Op1Val = Op1C->getLimitedValue(TypeBits); unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val); APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
@ -421,11 +421,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))), m_And(m_OneUse(m_Shr(m_Value(V1), m_Specific(Op1))),
m_ConstantInt(CC)))) { m_ConstantInt(CC)))) {
Value *YS = // (Y << C) Value *YS = // (Y << C)
Builder->CreateShl(Op0BO->getOperand(0), Op1, Builder.CreateShl(Op0BO->getOperand(0), Op1, Op0BO->getName());
Op0BO->getName());
// X & (CC << C) // X & (CC << C)
Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1), Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
V1->getName()+".mask"); V1->getName()+".mask");
return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM); return BinaryOperator::Create(Op0BO->getOpcode(), YS, XM);
} }
LLVM_FALLTHROUGH; LLVM_FALLTHROUGH;
@ -437,10 +436,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
match(Op0BO->getOperand(0), m_Shr(m_Value(V1), match(Op0BO->getOperand(0), m_Shr(m_Value(V1),
m_Specific(Op1)))) { m_Specific(Op1)))) {
Value *YS = // (Y << C) Value *YS = // (Y << C)
Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName()); Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
// (X + (Y << C)) // (X + (Y << C))
Value *X = Builder->CreateBinOp(Op0BO->getOpcode(), V1, YS, Value *X = Builder.CreateBinOp(Op0BO->getOpcode(), V1, YS,
Op0BO->getOperand(0)->getName()); Op0BO->getOperand(0)->getName());
unsigned Op1Val = Op1C->getLimitedValue(TypeBits); unsigned Op1Val = Op1C->getLimitedValue(TypeBits);
APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val); APInt Bits = APInt::getHighBitsSet(TypeBits, TypeBits - Op1Val);
@ -456,10 +455,10 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))), m_And(m_OneUse(m_Shr(m_Value(V1), m_Value(V2))),
m_ConstantInt(CC))) && V2 == Op1) { m_ConstantInt(CC))) && V2 == Op1) {
Value *YS = // (Y << C) Value *YS = // (Y << C)
Builder->CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName()); Builder.CreateShl(Op0BO->getOperand(1), Op1, Op0BO->getName());
// X & (CC << C) // X & (CC << C)
Value *XM = Builder->CreateAnd(V1, ConstantExpr::getShl(CC, Op1), Value *XM = Builder.CreateAnd(V1, ConstantExpr::getShl(CC, Op1),
V1->getName()+".mask"); V1->getName()+".mask");
return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS); return BinaryOperator::Create(Op0BO->getOpcode(), XM, YS);
} }
@ -502,7 +501,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1,
Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1); Constant *NewRHS = ConstantExpr::get(I.getOpcode(), Op0C, Op1);
Value *NewShift = Value *NewShift =
Builder->CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1); Builder.CreateBinOp(I.getOpcode(), Op0BO->getOperand(0), Op1);
NewShift->takeName(Op0BO); NewShift->takeName(Op0BO);
return BinaryOperator::Create(Op0BO->getOpcode(), NewShift, return BinaryOperator::Create(Op0BO->getOpcode(), NewShift,
@ -541,7 +540,7 @@ Instruction *InstCombiner::visitShl(BinaryOperator &I) {
unsigned SrcWidth = X->getType()->getScalarSizeInBits(); unsigned SrcWidth = X->getType()->getScalarSizeInBits();
if (ShAmt < SrcWidth && if (ShAmt < SrcWidth &&
MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I)) MaskedValueIsZero(X, APInt::getHighBitsSet(SrcWidth, ShAmt), 0, &I))
return new ZExtInst(Builder->CreateShl(X, ShAmt), Ty); return new ZExtInst(Builder.CreateShl(X, ShAmt), Ty);
} }
// (X >>u C) << C --> X & (-1 << C) // (X >>u C) << C --> X & (-1 << C)
@ -641,7 +640,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
// ctpop.i32(x)>>5 --> zext(x == -1) // ctpop.i32(x)>>5 --> zext(x == -1)
bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop; bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0); Constant *RHS = ConstantInt::getSigned(Ty, IsPop ? -1 : 0);
Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS); Value *Cmp = Builder.CreateICmpEQ(II->getArgOperand(0), RHS);
return new ZExtInst(Cmp, Ty); return new ZExtInst(Cmp, Ty);
} }
@ -658,7 +657,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
return NewLShr; return NewLShr;
} }
// (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2) // (X << C1) >>u C2 --> (X >>u (C2 - C1)) & (-1 >> C2)
Value *NewLShr = Builder->CreateLShr(X, ShiftDiff, "", I.isExact()); Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt)); APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask)); return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
} }
@ -671,7 +670,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
return NewShl; return NewShl;
} }
// (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2) // (X << C1) >>u C2 --> X << (C1 - C2) & (-1 >> C2)
Value *NewShl = Builder->CreateShl(X, ShiftDiff); Value *NewShl = Builder.CreateShl(X, ShiftDiff);
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt)); APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt));
return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask)); return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
} }
@ -692,7 +691,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
// lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN // lshr (sext iM X to iN), N-1 --> zext (lshr X, M-1) to iN
if (Op0->hasOneUse()) { if (Op0->hasOneUse()) {
Value *NewLShr = Builder->CreateLShr(X, SrcTyBitWidth - 1); Value *NewLShr = Builder.CreateLShr(X, SrcTyBitWidth - 1);
return new ZExtInst(NewLShr, Ty); return new ZExtInst(NewLShr, Ty);
} }
} }
@ -701,7 +700,7 @@ Instruction *InstCombiner::visitLShr(BinaryOperator &I) {
if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) { if (ShAmt == BitWidth - SrcTyBitWidth && Op0->hasOneUse()) {
// The new shift amount can't be more than the narrow source type. // The new shift amount can't be more than the narrow source type.
unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1); unsigned NewShAmt = std::min(ShAmt, SrcTyBitWidth - 1);
Value *AShr = Builder->CreateAShr(X, NewShAmt); Value *AShr = Builder.CreateAShr(X, NewShAmt);
return new ZExtInst(AShr, Ty); return new ZExtInst(AShr, Ty);
} }
} }

View File

@ -1627,10 +1627,10 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I) for (unsigned I = 0, E = II->getNumArgOperands(); I != E; ++I)
Args.push_back(II->getArgOperand(I)); Args.push_back(II->getArgOperand(I));
IRBuilderBase::InsertPointGuard Guard(*Builder); IRBuilderBase::InsertPointGuard Guard(Builder);
Builder->SetInsertPoint(II); Builder.SetInsertPoint(II);
CallInst *NewCall = Builder->CreateCall(NewIntrin, Args); CallInst *NewCall = Builder.CreateCall(NewIntrin, Args);
NewCall->takeName(II); NewCall->takeName(II);
NewCall->copyMetadata(*II); NewCall->copyMetadata(*II);
@ -1657,15 +1657,15 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
if (NewNumElts == 1) { if (NewNumElts == 1) {
return Builder->CreateInsertElement(UndefValue::get(V->getType()), return Builder.CreateInsertElement(UndefValue::get(V->getType()),
NewCall, static_cast<uint64_t>(0)); NewCall, static_cast<uint64_t>(0));
} }
SmallVector<uint32_t, 8> EltMask; SmallVector<uint32_t, 8> EltMask;
for (unsigned I = 0; I < VWidth; ++I) for (unsigned I = 0; I < VWidth; ++I)
EltMask.push_back(I); EltMask.push_back(I);
Value *Shuffle = Builder->CreateShuffleVector( Value *Shuffle = Builder.CreateShuffleVector(
NewCall, UndefValue::get(NewTy), EltMask); NewCall, UndefValue::get(NewTy), EltMask);
MadeChange = true; MadeChange = true;

View File

@ -204,11 +204,11 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
if (I->hasOneUse() && if (I->hasOneUse() &&
cheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) { cheapToScalarize(BO, isa<ConstantInt>(EI.getOperand(1)))) {
Value *newEI0 = Value *newEI0 =
Builder->CreateExtractElement(BO->getOperand(0), EI.getOperand(1), Builder.CreateExtractElement(BO->getOperand(0), EI.getOperand(1),
EI.getName()+".lhs"); EI.getName()+".lhs");
Value *newEI1 = Value *newEI1 =
Builder->CreateExtractElement(BO->getOperand(1), EI.getOperand(1), Builder.CreateExtractElement(BO->getOperand(1), EI.getOperand(1),
EI.getName()+".rhs"); EI.getName()+".rhs");
return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(), return BinaryOperator::CreateWithCopiedFlags(BO->getOpcode(),
newEI0, newEI1, BO); newEI0, newEI1, BO);
} }
@ -250,8 +250,8 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
// Bitcasts can change the number of vector elements, and they cost // Bitcasts can change the number of vector elements, and they cost
// nothing. // nothing.
if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) { if (CI->hasOneUse() && (CI->getOpcode() != Instruction::BitCast)) {
Value *EE = Builder->CreateExtractElement(CI->getOperand(0), Value *EE = Builder.CreateExtractElement(CI->getOperand(0),
EI.getIndexOperand()); EI.getIndexOperand());
Worklist.AddValue(EE); Worklist.AddValue(EE);
return CastInst::Create(CI->getOpcode(), EE, EI.getType()); return CastInst::Create(CI->getOpcode(), EE, EI.getType());
} }
@ -269,20 +269,20 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
Value *Cond = SI->getCondition(); Value *Cond = SI->getCondition();
if (Cond->getType()->isVectorTy()) { if (Cond->getType()->isVectorTy()) {
Cond = Builder->CreateExtractElement(Cond, Cond = Builder.CreateExtractElement(Cond,
EI.getIndexOperand(), EI.getIndexOperand(),
Cond->getName() + ".elt"); Cond->getName() + ".elt");
} }
Value *V1Elem Value *V1Elem
= Builder->CreateExtractElement(TrueVal, = Builder.CreateExtractElement(TrueVal,
EI.getIndexOperand(), EI.getIndexOperand(),
TrueVal->getName() + ".elt"); TrueVal->getName() + ".elt");
Value *V2Elem Value *V2Elem
= Builder->CreateExtractElement(FalseVal, = Builder.CreateExtractElement(FalseVal,
EI.getIndexOperand(), EI.getIndexOperand(),
FalseVal->getName() + ".elt"); FalseVal->getName() + ".elt");
return SelectInst::Create(Cond, return SelectInst::Create(Cond,
V1Elem, V1Elem,
V2Elem, V2Elem,
@ -837,7 +837,7 @@ Instruction *InstCombiner::visitInsertElementInst(InsertElementInst &IE) {
if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE)) if (Instruction *Shuf = foldConstantInsEltIntoShuffle(IE))
return Shuf; return Shuf;
if (Instruction *NewInsElt = hoistInsEltConst(IE, *Builder)) if (Instruction *NewInsElt = hoistInsEltConst(IE, Builder))
return NewInsElt; return NewInsElt;
// Turn a sequence of inserts that broadcasts a scalar into a single // Turn a sequence of inserts that broadcasts a scalar into a single
@ -1020,9 +1020,9 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
SmallVector<Constant *, 16> MaskValues; SmallVector<Constant *, 16> MaskValues;
for (int i = 0, e = Mask.size(); i != e; ++i) { for (int i = 0, e = Mask.size(); i != e; ++i) {
if (Mask[i] == -1) if (Mask[i] == -1)
MaskValues.push_back(UndefValue::get(Builder->getInt32Ty())); MaskValues.push_back(UndefValue::get(Builder.getInt32Ty()));
else else
MaskValues.push_back(Builder->getInt32(Mask[i])); MaskValues.push_back(Builder.getInt32(Mask[i]));
} }
return ConstantExpr::getShuffleVector(C, UndefValue::get(C->getType()), return ConstantExpr::getShuffleVector(C, UndefValue::get(C->getType()),
ConstantVector::get(MaskValues)); ConstantVector::get(MaskValues));
@ -1095,7 +1095,7 @@ InstCombiner::EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask) {
Value *V = EvaluateInDifferentElementOrder(I->getOperand(0), Mask); Value *V = EvaluateInDifferentElementOrder(I->getOperand(0), Mask);
return InsertElementInst::Create(V, I->getOperand(1), return InsertElementInst::Create(V, I->getOperand(1),
Builder->getInt32(Index), "", I); Builder.getInt32(Index), "", I);
} }
} }
llvm_unreachable("failed to reorder elements of vector instruction!"); llvm_unreachable("failed to reorder elements of vector instruction!");
@ -1275,9 +1275,9 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
UndefValue::get(Int32Ty)); UndefValue::get(Int32Ty));
for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I) for (unsigned I = 0, E = MaskElems, Idx = BegIdx; I != E; ++Idx, ++I)
ShuffleMask[I] = ConstantInt::get(Int32Ty, Idx); ShuffleMask[I] = ConstantInt::get(Int32Ty, Idx);
V = Builder->CreateShuffleVector(V, UndefValue::get(V->getType()), V = Builder.CreateShuffleVector(V, UndefValue::get(V->getType()),
ConstantVector::get(ShuffleMask), ConstantVector::get(ShuffleMask),
SVI.getName() + ".extract"); SVI.getName() + ".extract");
BegIdx = 0; BegIdx = 0;
} }
unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth; unsigned SrcElemsPerTgtElem = TgtElemBitWidth / SrcElemBitWidth;
@ -1287,10 +1287,10 @@ Instruction *InstCombiner::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
auto *NewBC = auto *NewBC =
BCAlreadyExists BCAlreadyExists
? NewBCs[CastSrcTy] ? NewBCs[CastSrcTy]
: Builder->CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc"); : Builder.CreateBitCast(V, CastSrcTy, SVI.getName() + ".bc");
if (!BCAlreadyExists) if (!BCAlreadyExists)
NewBCs[CastSrcTy] = NewBC; NewBCs[CastSrcTy] = NewBC;
auto *Ext = Builder->CreateExtractElement( auto *Ext = Builder.CreateExtractElement(
NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract"); NewBC, ConstantInt::get(Int32Ty, BegIdx), SVI.getName() + ".extract");
// The shufflevector isn't being replaced: the bitcast that used it // The shufflevector isn't being replaced: the bitcast that used it
// is. InstCombine will visit the newly-created instructions. // is. InstCombine will visit the newly-created instructions.

View File

@ -88,7 +88,7 @@ MaxArraySize("instcombine-maxarray-size", cl::init(1024),
cl::desc("Maximum array size considered when doing a combine")); cl::desc("Maximum array size considered when doing a combine"));
Value *InstCombiner::EmitGEPOffset(User *GEP) { Value *InstCombiner::EmitGEPOffset(User *GEP) {
return llvm::EmitGEPOffset(Builder, DL, GEP); return llvm::EmitGEPOffset(&Builder, DL, GEP);
} }
/// Return true if it is desirable to convert an integer computation from a /// Return true if it is desirable to convert an integer computation from a
@ -524,9 +524,9 @@ Value *InstCombiner::tryFactorization(BinaryOperator &I,
// If "B op D" doesn't simplify then only go on if both of the existing // If "B op D" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used. // operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && LHS->hasOneUse() && RHS->hasOneUse()) if (!V && LHS->hasOneUse() && RHS->hasOneUse())
V = Builder->CreateBinOp(TopLevelOpcode, B, D, RHS->getName()); V = Builder.CreateBinOp(TopLevelOpcode, B, D, RHS->getName());
if (V) { if (V) {
SimplifiedInst = Builder->CreateBinOp(InnerOpcode, A, V); SimplifiedInst = Builder.CreateBinOp(InnerOpcode, A, V);
} }
} }
@ -544,9 +544,9 @@ Value *InstCombiner::tryFactorization(BinaryOperator &I,
// If "A op C" doesn't simplify then only go on if both of the existing // If "A op C" doesn't simplify then only go on if both of the existing
// operations "A op' B" and "C op' D" will be zapped as no longer used. // operations "A op' B" and "C op' D" will be zapped as no longer used.
if (!V && LHS->hasOneUse() && RHS->hasOneUse()) if (!V && LHS->hasOneUse() && RHS->hasOneUse())
V = Builder->CreateBinOp(TopLevelOpcode, A, C, LHS->getName()); V = Builder.CreateBinOp(TopLevelOpcode, A, C, LHS->getName());
if (V) { if (V) {
SimplifiedInst = Builder->CreateBinOp(InnerOpcode, V, B); SimplifiedInst = Builder.CreateBinOp(InnerOpcode, V, B);
} }
} }
@ -643,7 +643,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
SimplifyBinOp(TopLevelOpcode, B, C, SQ.getWithInstruction(&I))) { SimplifyBinOp(TopLevelOpcode, B, C, SQ.getWithInstruction(&I))) {
// They do! Return "L op' R". // They do! Return "L op' R".
++NumExpand; ++NumExpand;
C = Builder->CreateBinOp(InnerOpcode, L, R); C = Builder.CreateBinOp(InnerOpcode, L, R);
C->takeName(&I); C->takeName(&I);
return C; return C;
} }
@ -662,7 +662,7 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I))) { SimplifyBinOp(TopLevelOpcode, A, C, SQ.getWithInstruction(&I))) {
// They do! Return "L op' R". // They do! Return "L op' R".
++NumExpand; ++NumExpand;
A = Builder->CreateBinOp(InnerOpcode, L, R); A = Builder.CreateBinOp(InnerOpcode, L, R);
A->takeName(&I); A->takeName(&I);
return A; return A;
} }
@ -677,18 +677,18 @@ Value *InstCombiner::SimplifyUsingDistributiveLaws(BinaryOperator &I) {
if (Value *V = if (Value *V =
SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(), SimplifyBinOp(TopLevelOpcode, SI0->getFalseValue(),
SI1->getFalseValue(), SQ.getWithInstruction(&I))) SI1->getFalseValue(), SQ.getWithInstruction(&I)))
SI = Builder->CreateSelect(SI0->getCondition(), SI = Builder.CreateSelect(SI0->getCondition(),
Builder->CreateBinOp(TopLevelOpcode, Builder.CreateBinOp(TopLevelOpcode,
SI0->getTrueValue(), SI0->getTrueValue(),
SI1->getTrueValue()), SI1->getTrueValue()),
V); V);
if (Value *V = if (Value *V =
SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(), SimplifyBinOp(TopLevelOpcode, SI0->getTrueValue(),
SI1->getTrueValue(), SQ.getWithInstruction(&I))) SI1->getTrueValue(), SQ.getWithInstruction(&I)))
SI = Builder->CreateSelect( SI = Builder.CreateSelect(
SI0->getCondition(), V, SI0->getCondition(), V,
Builder->CreateBinOp(TopLevelOpcode, SI0->getFalseValue(), Builder.CreateBinOp(TopLevelOpcode, SI0->getFalseValue(),
SI1->getFalseValue())); SI1->getFalseValue()));
if (SI) { if (SI) {
SI->takeName(&I); SI->takeName(&I);
return SI; return SI;
@ -750,9 +750,9 @@ Value *InstCombiner::dyn_castFNegVal(Value *V, bool IgnoreZeroSign) const {
} }
static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO, static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
if (auto *Cast = dyn_cast<CastInst>(&I)) if (auto *Cast = dyn_cast<CastInst>(&I))
return Builder->CreateCast(Cast->getOpcode(), SO, I.getType()); return Builder.CreateCast(Cast->getOpcode(), SO, I.getType());
assert(I.isBinaryOp() && "Unexpected opcode for select folding"); assert(I.isBinaryOp() && "Unexpected opcode for select folding");
@ -771,8 +771,8 @@ static Value *foldOperationIntoSelectOperand(Instruction &I, Value *SO,
std::swap(Op0, Op1); std::swap(Op0, Op1);
auto *BO = cast<BinaryOperator>(&I); auto *BO = cast<BinaryOperator>(&I);
Value *RI = Builder->CreateBinOp(BO->getOpcode(), Op0, Op1, Value *RI = Builder.CreateBinOp(BO->getOpcode(), Op0, Op1,
SO->getName() + ".op"); SO->getName() + ".op");
auto *FPInst = dyn_cast<Instruction>(RI); auto *FPInst = dyn_cast<Instruction>(RI);
if (FPInst && isa<FPMathOperator>(FPInst)) if (FPInst && isa<FPMathOperator>(FPInst))
FPInst->copyFastMathFlags(BO); FPInst->copyFastMathFlags(BO);
@ -830,7 +830,7 @@ Instruction *InstCombiner::FoldOpIntoSelect(Instruction &Op, SelectInst *SI) {
} }
static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV, static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
InstCombiner::BuilderTy *Builder) { InstCombiner::BuilderTy &Builder) {
bool ConstIsRHS = isa<Constant>(I->getOperand(1)); bool ConstIsRHS = isa<Constant>(I->getOperand(1));
Constant *C = cast<Constant>(I->getOperand(ConstIsRHS)); Constant *C = cast<Constant>(I->getOperand(ConstIsRHS));
@ -844,7 +844,7 @@ static Value *foldOperationIntoPhiValue(BinaryOperator *I, Value *InV,
if (!ConstIsRHS) if (!ConstIsRHS)
std::swap(Op0, Op1); std::swap(Op0, Op1);
Value *RI = Builder->CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp"); Value *RI = Builder.CreateBinOp(I->getOpcode(), Op0, Op1, "phitmp");
auto *FPInst = dyn_cast<Instruction>(RI); auto *FPInst = dyn_cast<Instruction>(RI);
if (FPInst && isa<FPMathOperator>(FPInst)) if (FPInst && isa<FPMathOperator>(FPInst))
FPInst->copyFastMathFlags(I); FPInst->copyFastMathFlags(I);
@ -915,7 +915,7 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// If we are going to have to insert a new computation, do so right before the // If we are going to have to insert a new computation, do so right before the
// predecessor's terminator. // predecessor's terminator.
if (NonConstBB) if (NonConstBB)
Builder->SetInsertPoint(NonConstBB->getTerminator()); Builder.SetInsertPoint(NonConstBB->getTerminator());
// Next, add all of the operands to the PHI. // Next, add all of the operands to the PHI.
if (SelectInst *SI = dyn_cast<SelectInst>(&I)) { if (SelectInst *SI = dyn_cast<SelectInst>(&I)) {
@ -947,9 +947,9 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
// folded to TrueVInPred or FalseVInPred as done for ConstantInt. For // folded to TrueVInPred or FalseVInPred as done for ConstantInt. For
// non-vector phis, this transformation was always profitable because // non-vector phis, this transformation was always profitable because
// the select would be generated exactly once in the NonConstBB. // the select would be generated exactly once in the NonConstBB.
Builder->SetInsertPoint(ThisBB->getTerminator()); Builder.SetInsertPoint(ThisBB->getTerminator());
InV = Builder->CreateSelect(PN->getIncomingValue(i), InV = Builder.CreateSelect(PN->getIncomingValue(i), TrueVInPred,
TrueVInPred, FalseVInPred, "phitmp"); FalseVInPred, "phitmp");
} }
NewPN->addIncoming(InV, ThisBB); NewPN->addIncoming(InV, ThisBB);
} }
@ -960,11 +960,11 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C); InV = ConstantExpr::getCompare(CI->getPredicate(), InC, C);
else if (isa<ICmpInst>(CI)) else if (isa<ICmpInst>(CI))
InV = Builder->CreateICmp(CI->getPredicate(), PN->getIncomingValue(i), InV = Builder.CreateICmp(CI->getPredicate(), PN->getIncomingValue(i),
C, "phitmp"); C, "phitmp");
else else
InV = Builder->CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i), InV = Builder.CreateFCmp(CI->getPredicate(), PN->getIncomingValue(i),
C, "phitmp"); C, "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i)); NewPN->addIncoming(InV, PN->getIncomingBlock(i));
} }
} else if (auto *BO = dyn_cast<BinaryOperator>(&I)) { } else if (auto *BO = dyn_cast<BinaryOperator>(&I)) {
@ -981,8 +981,8 @@ Instruction *InstCombiner::foldOpIntoPhi(Instruction &I, PHINode *PN) {
if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i))) if (Constant *InC = dyn_cast<Constant>(PN->getIncomingValue(i)))
InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy); InV = ConstantExpr::getCast(CI->getOpcode(), InC, RetTy);
else else
InV = Builder->CreateCast(CI->getOpcode(), InV = Builder.CreateCast(CI->getOpcode(), PN->getIncomingValue(i),
PN->getIncomingValue(i), I.getType(), "phitmp"); I.getType(), "phitmp");
NewPN->addIncoming(InV, PN->getIncomingBlock(i)); NewPN->addIncoming(InV, PN->getIncomingBlock(i));
} }
} }
@ -1328,8 +1328,8 @@ Value *InstCombiner::Descale(Value *Val, APInt Scale, bool &NoSignedWrap) {
/// \brief Creates node of binary operation with the same attributes as the /// \brief Creates node of binary operation with the same attributes as the
/// specified one but with other operands. /// specified one but with other operands.
static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS, static Value *CreateBinOpAsGiven(BinaryOperator &Inst, Value *LHS, Value *RHS,
InstCombiner::BuilderTy *B) { InstCombiner::BuilderTy &B) {
Value *BO = B->CreateBinOp(Inst.getOpcode(), LHS, RHS); Value *BO = B.CreateBinOp(Inst.getOpcode(), LHS, RHS);
// If LHS and RHS are constant, BO won't be a binary operator. // If LHS and RHS are constant, BO won't be a binary operator.
if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO)) if (BinaryOperator *NewBO = dyn_cast<BinaryOperator>(BO))
NewBO->copyIRFlags(&Inst); NewBO->copyIRFlags(&Inst);
@ -1365,7 +1365,7 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) { LShuf->getOperand(0)->getType() == RShuf->getOperand(0)->getType()) {
Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0), Value *NewBO = CreateBinOpAsGiven(Inst, LShuf->getOperand(0),
RShuf->getOperand(0), Builder); RShuf->getOperand(0), Builder);
return Builder->CreateShuffleVector( return Builder.CreateShuffleVector(
NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask()); NewBO, UndefValue::get(NewBO->getType()), LShuf->getMask());
} }
@ -1404,7 +1404,7 @@ Value *InstCombiner::SimplifyVectorOp(BinaryOperator &Inst) {
Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0); Value *NewLHS = isa<Constant>(LHS) ? C2 : Shuffle->getOperand(0);
Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2; Value *NewRHS = isa<Constant>(LHS) ? Shuffle->getOperand(0) : C2;
Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder); Value *NewBO = CreateBinOpAsGiven(Inst, NewLHS, NewRHS, Builder);
return Builder->CreateShuffleVector(NewBO, return Builder.CreateShuffleVector(NewBO,
UndefValue::get(Inst.getType()), Shuffle->getMask()); UndefValue::get(Inst.getType()), Shuffle->getMask());
} }
} }
@ -1452,7 +1452,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If we are using a wider index than needed for this platform, shrink // If we are using a wider index than needed for this platform, shrink
// it to what we need. If narrower, sign-extend it to what we need. // it to what we need. If narrower, sign-extend it to what we need.
// This explicit cast can make subsequent optimizations more obvious. // This explicit cast can make subsequent optimizations more obvious.
*I = Builder->CreateIntCast(*I, NewIndexType, true); *I = Builder.CreateIntCast(*I, NewIndexType, true);
MadeChange = true; MadeChange = true;
} }
} }
@ -1546,10 +1546,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// set that index. // set that index.
PHINode *NewPN; PHINode *NewPN;
{ {
IRBuilderBase::InsertPointGuard Guard(*Builder); IRBuilderBase::InsertPointGuard Guard(Builder);
Builder->SetInsertPoint(PN); Builder.SetInsertPoint(PN);
NewPN = Builder->CreatePHI(Op1->getOperand(DI)->getType(), NewPN = Builder.CreatePHI(Op1->getOperand(DI)->getType(),
PN->getNumOperands()); PN->getNumOperands());
} }
for (auto &I : PN->operands()) for (auto &I : PN->operands())
@ -1669,8 +1669,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// pointer arithmetic. // pointer arithmetic.
if (match(V, m_Neg(m_PtrToInt(m_Value())))) { if (match(V, m_Neg(m_PtrToInt(m_Value())))) {
Operator *Index = cast<Operator>(V); Operator *Index = cast<Operator>(V);
Value *PtrToInt = Builder->CreatePtrToInt(PtrOp, Index->getType()); Value *PtrToInt = Builder.CreatePtrToInt(PtrOp, Index->getType());
Value *NewSub = Builder->CreateSub(PtrToInt, Index->getOperand(1)); Value *NewSub = Builder.CreateSub(PtrToInt, Index->getOperand(1));
return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType()); return CastInst::Create(Instruction::IntToPtr, NewSub, GEP.getType());
} }
// Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X)) // Canonicalize (gep i8* X, (ptrtoint Y)-(ptrtoint X))
@ -1723,7 +1723,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// -> // ->
// %0 = GEP i8 addrspace(1)* X, ... // %0 = GEP i8 addrspace(1)* X, ...
// addrspacecast i8 addrspace(1)* %0 to i8* // addrspacecast i8 addrspace(1)* %0 to i8*
return new AddrSpaceCastInst(Builder->Insert(Res), GEP.getType()); return new AddrSpaceCastInst(Builder.Insert(Res), GEP.getType());
} }
if (ArrayType *XATy = if (ArrayType *XATy =
@ -1751,10 +1751,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// addrspacecast i8 addrspace(1)* %0 to i8* // addrspacecast i8 addrspace(1)* %0 to i8*
SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end()); SmallVector<Value*, 8> Idx(GEP.idx_begin(), GEP.idx_end());
Value *NewGEP = GEP.isInBounds() Value *NewGEP = GEP.isInBounds()
? Builder->CreateInBoundsGEP( ? Builder.CreateInBoundsGEP(
nullptr, StrippedPtr, Idx, GEP.getName()) nullptr, StrippedPtr, Idx, GEP.getName())
: Builder->CreateGEP(nullptr, StrippedPtr, Idx, : Builder.CreateGEP(nullptr, StrippedPtr, Idx,
GEP.getName()); GEP.getName());
return new AddrSpaceCastInst(NewGEP, GEP.getType()); return new AddrSpaceCastInst(NewGEP, GEP.getType());
} }
} }
@ -1772,9 +1772,9 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) }; Value *Idx[2] = { Constant::getNullValue(IdxType), GEP.getOperand(1) };
Value *NewGEP = Value *NewGEP =
GEP.isInBounds() GEP.isInBounds()
? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, Idx, ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, Idx,
GEP.getName()) GEP.getName())
: Builder->CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName()); : Builder.CreateGEP(nullptr, StrippedPtr, Idx, GEP.getName());
// V and GEP are both pointer types --> BitCast // V and GEP are both pointer types --> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
@ -1807,10 +1807,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// GEP may not be "inbounds". // GEP may not be "inbounds".
Value *NewGEP = Value *NewGEP =
GEP.isInBounds() && NSW GEP.isInBounds() && NSW
? Builder->CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx, ? Builder.CreateInBoundsGEP(nullptr, StrippedPtr, NewIdx,
GEP.getName()) GEP.getName())
: Builder->CreateGEP(nullptr, StrippedPtr, NewIdx, : Builder.CreateGEP(nullptr, StrippedPtr, NewIdx,
GEP.getName()); GEP.getName());
// The NewGEP must be pointer typed, so must the old one -> BitCast // The NewGEP must be pointer typed, so must the old one -> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
@ -1849,10 +1849,10 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
NewIdx}; NewIdx};
Value *NewGEP = GEP.isInBounds() && NSW Value *NewGEP = GEP.isInBounds() && NSW
? Builder->CreateInBoundsGEP( ? Builder.CreateInBoundsGEP(
SrcElTy, StrippedPtr, Off, GEP.getName()) SrcElTy, StrippedPtr, Off, GEP.getName())
: Builder->CreateGEP(SrcElTy, StrippedPtr, Off, : Builder.CreateGEP(SrcElTy, StrippedPtr, Off,
GEP.getName()); GEP.getName());
// The NewGEP must be pointer typed, so must the old one -> BitCast // The NewGEP must be pointer typed, so must the old one -> BitCast
return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP, return CastInst::CreatePointerBitCastOrAddrSpaceCast(NewGEP,
GEP.getType()); GEP.getType());
@ -1916,8 +1916,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) { if (FindElementAtOffset(OpType, Offset.getSExtValue(), NewIndices)) {
Value *NGEP = Value *NGEP =
GEP.isInBounds() GEP.isInBounds()
? Builder->CreateInBoundsGEP(nullptr, Operand, NewIndices) ? Builder.CreateInBoundsGEP(nullptr, Operand, NewIndices)
: Builder->CreateGEP(nullptr, Operand, NewIndices); : Builder.CreateGEP(nullptr, Operand, NewIndices);
if (NGEP->getType() == GEP.getType()) if (NGEP->getType() == GEP.getType())
return replaceInstUsesWith(GEP, NGEP); return replaceInstUsesWith(GEP, NGEP);
@ -2166,8 +2166,8 @@ Instruction *InstCombiner::visitFree(CallInst &FI) {
// free undef -> unreachable. // free undef -> unreachable.
if (isa<UndefValue>(Op)) { if (isa<UndefValue>(Op)) {
// Insert a new store to null because we cannot modify the CFG here. // Insert a new store to null because we cannot modify the CFG here.
Builder->CreateStore(ConstantInt::getTrue(FI.getContext()), Builder.CreateStore(ConstantInt::getTrue(FI.getContext()),
UndefValue::get(Type::getInt1PtrTy(FI.getContext()))); UndefValue::get(Type::getInt1PtrTy(FI.getContext())));
return eraseInstFromFunction(FI); return eraseInstFromFunction(FI);
} }
@ -2281,8 +2281,8 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
// the backend should extend back to a legal type for the target. // the backend should extend back to a legal type for the target.
if (NewWidth > 0 && NewWidth < Known.getBitWidth()) { if (NewWidth > 0 && NewWidth < Known.getBitWidth()) {
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth); IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
Builder->SetInsertPoint(&SI); Builder.SetInsertPoint(&SI);
Value *NewCond = Builder->CreateTrunc(Cond, Ty, "trunc"); Value *NewCond = Builder.CreateTrunc(Cond, Ty, "trunc");
SI.setCondition(NewCond); SI.setCondition(NewCond);
for (auto Case : SI.cases()) { for (auto Case : SI.cases()) {
@ -2339,8 +2339,8 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// %E = insertvalue { i32 } %X, i32 42, 0 // %E = insertvalue { i32 } %X, i32 42, 0
// by switching the order of the insert and extract (though the // by switching the order of the insert and extract (though the
// insertvalue should be left in, since it may have other uses). // insertvalue should be left in, since it may have other uses).
Value *NewEV = Builder->CreateExtractValue(IV->getAggregateOperand(), Value *NewEV = Builder.CreateExtractValue(IV->getAggregateOperand(),
EV.getIndices()); EV.getIndices());
return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(), return InsertValueInst::Create(NewEV, IV->getInsertedValueOperand(),
makeArrayRef(insi, inse)); makeArrayRef(insi, inse));
} }
@ -2415,17 +2415,17 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
// extractvalue has integer indices, getelementptr has Value*s. Convert. // extractvalue has integer indices, getelementptr has Value*s. Convert.
SmallVector<Value*, 4> Indices; SmallVector<Value*, 4> Indices;
// Prefix an i32 0 since we need the first element. // Prefix an i32 0 since we need the first element.
Indices.push_back(Builder->getInt32(0)); Indices.push_back(Builder.getInt32(0));
for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end(); for (ExtractValueInst::idx_iterator I = EV.idx_begin(), E = EV.idx_end();
I != E; ++I) I != E; ++I)
Indices.push_back(Builder->getInt32(*I)); Indices.push_back(Builder.getInt32(*I));
// We need to insert these at the location of the old load, not at that of // We need to insert these at the location of the old load, not at that of
// the extractvalue. // the extractvalue.
Builder->SetInsertPoint(L); Builder.SetInsertPoint(L);
Value *GEP = Builder->CreateInBoundsGEP(L->getType(), Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
L->getPointerOperand(), Indices); L->getPointerOperand(), Indices);
Instruction *NL = Builder->CreateLoad(GEP); Instruction *NL = Builder.CreateLoad(GEP);
// Whatever aliasing information we had for the orignal load must also // Whatever aliasing information we had for the orignal load must also
// hold for the smaller load, so propagate the annotations. // hold for the smaller load, so propagate the annotations.
AAMDNodes Nodes; AAMDNodes Nodes;
@ -2922,8 +2922,8 @@ bool InstCombiner::run() {
} }
// Now that we have an instruction, try combining it to simplify it. // Now that we have an instruction, try combining it to simplify it.
Builder->SetInsertPoint(I); Builder.SetInsertPoint(I);
Builder->SetCurrentDebugLocation(I->getDebugLoc()); Builder.SetCurrentDebugLocation(I->getDebugLoc());
#ifndef NDEBUG #ifndef NDEBUG
std::string OrigI; std::string OrigI;
@ -3160,7 +3160,7 @@ combineInstructionsOverFunction(Function &F, InstCombineWorklist &Worklist,
MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist); MadeIRChange |= prepareICWorklistFromFunction(F, DL, &TLI, Worklist);
InstCombiner IC(Worklist, &Builder, F.optForMinSize(), ExpensiveCombines, InstCombiner IC(Worklist, Builder, F.optForMinSize(), ExpensiveCombines,
AA, AC, TLI, DT, DL, LI); AA, AC, TLI, DT, DL, LI);
IC.MaxArraySizeForCombine = MaxArraySize; IC.MaxArraySizeForCombine = MaxArraySize;