Mirror of https://github.com/llvm/llvm-project.git (synced 2025-04-26 18:36:05 +00:00)
[IR][PatternMatch] Only accept poison in getSplatValue() (#89159)
In #88217 a large set of matchers was changed to only accept poison values in splats, but not undef values. This is because we now use poison for non-demanded vector elements, and allowing undef can cause correctness issues.

This patch covers the remaining matchers by changing the AllowUndef parameter of getSplatValue() to AllowPoison instead. We also carry out corresponding renames in matchers.

As a followup, we may want to change the default for things like m_APInt to m_APIntAllowPoison (as this is much less risky when only allowing poison), but this change doesn't do that.

There is one caveat here: We have a single place (X86FixupVectorConstants) which does require handling of vector splats with undefs. This is because this works on backend constant pool entries, which currently still use undef instead of poison for non-demanded elements (because SDAG as a whole does not have an explicit poison representation). As it's just the single use, I've open-coded a getSplatValueAllowUndef() helper there, to discourage use in any other places.
parent: 7ec342ba16
commit: 1baa385065
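The behavioral difference is easiest to see at a use site. The sketch below is illustrative only and is not part of the diff that follows; it assumes the post-patch PatternMatch API, where m_APIntAllowPoison binds the splat value of a vector constant while ignoring poison lanes, and the helper name isSplatOf42 is hypothetical.

```cpp
// Illustrative sketch only: how the renamed matcher is intended to be used
// after this patch. isSplatOf42 is a hypothetical helper, not LLVM code.
#include "llvm/IR/PatternMatch.h"

using namespace llvm;
using namespace llvm::PatternMatch;

static bool isSplatOf42(Value *V) {
  const APInt *C;
  // Matches a scalar i32 42 and splats such as <i32 42, i32 poison, i32 42>;
  // a lane holding undef (rather than poison) no longer matches.
  return match(V, m_APIntAllowPoison(C)) && *C == 42;
}
```

A splat that still contains undef lanes has to go through the open-coded getSplatValueAllowUndef() helper added in X86FixupVectorConstants, as the commit message notes.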
@@ -146,9 +146,9 @@ public:
 Constant *getAggregateElement(Constant *Elt) const;

 /// If all elements of the vector constant have the same value, return that
-/// value. Otherwise, return nullptr. Ignore undefined elements by setting
-/// AllowUndefs to true.
-Constant *getSplatValue(bool AllowUndefs = false) const;
+/// value. Otherwise, return nullptr. Ignore poison elements by setting
+/// AllowPoison to true.
+Constant *getSplatValue(bool AllowPoison = false) const;

 /// If C is a constant integer then return its value, otherwise C must be a
 /// vector of constant integers, all equal, and the common value is returned.
@@ -532,9 +532,9 @@ public:
 }

 /// If all elements of the vector constant have the same value, return that
-/// value. Otherwise, return nullptr. Ignore undefined elements by setting
-/// AllowUndefs to true.
-Constant *getSplatValue(bool AllowUndefs = false) const;
+/// value. Otherwise, return nullptr. Ignore poison elements by setting
+/// AllowPoison to true.
+Constant *getSplatValue(bool AllowPoison = false) const;

 /// Methods for support type inquiry through isa, cast, and dyn_cast:
 static bool classof(const Value *V) {
@@ -243,10 +243,10 @@ inline match_combine_and<LTy, RTy> m_CombineAnd(const LTy &L, const RTy &R) {

 struct apint_match {
 const APInt *&Res;
-bool AllowUndef;
+bool AllowPoison;

-apint_match(const APInt *&Res, bool AllowUndef)
-: Res(Res), AllowUndef(AllowUndef) {}
+apint_match(const APInt *&Res, bool AllowPoison)
+: Res(Res), AllowPoison(AllowPoison) {}

 template <typename ITy> bool match(ITy *V) {
 if (auto *CI = dyn_cast<ConstantInt>(V)) {
@@ -256,7 +256,7 @@ struct apint_match {
 if (V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
 if (auto *CI =
-dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndef))) {
+dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowPoison))) {
 Res = &CI->getValue();
 return true;
 }
@@ -268,10 +268,10 @@ struct apint_match {
 // function for both apint/apfloat.
 struct apfloat_match {
 const APFloat *&Res;
-bool AllowUndef;
+bool AllowPoison;

-apfloat_match(const APFloat *&Res, bool AllowUndef)
-: Res(Res), AllowUndef(AllowUndef) {}
+apfloat_match(const APFloat *&Res, bool AllowPoison)
+: Res(Res), AllowPoison(AllowPoison) {}

 template <typename ITy> bool match(ITy *V) {
 if (auto *CI = dyn_cast<ConstantFP>(V)) {
@@ -281,7 +281,7 @@ struct apfloat_match {
 if (V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
 if (auto *CI =
-dyn_cast_or_null<ConstantFP>(C->getSplatValue(AllowUndef))) {
+dyn_cast_or_null<ConstantFP>(C->getSplatValue(AllowPoison))) {
 Res = &CI->getValueAPF();
 return true;
 }
@@ -292,35 +292,35 @@ struct apfloat_match {
 /// Match a ConstantInt or splatted ConstantVector, binding the
 /// specified pointer to the contained APInt.
 inline apint_match m_APInt(const APInt *&Res) {
-// Forbid undefs by default to maintain previous behavior.
-return apint_match(Res, /* AllowUndef */ false);
+// Forbid poison by default to maintain previous behavior.
+return apint_match(Res, /* AllowPoison */ false);
 }

-/// Match APInt while allowing undefs in splat vector constants.
-inline apint_match m_APIntAllowUndef(const APInt *&Res) {
-return apint_match(Res, /* AllowUndef */ true);
+/// Match APInt while allowing poison in splat vector constants.
+inline apint_match m_APIntAllowPoison(const APInt *&Res) {
+return apint_match(Res, /* AllowPoison */ true);
 }

-/// Match APInt while forbidding undefs in splat vector constants.
-inline apint_match m_APIntForbidUndef(const APInt *&Res) {
-return apint_match(Res, /* AllowUndef */ false);
+/// Match APInt while forbidding poison in splat vector constants.
+inline apint_match m_APIntForbidPoison(const APInt *&Res) {
+return apint_match(Res, /* AllowPoison */ false);
 }

 /// Match a ConstantFP or splatted ConstantVector, binding the
 /// specified pointer to the contained APFloat.
 inline apfloat_match m_APFloat(const APFloat *&Res) {
 // Forbid undefs by default to maintain previous behavior.
-return apfloat_match(Res, /* AllowUndef */ false);
+return apfloat_match(Res, /* AllowPoison */ false);
 }

-/// Match APFloat while allowing undefs in splat vector constants.
-inline apfloat_match m_APFloatAllowUndef(const APFloat *&Res) {
-return apfloat_match(Res, /* AllowUndef */ true);
+/// Match APFloat while allowing poison in splat vector constants.
+inline apfloat_match m_APFloatAllowPoison(const APFloat *&Res) {
+return apfloat_match(Res, /* AllowPoison */ true);
 }

-/// Match APFloat while forbidding undefs in splat vector constants.
-inline apfloat_match m_APFloatForbidUndef(const APFloat *&Res) {
-return apfloat_match(Res, /* AllowUndef */ false);
+/// Match APFloat while forbidding poison in splat vector constants.
+inline apfloat_match m_APFloatForbidPoison(const APFloat *&Res) {
+return apfloat_match(Res, /* AllowPoison */ false);
 }

 template <int64_t Val> struct constantint_match {
@@ -418,7 +418,7 @@ template <typename Predicate> struct api_pred_ty : public Predicate {

 /// This helper class is used to match scalar and vector constants that
 /// satisfy a specified predicate, and bind them to an APFloat.
-/// Undefs are allowed in splat vector constants.
+/// Poison is allowed in splat vector constants.
 template <typename Predicate> struct apf_pred_ty : public Predicate {
 const APFloat *&Res;

@@ -433,7 +433,7 @@ template <typename Predicate> struct apf_pred_ty : public Predicate {
 if (V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
 if (auto *CI = dyn_cast_or_null<ConstantFP>(
-C->getSplatValue(/* AllowUndef */ true)))
+C->getSplatValue(/* AllowPoison */ true)))
 if (this->isValue(CI->getValue())) {
 Res = &CI->getValue();
 return true;
@@ -883,7 +883,7 @@ struct bind_const_intval_ty {

 /// Match a specified integer value or vector of all elements of that
 /// value.
-template <bool AllowUndefs> struct specific_intval {
+template <bool AllowPoison> struct specific_intval {
 const APInt &Val;

 specific_intval(const APInt &V) : Val(V) {}
@@ -892,13 +892,13 @@ template <bool AllowUndefs> struct specific_intval {
 const auto *CI = dyn_cast<ConstantInt>(V);
 if (!CI && V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
-CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));
+CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowPoison));

 return CI && APInt::isSameValue(CI->getValue(), Val);
 }
 };

-template <bool AllowUndefs> struct specific_intval64 {
+template <bool AllowPoison> struct specific_intval64 {
 uint64_t Val;

 specific_intval64(uint64_t V) : Val(V) {}
@@ -907,7 +907,7 @@ template <bool AllowUndefs> struct specific_intval64 {
 const auto *CI = dyn_cast<ConstantInt>(V);
 if (!CI && V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
-CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowUndefs));
+CI = dyn_cast_or_null<ConstantInt>(C->getSplatValue(AllowPoison));

 return CI && CI->getValue() == Val;
 }
@@ -923,11 +923,11 @@ inline specific_intval64<false> m_SpecificInt(uint64_t V) {
 return specific_intval64<false>(V);
 }

-inline specific_intval<true> m_SpecificIntAllowUndef(const APInt &V) {
+inline specific_intval<true> m_SpecificIntAllowPoison(const APInt &V) {
 return specific_intval<true>(V);
 }

-inline specific_intval64<true> m_SpecificIntAllowUndef(uint64_t V) {
+inline specific_intval64<true> m_SpecificIntAllowPoison(uint64_t V) {
 return specific_intval64<true>(V);
 }

@@ -1699,9 +1699,9 @@ struct m_SpecificMask {
 bool match(ArrayRef<int> Mask) { return MaskRef == Mask; }
 };

-struct m_SplatOrUndefMask {
+struct m_SplatOrPoisonMask {
 int &SplatIndex;
-m_SplatOrUndefMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
+m_SplatOrPoisonMask(int &SplatIndex) : SplatIndex(SplatIndex) {}
 bool match(ArrayRef<int> Mask) {
 const auto *First = find_if(Mask, [](int Elem) { return Elem != -1; });
 if (First == Mask.end())
@@ -79,7 +79,7 @@ bool llvm::decomposeBitTestICmp(Value *LHS, Value *RHS,
 using namespace PatternMatch;

 const APInt *C;
-if (!match(RHS, m_APIntAllowUndef(C)))
+if (!match(RHS, m_APIntAllowPoison(C)))
 return false;

 switch (Pred) {
@@ -3023,7 +3023,7 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,

 Value *X;
 const APInt *C;
-if (!match(RHS, m_APIntAllowUndef(C)))
+if (!match(RHS, m_APIntAllowPoison(C)))
 return nullptr;

 // Sign-bit checks can be optimized to true/false after unsigned
@@ -3056,9 +3056,9 @@ static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
 const APInt *MulC;
 if (IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
-((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
+((match(LHS, m_NUWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
 *MulC != 0 && C->urem(*MulC) != 0) ||
-(match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
+(match(LHS, m_NSWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
 *MulC != 0 && C->srem(*MulC) != 0)))
 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);

@@ -3203,7 +3203,7 @@ static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,

 // (sub C, X) == X, C is odd --> false
 // (sub C, X) != X, C is odd --> true
-if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
+if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);

@@ -3354,7 +3354,7 @@ static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
 // (C2 << X) != C --> true
 const APInt *C;
 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
-match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
+match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
 // C2 << X can equal zero in some circumstances.
 // This simplification might be unsafe if C is zero.
 //
@@ -4105,7 +4105,7 @@ static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
 }

 const APFloat *C = nullptr;
-match(RHS, m_APFloatAllowUndef(C));
+match(RHS, m_APFloatAllowPoison(C));
 std::optional<KnownFPClass> FullKnownClassLHS;

 // Lazily compute the possible classes for LHS. Avoid computing it twice if
@@ -6459,7 +6459,7 @@ Value *llvm::simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));

 const APInt *C;
-if (match(Op1, m_APIntAllowUndef(C))) {
+if (match(Op1, m_APIntAllowPoison(C))) {
 // Clamp to limit value. For example:
 // umax(i8 %x, i8 255) --> 255
 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
@@ -4116,7 +4116,7 @@ std::pair<Value *, FPClassTest> llvm::fcmpToClassTest(FCmpInst::Predicate Pred,
 Value *LHS, Value *RHS,
 bool LookThroughSrc) {
 const APFloat *ConstRHS;
-if (!match(RHS, m_APFloatAllowUndef(ConstRHS)))
+if (!match(RHS, m_APFloatAllowPoison(ConstRHS)))
 return {nullptr, fcAllFlags};

 return fcmpToClassTest(Pred, F, LHS, ConstRHS, LookThroughSrc);
@@ -4517,7 +4517,7 @@ std::tuple<Value *, FPClassTest, FPClassTest>
 llvm::fcmpImpliesClass(CmpInst::Predicate Pred, const Function &F, Value *LHS,
 Value *RHS, bool LookThroughSrc) {
 const APFloat *ConstRHS;
-if (!match(RHS, m_APFloatAllowUndef(ConstRHS)))
+if (!match(RHS, m_APFloatAllowPoison(ConstRHS)))
 return {nullptr, fcAllFlags, fcAllFlags};

 // TODO: Just call computeKnownFPClass for RHS to handle non-constants.
@@ -1696,14 +1696,14 @@ void ConstantVector::destroyConstantImpl() {
 getType()->getContext().pImpl->VectorConstants.remove(this);
 }

-Constant *Constant::getSplatValue(bool AllowUndefs) const {
+Constant *Constant::getSplatValue(bool AllowPoison) const {
 assert(this->getType()->isVectorTy() && "Only valid for vectors!");
 if (isa<ConstantAggregateZero>(this))
 return getNullValue(cast<VectorType>(getType())->getElementType());
 if (const ConstantDataVector *CV = dyn_cast<ConstantDataVector>(this))
 return CV->getSplatValue();
 if (const ConstantVector *CV = dyn_cast<ConstantVector>(this))
-return CV->getSplatValue(AllowUndefs);
+return CV->getSplatValue(AllowPoison);

 // Check if this is a constant expression splat of the form returned by
 // ConstantVector::getSplat()
@@ -1728,7 +1728,7 @@ Constant *Constant::getSplatValue(bool AllowUndefs) const {
 return nullptr;
 }

-Constant *ConstantVector::getSplatValue(bool AllowUndefs) const {
+Constant *ConstantVector::getSplatValue(bool AllowPoison) const {
 // Check out first element.
 Constant *Elt = getOperand(0);
 // Then make sure all remaining elements point to the same value.
@@ -1738,15 +1738,15 @@ Constant *ConstantVector::getSplatValue(bool AllowUndefs) const {
 continue;

 // Strict mode: any mismatch is not a splat.
-if (!AllowUndefs)
+if (!AllowPoison)
 return nullptr;

-// Allow undefs mode: ignore undefined elements.
-if (isa<UndefValue>(OpC))
+// Allow poison mode: ignore poison elements.
+if (isa<PoisonValue>(OpC))
 continue;

 // If we do not have a defined element yet, use the current operand.
-if (isa<UndefValue>(Elt))
+if (isa<PoisonValue>(Elt))
 Elt = OpC;

 if (OpC != Elt)
@@ -906,8 +906,8 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,

 const APFloat *CF = nullptr;
 const APInt *CINT = nullptr;
-if (!match(opr1, m_APFloatAllowUndef(CF)))
-match(opr1, m_APIntAllowUndef(CINT));
+if (!match(opr1, m_APFloatAllowPoison(CF)))
+match(opr1, m_APIntAllowPoison(CINT));

 // 0x1111111 means that we don't do anything for this call.
 int ci_opr1 = (CINT ? (int)CINT->getSExtValue() : 0x1111111);
@@ -1039,7 +1039,7 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
 Constant *cnval = nullptr;
 if (getVecSize(FInfo) == 1) {
 CF = nullptr;
-match(opr0, m_APFloatAllowUndef(CF));
+match(opr0, m_APFloatAllowPoison(CF));

 if (CF) {
 double V = (getArgType(FInfo) == AMDGPULibFunc::F32)
@@ -64,6 +64,23 @@ FunctionPass *llvm::createX86FixupVectorConstants() {
 return new X86FixupVectorConstantsPass();
 }

+/// Normally, we only allow poison in vector splats. However, as this is part
+/// of the backend, and working with the DAG representation, which currently
+/// only natively represents undef values, we need to accept undefs here.
+static Constant *getSplatValueAllowUndef(const ConstantVector *C) {
+Constant *Res = nullptr;
+for (Value *Op : C->operands()) {
+Constant *OpC = cast<Constant>(Op);
+if (isa<UndefValue>(OpC))
+continue;
+if (!Res)
+Res = OpC;
+else if (Res != OpC)
+return nullptr;
+}
+return Res;
+}
+
 // Attempt to extract the full width of bits data from the constant.
 static std::optional<APInt> extractConstantBits(const Constant *C) {
 unsigned NumBits = C->getType()->getPrimitiveSizeInBits();
@@ -78,7 +95,7 @@ static std::optional<APInt> extractConstantBits(const Constant *C) {
 return CFP->getValue().bitcastToAPInt();

 if (auto *CV = dyn_cast<ConstantVector>(C)) {
-if (auto *CVSplat = CV->getSplatValue(/*AllowUndefs*/ true)) {
+if (auto *CVSplat = getSplatValueAllowUndef(CV)) {
 if (std::optional<APInt> Bits = extractConstantBits(CVSplat)) {
 assert((NumBits % Bits->getBitWidth()) == 0 && "Illegal splat");
 return APInt::getSplat(NumBits, *Bits);
@@ -896,7 +896,7 @@ Instruction *InstCombinerImpl::foldAddWithConstant(BinaryOperator &Add) {
 const APInt *C;
 unsigned BitWidth = Ty->getScalarSizeInBits();
 if (match(Op0, m_OneUse(m_AShr(m_Value(X),
-m_SpecificIntAllowUndef(BitWidth - 1)))) &&
+m_SpecificIntAllowPoison(BitWidth - 1)))) &&
 match(Op1, m_One()))
 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

@@ -1656,7 +1656,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
 // (A s>> (BW - 1)) + (zext (A s> 0)) --> (A s>> (BW - 1)) | (zext (A != 0))
 ICmpInst::Predicate Pred;
 uint64_t BitWidth = Ty->getScalarSizeInBits();
-if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowUndef(BitWidth - 1))) &&
+if (match(LHS, m_AShr(m_Value(A), m_SpecificIntAllowPoison(BitWidth - 1))) &&
 match(RHS, m_OneUse(m_ZExt(
 m_OneUse(m_ICmp(Pred, m_Specific(A), m_ZeroInt()))))) &&
 Pred == CmpInst::ICMP_SGT) {
@@ -947,9 +947,9 @@ static Value *foldNegativePower2AndShiftedMask(
 // bits (0).
 auto isReducible = [](const Value *B, const Value *D, const Value *E) {
 const APInt *BCst, *DCst, *ECst;
-return match(B, m_APIntAllowUndef(BCst)) && match(D, m_APInt(DCst)) &&
+return match(B, m_APIntAllowPoison(BCst)) && match(D, m_APInt(DCst)) &&
 match(E, m_APInt(ECst)) && *DCst == *ECst &&
-(isa<UndefValue>(B) ||
+(isa<PoisonValue>(B) ||
 (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
 };

@@ -1424,8 +1424,8 @@ Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
 const APFloat *LHSC, *RHSC;
 if (LHS0 == RHS0 && LHS->hasOneUse() && RHS->hasOneUse() &&
 FCmpInst::getSwappedPredicate(PredL) == PredR &&
-match(LHS1, m_APFloatAllowUndef(LHSC)) &&
-match(RHS1, m_APFloatAllowUndef(RHSC)) &&
+match(LHS1, m_APFloatAllowPoison(LHSC)) &&
+match(RHS1, m_APFloatAllowPoison(RHSC)) &&
 LHSC->bitwiseIsEqual(neg(*RHSC))) {
 auto IsLessThanOrLessEqual = [](FCmpInst::Predicate Pred) {
 switch (Pred) {
@@ -2730,7 +2730,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {

 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
 if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
-m_AShr(m_Value(X), m_APIntAllowUndef(C)))),
+m_AShr(m_Value(X), m_APIntAllowPoison(C)))),
 m_Value(Y))) &&
 *C == X->getType()->getScalarSizeInBits() - 1) {
 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
@@ -2739,7 +2739,7 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
 // If there's a 'not' of the shifted value, swap the select operands:
 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
 if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
-m_Not(m_AShr(m_Value(X), m_APIntAllowUndef(C))))),
+m_Not(m_AShr(m_Value(X), m_APIntAllowPoison(C))))),
 m_Value(Y))) &&
 *C == X->getType()->getScalarSizeInBits() - 1) {
 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
@@ -2840,7 +2840,7 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
 auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
 // Check for constant shift amounts that sum to the bitwidth.
 const APInt *LI, *RI;
-if (match(L, m_APIntAllowUndef(LI)) && match(R, m_APIntAllowUndef(RI)))
+if (match(L, m_APIntAllowPoison(LI)) && match(R, m_APIntAllowPoison(RI)))
 if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
 return ConstantInt::get(L->getType(), *LI);

@@ -2850,7 +2850,7 @@ InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
 match(R,
 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
-match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowUndef(Width)))
+match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowPoison(Width)))
 return ConstantExpr::mergeUndefsWith(LC, RC);

 // (shl ShVal, X) | (lshr ShVal, (Width - x)) iff X < Width.
@@ -3195,14 +3195,14 @@ static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,

 const APInt *CInt;
 if (LPred != ICmpInst::ICMP_EQ ||
-!match(LHS->getOperand(1), m_APIntAllowUndef(CInt)) ||
+!match(LHS->getOperand(1), m_APIntAllowPoison(CInt)) ||
 !LHS0->getType()->isIntOrIntVectorTy() ||
 !(LHS->hasOneUse() || RHS->hasOneUse()))
 return nullptr;

 auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
 return match(RHSOp,
-m_Add(m_Specific(LHS0), m_SpecificIntAllowUndef(-*CInt))) ||
+m_Add(m_Specific(LHS0), m_SpecificIntAllowPoison(-*CInt))) ||
 (CInt->isZero() && RHSOp == LHS0);
 };

@@ -4635,7 +4635,7 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
 // constant depending on whether this input is less than 0.
 const APInt *CA;
 if (match(Op0, m_OneUse(m_TruncOrSelf(
-m_AShr(m_Value(X), m_APIntAllowUndef(CA))))) &&
+m_AShr(m_Value(X), m_APIntAllowPoison(CA))))) &&
 *CA == X->getType()->getScalarSizeInBits() - 1 &&
 !match(C1, m_AllOnes())) {
 assert(!C1->isZeroValue() && "Unexpected xor with 0");
@@ -1821,7 +1821,7 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
 return NewMinMax;

 // Try to fold minmax with constant RHS based on range information
-if (match(I1, m_APIntAllowUndef(RHSC))) {
+if (match(I1, m_APIntAllowPoison(RHSC))) {
 ICmpInst::Predicate Pred =
 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
 bool IsSigned = MinMaxIntrinsic::isSigned(IID);
|
|||||||
// bswap (lshr X, Y) --> shl (bswap X), Y
|
// bswap (lshr X, Y) --> shl (bswap X), Y
|
||||||
Value *X, *Y;
|
Value *X, *Y;
|
||||||
if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
|
if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
|
||||||
// The transform allows undef vector elements, so try a constant match
|
|
||||||
// first. If knownbits can handle that case, that clause could be removed.
|
|
||||||
unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits();
|
unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits();
|
||||||
const APInt *C;
|
if (MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) {
|
||||||
if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) ||
|
|
||||||
MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) {
|
|
||||||
Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
|
Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
|
||||||
BinaryOperator::BinaryOps InverseShift =
|
BinaryOperator::BinaryOps InverseShift =
|
||||||
cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
|
cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
|
||||||
|
@@ -1481,7 +1481,7 @@ Instruction *InstCombinerImpl::visitSExt(SExtInst &Sext) {
 Value *Y;
 if (Src->hasOneUse() &&
 match(X, m_LShr(m_Value(Y),
-m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
+m_SpecificIntAllowPoison(XBitSize - SrcBitSize)))) {
 Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
 return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
 }
@@ -3417,8 +3417,8 @@ Instruction *InstCombinerImpl::foldICmpInstWithConstant(ICmpInst &Cmp) {
 return new ICmpInst(Cmp.getPredicate(), X, Y);
 }

-if (match(Cmp.getOperand(1), m_APIntAllowUndef(C)))
-return foldICmpInstWithConstantAllowUndef(Cmp, *C);
+if (match(Cmp.getOperand(1), m_APIntAllowPoison(C)))
+return foldICmpInstWithConstantAllowPoison(Cmp, *C);

 return nullptr;
 }
@@ -3735,11 +3735,11 @@ foldICmpIntrinsicWithIntrinsic(ICmpInst &Cmp,
 }

 /// Try to fold integer comparisons with a constant operand: icmp Pred X, C
-/// where X is some kind of instruction and C is AllowUndef.
-/// TODO: Move more folds which allow undef to this function.
+/// where X is some kind of instruction and C is AllowPoison.
+/// TODO: Move more folds which allow poison to this function.
 Instruction *
-InstCombinerImpl::foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
+InstCombinerImpl::foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
 const APInt &C) {
 const ICmpInst::Predicate Pred = Cmp.getPredicate();
 if (auto *II = dyn_cast<IntrinsicInst>(Cmp.getOperand(0))) {
 switch (II->getIntrinsicID()) {
@@ -4844,7 +4844,7 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
 const APInt *C;
 if ((Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_UGE) &&
 match(Op0, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
-match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowUndef(*C)))) {
+match(BO, m_Add(m_Specific(Op1), m_SpecificIntAllowPoison(*C)))) {
 CmpInst::Predicate NewPred =
 Pred == ICmpInst::ICMP_ULT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
@@ -4853,7 +4853,7 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,

 if ((Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_ULE) &&
 match(Op1, m_And(m_BinOp(BO), m_LowBitMask(C))) &&
-match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowUndef(*C)))) {
+match(BO, m_Add(m_Specific(Op0), m_SpecificIntAllowPoison(*C)))) {
 CmpInst::Predicate NewPred =
 Pred == ICmpInst::ICMP_UGT ? ICmpInst::ICMP_NE : ICmpInst::ICMP_EQ;
 Constant *Zero = ConstantInt::getNullValue(Op1->getType());
@@ -5003,8 +5003,9 @@ Instruction *InstCombinerImpl::foldICmpBinOp(ICmpInst &I,
 (BO0->hasOneUse() || BO1->hasOneUse()) && !I.isUnsigned()) {
 const APInt *AP1, *AP2;
 // TODO: Support non-uniform vectors.
-// TODO: Allow undef passthrough if B AND D's element is undef.
-if (match(B, m_APIntAllowUndef(AP1)) && match(D, m_APIntAllowUndef(AP2)) &&
+// TODO: Allow poison passthrough if B or D's element is poison.
+if (match(B, m_APIntAllowPoison(AP1)) &&
+match(D, m_APIntAllowPoison(AP2)) &&
 AP1->isNegative() == AP2->isNegative()) {
 APInt AP1Abs = AP1->abs();
 APInt AP2Abs = AP2->abs();
@@ -5575,10 +5576,10 @@ Instruction *InstCombinerImpl::foldICmpEquality(ICmpInst &I) {
 // (A >> C) == (B >> C) --> (A^B) u< (1 << C)
 // For lshr and ashr pairs.
 const APInt *AP1, *AP2;
-if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowUndef(AP1)))) &&
-match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowUndef(AP2))))) ||
-(match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowUndef(AP1)))) &&
-match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowUndef(AP2)))))) {
+if ((match(Op0, m_OneUse(m_LShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
+match(Op1, m_OneUse(m_LShr(m_Value(B), m_APIntAllowPoison(AP2))))) ||
+(match(Op0, m_OneUse(m_AShr(m_Value(A), m_APIntAllowPoison(AP1)))) &&
+match(Op1, m_OneUse(m_AShr(m_Value(B), m_APIntAllowPoison(AP2)))))) {
 if (AP1 != AP2)
 return nullptr;
 unsigned TypeBits = AP1->getBitWidth();
@@ -6969,10 +6970,10 @@ static Instruction *foldVectorCmp(CmpInst &Cmp,

 // Length-changing splats are ok, so adjust the constants as needed:
 // cmp (shuffle V1, M), C --> shuffle (cmp V1, C'), M
-Constant *ScalarC = C->getSplatValue(/* AllowUndefs */ true);
+Constant *ScalarC = C->getSplatValue(/* AllowPoison */ true);
 int MaskSplatIndex;
-if (ScalarC && match(M, m_SplatOrUndefMask(MaskSplatIndex))) {
-// We allow undefs in matching, but this transform removes those for safety.
+if (ScalarC && match(M, m_SplatOrPoisonMask(MaskSplatIndex))) {
+// We allow poison in matching, but this transform removes it for safety.
 // Demanded elements analysis should be able to recover some/all of that.
 C = ConstantVector::getSplat(cast<VectorType>(V1Ty)->getElementCount(),
 ScalarC);
@@ -7444,7 +7445,7 @@ Instruction *InstCombinerImpl::visitICmpInst(ICmpInst &I) {
 unsigned OpWidth = Op0->getType()->getScalarSizeInBits();
 Instruction *ShiftI;
 if (match(Op0, m_CombineAnd(m_Instruction(ShiftI),
-m_Shr(m_Value(X), m_SpecificIntAllowUndef(
+m_Shr(m_Value(X), m_SpecificIntAllowPoison(
 OpWidth - 1))))) {
 unsigned ExtOpc = ExtI->getOpcode();
 unsigned ShiftOpc = ShiftI->getOpcode();
@@ -661,8 +661,8 @@ public:
 Instruction *foldICmpUsingBoolRange(ICmpInst &I);
 Instruction *foldICmpInstWithConstant(ICmpInst &Cmp);
 Instruction *foldICmpInstWithConstantNotInt(ICmpInst &Cmp);
-Instruction *foldICmpInstWithConstantAllowUndef(ICmpInst &Cmp,
+Instruction *foldICmpInstWithConstantAllowPoison(ICmpInst &Cmp,
 const APInt &C);
 Instruction *foldICmpBinOp(ICmpInst &Cmp, const SimplifyQuery &SQ);
 Instruction *foldICmpWithMinMax(Instruction &I, MinMaxIntrinsic *MinMax,
 Value *Z, ICmpInst::Predicate Pred);
@@ -276,7 +276,7 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
 const APInt *NegPow2C;
 Value *X;
 if (match(Op0, m_ZExtOrSExt(m_Value(X))) &&
-match(Op1, m_APIntAllowUndef(NegPow2C))) {
+match(Op1, m_APIntAllowPoison(NegPow2C))) {
 unsigned SrcWidth = X->getType()->getScalarSizeInBits();
 unsigned ShiftAmt = NegPow2C->countr_zero();
 if (ShiftAmt >= BitWidth - SrcWidth) {
@@ -485,7 +485,7 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
 // ((ashr X, 31) | 1) * X --> abs(X)
 // X * ((ashr X, 31) | 1) --> abs(X)
 if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
-m_SpecificIntAllowUndef(BitWidth - 1)),
+m_SpecificIntAllowPoison(BitWidth - 1)),
 m_One()),
 m_Deferred(X)))) {
 Value *Abs = Builder.CreateBinaryIntrinsic(
@@ -836,7 +836,7 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
 // X * 0.0 --> copysign(0.0, X)
 // X * -0.0 --> copysign(0.0, -X)
 const APFloat *FPC;
-if (match(Op1, m_APFloatAllowUndef(FPC)) && FPC->isZero() &&
+if (match(Op1, m_APFloatAllowPoison(FPC)) && FPC->isZero() &&
 ((I.hasNoInfs() &&
 isKnownNeverNaN(Op0, /*Depth=*/0, SQ.getWithInstruction(&I))) ||
 isKnownNeverNaN(&I, /*Depth=*/0, SQ.getWithInstruction(&I)))) {
@@ -249,7 +249,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
 unsigned SrcWidth = SrcOp->getType()->getScalarSizeInBits();
 const APInt &FullShift = APInt(SrcWidth, SrcWidth - 1);
 if (IsTrulyNegation &&
-match(SrcOp, m_LShr(m_Value(X), m_SpecificIntAllowUndef(FullShift)))) {
+match(SrcOp, m_LShr(m_Value(X), m_SpecificIntAllowPoison(FullShift)))) {
 Value *Ashr = Builder.CreateAShr(X, FullShift);
 return Builder.CreateSExt(Ashr, I->getType());
 }
@@ -1621,7 +1621,7 @@ static Instruction *foldSelectZeroOrOnes(ICmpInst *Cmp, Value *TVal,
 return nullptr;

 const APInt *CmpC;
-if (!match(Cmp->getOperand(1), m_APIntAllowUndef(CmpC)))
+if (!match(Cmp->getOperand(1), m_APIntAllowPoison(CmpC)))
 return nullptr;

 // (X u< 2) ? -X : -1 --> sext (X != 0)
@@ -2489,8 +2489,8 @@ static Instruction *foldSelectToCopysign(SelectInst &Sel,
 // Match select ?, TC, FC where the constants are equal but negated.
 // TODO: Generalize to handle a negated variable operand?
 const APFloat *TC, *FC;
-if (!match(TVal, m_APFloatAllowUndef(TC)) ||
-!match(FVal, m_APFloatAllowUndef(FC)) ||
+if (!match(TVal, m_APFloatAllowPoison(TC)) ||
+!match(FVal, m_APFloatAllowPoison(FC)) ||
 !abs(*TC).bitwiseIsEqual(abs(*FC)))
 return nullptr;

@@ -2939,17 +2939,17 @@ foldRoundUpIntegerWithPow2Alignment(SelectInst &SI,
 // FIXME: we could support non non-splats here.

 const APInt *LowBitMaskCst;
-if (!match(XLowBits, m_And(m_Specific(X), m_APIntAllowUndef(LowBitMaskCst))))
+if (!match(XLowBits, m_And(m_Specific(X), m_APIntAllowPoison(LowBitMaskCst))))
 return nullptr;

 // Match even if the AND and ADD are swapped.
 const APInt *BiasCst, *HighBitMaskCst;
 if (!match(XBiasedHighBits,
-m_And(m_Add(m_Specific(X), m_APIntAllowUndef(BiasCst)),
-m_APIntAllowUndef(HighBitMaskCst))) &&
+m_And(m_Add(m_Specific(X), m_APIntAllowPoison(BiasCst)),
+m_APIntAllowPoison(HighBitMaskCst))) &&
 !match(XBiasedHighBits,
-m_Add(m_And(m_Specific(X), m_APIntAllowUndef(HighBitMaskCst)),
-m_APIntAllowUndef(BiasCst))))
+m_Add(m_And(m_Specific(X), m_APIntAllowPoison(HighBitMaskCst)),
+m_APIntAllowPoison(BiasCst))))
 return nullptr;

 if (!LowBitMaskCst->isMask())
@@ -778,7 +778,7 @@ Instruction *InstCombinerImpl::FoldShiftByConstant(Value *Op0, Constant *C1,
 // (X / +DivC) >> (Width - 1) --> ext (X <= -DivC)
 // (X / -DivC) >> (Width - 1) --> ext (X >= +DivC)
 const APInt *DivC;
-if (!IsLeftShift && match(C1, m_SpecificIntAllowUndef(TypeBits - 1)) &&
+if (!IsLeftShift && match(C1, m_SpecificIntAllowPoison(TypeBits - 1)) &&
 match(Op0, m_SDiv(m_Value(X), m_APInt(DivC))) && !DivC->isZero() &&
 !DivC->isMinSignedValue()) {
 Constant *NegDivC = ConstantInt::get(Ty, -(*DivC));
@@ -1264,7 +1264,7 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {

 // (iN (~X) u>> (N - 1)) --> zext (X > -1)
 if (match(Op0, m_OneUse(m_Not(m_Value(X)))) &&
-match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)))
+match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)))
 return new ZExtInst(Builder.CreateIsNotNeg(X, "isnotneg"), Ty);

 if (match(Op1, m_APInt(C))) {
@@ -1666,9 +1666,9 @@ Instruction *InstCombinerImpl::visitAShr(BinaryOperator &I) {
 // as the pattern to splat the lowest bit.
 // FIXME: iff X is already masked, we don't need the one-use check.
 Value *X;
-if (match(Op1, m_SpecificIntAllowUndef(BitWidth - 1)) &&
+if (match(Op1, m_SpecificIntAllowPoison(BitWidth - 1)) &&
 match(Op0, m_OneUse(m_Shl(m_Value(X),
-m_SpecificIntAllowUndef(BitWidth - 1))))) {
+m_SpecificIntAllowPoison(BitWidth - 1))))) {
 Constant *Mask = ConstantInt::get(Ty, 1);
 // Retain the knowledge about the ignored lanes.
 Mask = Constant::mergeUndefsWith(
@@ -2190,7 +2190,7 @@ Instruction *InstCombinerImpl::foldVectorBinop(BinaryOperator &Inst) {
 Value *Y, *OtherOp;
 if (!match(LHS,
 m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(MaskC)))) ||
-!match(MaskC, m_SplatOrUndefMask(SplatIndex)) ||
+!match(MaskC, m_SplatOrPoisonMask(SplatIndex)) ||
 X->getType() != Inst.getType() ||
 !match(RHS, m_OneUse(m_BinOp(Opcode, m_Value(Y), m_Value(OtherOp)))))
 return nullptr;
@@ -3740,7 +3740,7 @@ InstCombinerImpl::foldExtractOfOverflowIntrinsic(ExtractValueInst &EV) {

 Intrinsic::ID OvID = WO->getIntrinsicID();
 const APInt *C = nullptr;
-if (match(WO->getRHS(), m_APIntAllowUndef(C))) {
+if (match(WO->getRHS(), m_APIntAllowPoison(C))) {
 if (*EV.idx_begin() == 0 && (OvID == Intrinsic::smul_with_overflow ||
 OvID == Intrinsic::umul_with_overflow)) {
 // extractvalue (any_mul_with_overflow X, -1), 0 --> -X
@@ -65,7 +65,7 @@ struct specific_intval {
 if (!CI && V->getType()->isVectorTy())
 if (const auto *C = dyn_cast<Constant>(V))
 CI = dyn_cast_or_null<ConstantInt>(
-C->getSplatValue(/*UndefsAllowed=*/false));
+C->getSplatValue(/*AllowPoison=*/false));

 return CI && APInt::isSameValue(CI->getValue(), Val);
 }
@ -223,30 +223,30 @@ define <2 x i1> @ne_vector_unequal2(<2 x i8> %x, <2 x i8> %y) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
; ==============================================================================
|
; ==============================================================================
|
||||||
; Tests with undef
|
; Tests with poison
|
||||||
; ==============================================================================
|
; ==============================================================================
|
||||||
define <2 x i1> @eq_vector_undef_icmp(<2 x i8> %x, <2 x i8> %y) {
|
define <2 x i1> @eq_vector_poison_icmp(<2 x i8> %x, <2 x i8> %y) {
|
||||||
; CHECK-LABEL: define <2 x i1> @eq_vector_undef_icmp
|
; CHECK-LABEL: define <2 x i1> @eq_vector_poison_icmp
|
||||||
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
|
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
|
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
|
||||||
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
|
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
|
||||||
; CHECK-NEXT: ret <2 x i1> [[OR]]
|
; CHECK-NEXT: ret <2 x i1> [[OR]]
|
||||||
;
|
;
|
||||||
%sub = add <2 x i8> %x, <i8 -5, i8 -5>
|
%sub = add <2 x i8> %x, <i8 -5, i8 -5>
|
||||||
%c1 = icmp eq <2 x i8> %x, <i8 5, i8 undef>
|
%c1 = icmp eq <2 x i8> %x, <i8 5, i8 poison>
|
||||||
%c2 = icmp ugt <2 x i8> %sub, %y
|
%c2 = icmp ugt <2 x i8> %sub, %y
|
||||||
%or = or <2 x i1> %c1, %c2
|
%or = or <2 x i1> %c1, %c2
|
||||||
ret <2 x i1> %or
|
ret <2 x i1> %or
|
||||||
}
|
}
|
||||||
|
|
||||||
define <2 x i1> @eq_vector_undef_add(<2 x i8> %x, <2 x i8> %y) {
|
define <2 x i1> @eq_vector_poison_add(<2 x i8> %x, <2 x i8> %y) {
|
||||||
; CHECK-LABEL: define <2 x i1> @eq_vector_undef_add
|
; CHECK-LABEL: define <2 x i1> @eq_vector_poison_add
|
||||||
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
|
; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]]) {
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
|
; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X]], <i8 -6, i8 -6>
|
||||||
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
|
; CHECK-NEXT: [[OR:%.*]] = icmp uge <2 x i8> [[TMP1]], [[Y]]
|
||||||
; CHECK-NEXT: ret <2 x i1> [[OR]]
|
; CHECK-NEXT: ret <2 x i1> [[OR]]
|
||||||
;
|
;
|
||||||
%sub = add <2 x i8> %x, <i8 -5, i8 undef>
|
%sub = add <2 x i8> %x, <i8 -5, i8 poison>
|
||||||
%c1 = icmp eq <2 x i8> %x, <i8 5, i8 5>
|
%c1 = icmp eq <2 x i8> %x, <i8 5, i8 5>
|
||||||
%c2 = icmp ugt <2 x i8> %sub, %y
|
%c2 = icmp ugt <2 x i8> %sub, %y
|
||||||
%or = or <2 x i1> %c1, %c2
|
%or = or <2 x i1> %c1, %c2
|
||||||
@@ -1063,8 +1063,8 @@ define float @negzero_check_on_constant_for_si_fmul(i1 %c, i1 %.b, ptr %g_2345)
 ret float %mul3.i.i
 }

-define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
-; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_vec_w_undef(
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_poison(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_vec_w_poison(
 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
 ; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
@@ -1079,34 +1079,33 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_vec_w_undef(i1 %c, i1
 %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
 %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
 %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
-%mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float 0.000000e+00>
+%mul3.i.i = fmul <2 x float> %conv1.i, <float poison, float 0.000000e+00>
 store i32 %sel, ptr %g_2345, align 4
 ret <2 x float> %mul3.i.i
 }

-define <2 x float> @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
-; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_nz_vec_w_undef(
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_nz_vec_w_poison(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_nz_vec_w_poison(
 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
 ; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
 ; CHECK-NEXT: [[CONV_I:%.*]] = shufflevector <2 x i16> [[CONV_I_V]], <2 x i16> poison, <2 x i32> zeroinitializer
 ; CHECK-NEXT: [[CONV1_I:%.*]] = sitofp <2 x i16> [[CONV_I]] to <2 x float>
-; CHECK-NEXT: [[MUL3_I_I:%.*]] = fmul <2 x float> [[CONV1_I]], <float undef, float 1.000000e+00>
 ; CHECK-NEXT: store i32 [[SEL]], ptr [[G_2345:%.*]], align 4
-; CHECK-NEXT: ret <2 x float> [[MUL3_I_I]]
+; CHECK-NEXT: ret <2 x float> [[CONV1_I]]
 ;
 %sel = select i1 %c, i32 65529, i32 53264
 %conv.i.s = trunc i32 %sel to i16
 %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
 %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
 %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
-%mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float 1.000000e+00>
+%mul3.i.i = fmul <2 x float> %conv1.i, <float poison, float 1.000000e+00>
 store i32 %sel, ptr %g_2345, align 4
 ret <2 x float> %mul3.i.i
 }

-define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(i1 %c, i1 %.b, ptr %g_2345) {
-; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(
+define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_poison(i1 %c, i1 %.b, ptr %g_2345) {
+; CHECK-LABEL: @nonzero_check_on_constant_for_si_fmul_negz_vec_w_poison(
 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[C:%.*]], i32 65529, i32 53264
 ; CHECK-NEXT: [[CONV_I_S:%.*]] = trunc nuw i32 [[SEL]] to i16
 ; CHECK-NEXT: [[CONV_I_V:%.*]] = insertelement <2 x i16> poison, i16 [[CONV_I_S]], i64 0
@@ -1122,7 +1121,7 @@ define <2 x float> @nonzero_check_on_constant_for_si_fmul_negz_vec_w_undef(i1 %c
 %conv.i.v = insertelement <2 x i16> poison, i16 %conv.i.s, i64 0
 %conv.i = insertelement <2 x i16> %conv.i.v, i16 %conv.i.s, i64 1
 %conv1.i = sitofp <2 x i16> %conv.i to <2 x float>
-%mul3.i.i = fmul <2 x float> %conv1.i, <float undef, float -0.000000e+00>
+%mul3.i.i = fmul <2 x float> %conv1.i, <float poison, float -0.000000e+00>
 store i32 %sel, ptr %g_2345, align 4
 ret <2 x float> %mul3.i.i
 }
@@ -870,13 +870,13 @@ define <2 x i64> @bs_active_high_different_negative(<2 x i64> %0) {
 }

 ; TODO: This should fold to 'and'.
-define <2 x i64> @bs_active_high_undef(<2 x i64> %0) {
-; CHECK-LABEL: @bs_active_high_undef(
+define <2 x i64> @bs_active_high_poison(<2 x i64> %0) {
+; CHECK-LABEL: @bs_active_high_poison(
 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[TMP0:%.*]])
-; CHECK-NEXT: [[TMP3:%.*]] = lshr <2 x i64> [[TMP2]], <i64 56, i64 undef>
+; CHECK-NEXT: [[TMP3:%.*]] = lshr <2 x i64> [[TMP2]], <i64 56, i64 poison>
 ; CHECK-NEXT: ret <2 x i64> [[TMP3]]
 ;
-%2 = shl <2 x i64> %0, <i64 56, i64 undef>
+%2 = shl <2 x i64> %0, <i64 56, i64 poison>
 %3 = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %2)
 ret <2 x i64> %3
 }
@@ -107,27 +107,27 @@ define <2 x i32> @test2_vector(<2 x i32> %arg) {
 ret <2 x i32> %t14
 }

-define <2 x i32> @test2_vector_undef(<2 x i32> %arg) {
-; CHECK-LABEL: @test2_vector_undef(
-; CHECK-NEXT: [[T2:%.*]] = shl <2 x i32> [[ARG:%.*]], <i32 24, i32 undef>
+define <2 x i32> @test2_vector_poison(<2 x i32> %arg) {
+; CHECK-LABEL: @test2_vector_poison(
+; CHECK-NEXT: [[T2:%.*]] = shl <2 x i32> [[ARG:%.*]], <i32 24, i32 poison>
 ; CHECK-NEXT: [[T4:%.*]] = shl <2 x i32> [[ARG]], <i32 8, i32 8>
-; CHECK-NEXT: [[T5:%.*]] = and <2 x i32> [[T4]], <i32 16711680, i32 undef>
-; CHECK-NEXT: [[T6:%.*]] = or <2 x i32> [[T2]], [[T5]]
+; CHECK-NEXT: [[T5:%.*]] = and <2 x i32> [[T4]], <i32 16711680, i32 poison>
+; CHECK-NEXT: [[T6:%.*]] = or disjoint <2 x i32> [[T2]], [[T5]]
 ; CHECK-NEXT: [[T8:%.*]] = lshr <2 x i32> [[ARG]], <i32 8, i32 8>
-; CHECK-NEXT: [[T9:%.*]] = and <2 x i32> [[T8]], <i32 65280, i32 undef>
-; CHECK-NEXT: [[T10:%.*]] = or <2 x i32> [[T6]], [[T9]]
-; CHECK-NEXT: [[T12:%.*]] = lshr <2 x i32> [[ARG]], <i32 24, i32 undef>
-; CHECK-NEXT: [[T14:%.*]] = or <2 x i32> [[T10]], [[T12]]
+; CHECK-NEXT: [[T9:%.*]] = and <2 x i32> [[T8]], <i32 65280, i32 poison>
+; CHECK-NEXT: [[T10:%.*]] = or disjoint <2 x i32> [[T6]], [[T9]]
+; CHECK-NEXT: [[T12:%.*]] = lshr <2 x i32> [[ARG]], <i32 24, i32 poison>
+; CHECK-NEXT: [[T14:%.*]] = or disjoint <2 x i32> [[T10]], [[T12]]
 ; CHECK-NEXT: ret <2 x i32> [[T14]]
 ;
-%t2 = shl <2 x i32> %arg, <i32 24, i32 undef>
+%t2 = shl <2 x i32> %arg, <i32 24, i32 poison>
 %t4 = shl <2 x i32> %arg, <i32 8, i32 8>
-%t5 = and <2 x i32> %t4, <i32 16711680, i32 undef>
+%t5 = and <2 x i32> %t4, <i32 16711680, i32 poison>
 %t6 = or <2 x i32> %t2, %t5
 %t8 = lshr <2 x i32> %arg, <i32 8, i32 8>
-%t9 = and <2 x i32> %t8, <i32 65280, i32 undef>
+%t9 = and <2 x i32> %t8, <i32 65280, i32 poison>
 %t10 = or <2 x i32> %t6, %t9
-%t12 = lshr <2 x i32> %arg, <i32 24, i32 undef>
+%t12 = lshr <2 x i32> %arg, <i32 24, i32 poison>
 %t14 = or <2 x i32> %t10, %t12
 ret <2 x i32> %t14
 }
@@ -154,13 +154,13 @@ define <2 x i16> @test3_vector(<2 x i16> %s) {
 ret <2 x i16> %t5
 }

-define <2 x i16> @test3_vector_undef(<2 x i16> %s) {
-; CHECK-LABEL: @test3_vector_undef(
+define <2 x i16> @test3_vector_poison(<2 x i16> %s) {
+; CHECK-LABEL: @test3_vector_poison(
 ; CHECK-NEXT: [[T5:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[S:%.*]])
 ; CHECK-NEXT: ret <2 x i16> [[T5]]
 ;
-%t2 = lshr <2 x i16> %s, <i16 undef, i16 8>
-%t4 = shl <2 x i16> %s, <i16 8, i16 undef>
+%t2 = lshr <2 x i16> %s, <i16 poison, i16 8>
+%t4 = shl <2 x i16> %s, <i16 8, i16 poison>
 %t5 = or <2 x i16> %t2, %t4
 ret <2 x i16> %t5
 }
@@ -657,7 +657,7 @@ define i32 @shuf_4bytes(<4 x i8> %x) {
 ; CHECK-NEXT: [[CAST:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
 ; CHECK-NEXT: ret i32 [[CAST]]
 ;
-%bswap = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+%bswap = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 %cast = bitcast <4 x i8> %bswap to i32
 ret i32 %cast
 }
@@ -669,7 +669,7 @@ define i32 @shuf_load_4bytes(ptr %p) {
 ; CHECK-NEXT: ret i32 [[CAST]]
 ;
 %x = load <4 x i8>, ptr %p
-%bswap = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 undef, i32 0>
+%bswap = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 poison, i32 0>
 %cast = bitcast <4 x i8> %bswap to i32
 ret i32 %cast
 }
@@ -680,7 +680,7 @@ define i32 @shuf_bitcast_twice_4bytes(i32 %x) {
 ; CHECK-NEXT: ret i32 [[CAST2]]
 ;
 %cast1 = bitcast i32 %x to <4 x i8>
-%bswap = shufflevector <4 x i8> %cast1, <4 x i8> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 0>
+%bswap = shufflevector <4 x i8> %cast1, <4 x i8> poison, <4 x i32> <i32 poison, i32 2, i32 1, i32 0>
 %cast2 = bitcast <4 x i8> %bswap to i32
 ret i32 %cast2
 }
@@ -695,7 +695,7 @@ define i32 @shuf_4bytes_extra_use(<4 x i8> %x) {
 ; CHECK-NEXT: [[CAST:%.*]] = bitcast <4 x i8> [[BSWAP]] to i32
 ; CHECK-NEXT: ret i32 [[CAST]]
 ;
-%bswap = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+%bswap = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
 call void @use(<4 x i8> %bswap)
 %cast = bitcast <4 x i8> %bswap to i32
 ret i32 %cast
@@ -709,7 +709,7 @@ define i128 @shuf_16bytes(<16 x i8> %x) {
 ; CHECK-NEXT: [[CAST:%.*]] = bitcast <16 x i8> [[BSWAP]] to i128
 ; CHECK-NEXT: ret i128 [[CAST]]
 ;
-%bswap = shufflevector <16 x i8> %x, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+%bswap = shufflevector <16 x i8> %x, <16 x i8> poison, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
 %cast = bitcast <16 x i8> %bswap to i128
 ret i128 %cast
 }
@@ -722,7 +722,7 @@ define i32 @shuf_2bytes_widening(<2 x i8> %x) {
 ; CHECK-NEXT: [[CAST:%.*]] = bitcast <4 x i8> [[BSWAP]] to i32
 ; CHECK-NEXT: ret i32 [[CAST]]
 ;
-%bswap = shufflevector <2 x i8> %x, <2 x i8> undef, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+%bswap = shufflevector <2 x i8> %x, <2 x i8> poison, <4 x i32> <i32 1, i32 0, i32 poison, i32 poison>
 %cast = bitcast <4 x i8> %bswap to i32
 ret i32 %cast
 }
@@ -61,29 +61,29 @@ define <2 x i32> @test3vec(<2 x i32> %a, <2 x i32> %b) nounwind readnone {
 ret <2 x i32> %t3
 }

-define <2 x i32> @test3vec_undef1(<2 x i32> %a, <2 x i32> %b) nounwind readnone {
-; CHECK-LABEL: @test3vec_undef1(
+define <2 x i32> @test3vec_poison1(<2 x i32> %a, <2 x i32> %b) nounwind readnone {
+; CHECK-LABEL: @test3vec_poison1(
 ; CHECK-NEXT: [[T2_UNSHIFTED:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT: [[T2:%.*]] = icmp ult <2 x i32> [[T2_UNSHIFTED]], <i32 16777216, i32 16777216>
 ; CHECK-NEXT: [[T3:%.*]] = zext <2 x i1> [[T2]] to <2 x i32>
 ; CHECK-NEXT: ret <2 x i32> [[T3]]
 ;
-%t0 = lshr <2 x i32> %a, <i32 24, i32 undef>
+%t0 = lshr <2 x i32> %a, <i32 24, i32 poison>
 %t1 = lshr <2 x i32> %b, <i32 24, i32 24>
 %t2 = icmp eq <2 x i32> %t0, %t1
 %t3 = zext <2 x i1> %t2 to <2 x i32>
 ret <2 x i32> %t3
 }

-define <2 x i32> @test3vec_undef2(<2 x i32> %a, <2 x i32> %b) nounwind readnone {
-; CHECK-LABEL: @test3vec_undef2(
+define <2 x i32> @test3vec_poison2(<2 x i32> %a, <2 x i32> %b) nounwind readnone {
+; CHECK-LABEL: @test3vec_poison2(
 ; CHECK-NEXT: [[T2_UNSHIFTED:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
 ; CHECK-NEXT: [[T2:%.*]] = icmp ult <2 x i32> [[T2_UNSHIFTED]], <i32 131072, i32 131072>
 ; CHECK-NEXT: [[T3:%.*]] = zext <2 x i1> [[T2]] to <2 x i32>
 ; CHECK-NEXT: ret <2 x i32> [[T3]]
 ;
-%t0 = lshr <2 x i32> %a, <i32 undef, i32 17>
-%t1 = lshr <2 x i32> %b, <i32 undef, i32 17>
+%t0 = lshr <2 x i32> %a, <i32 poison, i32 17>
+%t1 = lshr <2 x i32> %b, <i32 poison, i32 17>
 %t2 = icmp eq <2 x i32> %t0, %t1
 %t3 = zext <2 x i1> %t2 to <2 x i32>
 ret <2 x i32> %t3
@@ -182,15 +182,15 @@ define i1 @test_and_olt_logical(float %x) {
 ret i1 %cond
 }

-define <2 x i1> @test_and_olt_undef(<2 x float> %x) {
-; CHECK-LABEL: define <2 x i1> @test_and_olt_undef(
+define <2 x i1> @test_and_olt_poison(<2 x float> %x) {
+; CHECK-LABEL: define <2 x i1> @test_and_olt_poison(
 ; CHECK-SAME: <2 x float> [[X:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x float> @llvm.fabs.v2f32(<2 x float> [[X]])
 ; CHECK-NEXT: [[COND:%.*]] = fcmp olt <2 x float> [[TMP1]], <float 0x3C00000000000000, float 0x3C00000000000000>
 ; CHECK-NEXT: ret <2 x i1> [[COND]]
 ;
-%cmp1 = fcmp olt <2 x float> %x, <float 0x3C00000000000000, float undef>
-%cmp2 = fcmp ogt <2 x float> %x, <float 0xBC00000000000000, float undef>
+%cmp1 = fcmp olt <2 x float> %x, <float 0x3C00000000000000, float poison>
+%cmp2 = fcmp ogt <2 x float> %x, <float 0xBC00000000000000, float poison>
 %cond = and <2 x i1> %cmp1, %cmp2
 ret <2 x i1> %cond
 }
@@ -53,49 +53,49 @@ define <2 x i1> @rotl_ne_n1(<2 x i5> %x, <2 x i5> %y) {
 ret <2 x i1> %r
 }

-define <2 x i1> @rotl_ne_n1_undef(<2 x i5> %x, <2 x i5> %y) {
-; CHECK-LABEL: @rotl_ne_n1_undef(
-; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i5> [[X:%.*]], <i5 -1, i5 undef>
+define <2 x i1> @rotl_ne_n1_poison(<2 x i5> %x, <2 x i5> %y) {
+; CHECK-LABEL: @rotl_ne_n1_poison(
+; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i5> [[X:%.*]], <i5 -1, i5 poison>
 ; CHECK-NEXT: ret <2 x i1> [[R]]
 ;
 %rot = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5>%x, <2 x i5> %x, <2 x i5> %y)
-%r = icmp ne <2 x i5> %rot, <i5 -1, i5 undef>
+%r = icmp ne <2 x i5> %rot, <i5 -1, i5 poison>
 ret <2 x i1> %r
 }

-define <2 x i1> @rotl_eq_0_undef(<2 x i5> %x, <2 x i5> %y) {
-; CHECK-LABEL: @rotl_eq_0_undef(
-; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i5> [[X:%.*]], <i5 0, i5 undef>
+define <2 x i1> @rotl_eq_0_poison(<2 x i5> %x, <2 x i5> %y) {
+; CHECK-LABEL: @rotl_eq_0_poison(
+; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i5> [[X:%.*]], <i5 0, i5 poison>
 ; CHECK-NEXT: ret <2 x i1> [[R]]
 ;
 %rot = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5>%x, <2 x i5> %x, <2 x i5> %y)
-%r = icmp eq <2 x i5> %rot, <i5 0, i5 undef>
+%r = icmp eq <2 x i5> %rot, <i5 0, i5 poison>
 ret <2 x i1> %r
 }

 ; negative test - wrong constant value

-define <2 x i1> @rotl_eq_1_undef(<2 x i5> %x, <2 x i5> %y) {
-; CHECK-LABEL: @rotl_eq_1_undef(
+define <2 x i1> @rotl_eq_1_poison(<2 x i5> %x, <2 x i5> %y) {
+; CHECK-LABEL: @rotl_eq_1_poison(
 ; CHECK-NEXT: [[ROT:%.*]] = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5> [[X:%.*]], <2 x i5> [[X]], <2 x i5> [[Y:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i5> [[ROT]], <i5 undef, i5 1>
+; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i5> [[ROT]], <i5 poison, i5 1>
 ; CHECK-NEXT: ret <2 x i1> [[R]]
 ;
 %rot = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5>%x, <2 x i5> %x, <2 x i5> %y)
-%r = icmp eq <2 x i5> %rot, <i5 undef, i5 1>
+%r = icmp eq <2 x i5> %rot, <i5 poison, i5 1>
 ret <2 x i1> %r
 }

 ; negative test - wrong predicate

-define <2 x i1> @rotl_sgt_0_undef(<2 x i5> %x, <2 x i5> %y) {
-; CHECK-LABEL: @rotl_sgt_0_undef(
+define <2 x i1> @rotl_sgt_0_poison(<2 x i5> %x, <2 x i5> %y) {
+; CHECK-LABEL: @rotl_sgt_0_poison(
 ; CHECK-NEXT: [[ROT:%.*]] = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5> [[X:%.*]], <2 x i5> [[X]], <2 x i5> [[Y:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i5> [[ROT]], <i5 0, i5 undef>
+; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i5> [[ROT]], <i5 0, i5 poison>
 ; CHECK-NEXT: ret <2 x i1> [[R]]
 ;
 %rot = tail call <2 x i5> @llvm.fshl.v2i5(<2 x i5>%x, <2 x i5> %x, <2 x i5> %y)
-%r = icmp sgt <2 x i5> %rot, <i5 0, i5 undef>
+%r = icmp sgt <2 x i5> %rot, <i5 0, i5 poison>
 ret <2 x i1> %r
 }
@@ -448,7 +448,10 @@ define <2 x i1> @icmp_power2_and_icmp_shifted_mask_vector_swapped_splat_poison_2
 ; Vector of 2 reduction with splat containing undef
 define <2 x i1> @icmp_power2_and_icmp_shifted_mask_vector_splat_undef_2147483648_1610612736_2147483647(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_power2_and_icmp_shifted_mask_vector_splat_undef_2147483648_1610612736_2147483647(
-; CHECK-NEXT: [[T4:%.*]] = icmp ult <2 x i32> [[X:%.*]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T1:%.*]] = icmp ult <2 x i32> [[X:%.*]], <i32 -2147483648, i32 undef>
+; CHECK-NEXT: [[T2:%.*]] = and <2 x i32> [[X]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T3:%.*]] = icmp ne <2 x i32> [[T2]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T4:%.*]] = and <2 x i1> [[T1]], [[T3]]
 ; CHECK-NEXT: ret <2 x i1> [[T4]]
 ;
 %t1 = icmp ult <2 x i32> %x, <i32 2147483648, i32 undef>
@@ -460,7 +463,10 @@ define <2 x i1> @icmp_power2_and_icmp_shifted_mask_vector_splat_undef_2147483648

 define <2 x i1> @icmp_power2_and_icmp_shifted_mask_vector_swapped_splat_undef_2147483648_1610612736_2147483647(<2 x i32> %x) {
 ; CHECK-LABEL: @icmp_power2_and_icmp_shifted_mask_vector_swapped_splat_undef_2147483648_1610612736_2147483647(
-; CHECK-NEXT: [[T4:%.*]] = icmp ult <2 x i32> [[X:%.*]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T1:%.*]] = icmp ult <2 x i32> [[X:%.*]], <i32 -2147483648, i32 undef>
+; CHECK-NEXT: [[T2:%.*]] = and <2 x i32> [[X]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T3:%.*]] = icmp ne <2 x i32> [[T2]], <i32 1610612736, i32 2147483647>
+; CHECK-NEXT: [[T4:%.*]] = and <2 x i1> [[T3]], [[T1]]
 ; CHECK-NEXT: ret <2 x i1> [[T4]]
 ;
 %t1 = icmp ult <2 x i32> %x, <i32 2147483648, i32 undef>
@@ -184,7 +184,7 @@ define <3 x i1> @PR27756_2(<3 x i8> %a) {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt <3 x i8> [[A:%.*]], <i8 43, i8 43, i8 1>
 ; CHECK-NEXT: ret <3 x i1> [[CMP]]
 ;
-%cmp = icmp sle <3 x i8> %a, <i8 42, i8 undef, i8 0>
+%cmp = icmp sle <3 x i8> %a, <i8 42, i8 poison, i8 0>
 ret <3 x i1> %cmp
 }

@@ -193,7 +193,7 @@ define <3 x i1> @PR27756_3(<3 x i8> %a) {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <3 x i8> [[A:%.*]], <i8 0, i8 0, i8 41>
 ; CHECK-NEXT: ret <3 x i1> [[CMP]]
 ;
-%cmp = icmp sge <3 x i8> %a, <i8 undef, i8 1, i8 42>
+%cmp = icmp sge <3 x i8> %a, <i8 poison, i8 1, i8 42>
 ret <3 x i1> %cmp
 }
@ -300,14 +300,14 @@ define <4 x i1> @splat_icmp(<4 x i8> %x) {
|
|||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
define <4 x i1> @splat_icmp_undef(<4 x i8> %x) {
|
define <4 x i1> @splat_icmp_poison(<4 x i8> %x) {
|
||||||
; CHECK-LABEL: @splat_icmp_undef(
|
; CHECK-LABEL: @splat_icmp_poison(
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i8> [[X:%.*]], <i8 42, i8 42, i8 42, i8 42>
|
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i8> [[X:%.*]], <i8 42, i8 42, i8 42, i8 42>
|
||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 2, i32 undef, i32 undef, i32 2>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 2, i32 poison, i32 poison, i32 2>
|
||||||
%cmp = icmp ult <4 x i8> %splatx, <i8 undef, i8 42, i8 undef, i8 42>
|
%cmp = icmp ult <4 x i8> %splatx, <i8 poison, i8 42, i8 poison, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -317,8 +317,8 @@ define <4 x i1> @splat_icmp_larger_size(<2 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <2 x i1> [[TMP1]], <2 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <2 x i1> [[TMP1]], <2 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <2 x i8> %x, <2 x i8> poison, <4 x i32> <i32 1, i32 undef, i32 1, i32 undef>
|
%splatx = shufflevector <2 x i8> %x, <2 x i8> poison, <4 x i32> <i32 1, i32 poison, i32 1, i32 poison>
|
||||||
%cmp = icmp eq <4 x i8> %splatx, <i8 42, i8 42, i8 undef, i8 42>
|
%cmp = icmp eq <4 x i8> %splatx, <i8 42, i8 42, i8 poison, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -328,8 +328,8 @@ define <4 x i1> @splat_fcmp_smaller_size(<5 x float> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <5 x i1> [[TMP1]], <5 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <5 x i1> [[TMP1]], <5 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <5 x float> %x, <5 x float> poison, <4 x i32> <i32 1, i32 undef, i32 1, i32 undef>
|
%splatx = shufflevector <5 x float> %x, <5 x float> poison, <4 x i32> <i32 1, i32 poison, i32 1, i32 poison>
|
||||||
%cmp = fcmp oeq <4 x float> %splatx, <float 42.0, float 42.0, float undef, float 42.0>
|
%cmp = fcmp oeq <4 x float> %splatx, <float 42.0, float 42.0, float poison, float 42.0>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -186,7 +186,7 @@ define <3 x i1> @PR27756_2(<3 x i8> %a) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <3 x i8> [[A:%.*]], <i8 43, i8 43, i8 1>
|
; CHECK-NEXT: [[CMP:%.*]] = icmp slt <3 x i8> [[A:%.*]], <i8 43, i8 43, i8 1>
|
||||||
; CHECK-NEXT: ret <3 x i1> [[CMP]]
|
; CHECK-NEXT: ret <3 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%cmp = icmp sle <3 x i8> %a, <i8 42, i8 undef, i8 0>
|
%cmp = icmp sle <3 x i8> %a, <i8 42, i8 poison, i8 0>
|
||||||
ret <3 x i1> %cmp
|
ret <3 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -195,7 +195,7 @@ define <3 x i1> @PR27756_3(<3 x i8> %a) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <3 x i8> [[A:%.*]], <i8 0, i8 0, i8 41>
|
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <3 x i8> [[A:%.*]], <i8 0, i8 0, i8 41>
|
||||||
; CHECK-NEXT: ret <3 x i1> [[CMP]]
|
; CHECK-NEXT: ret <3 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%cmp = icmp sge <3 x i8> %a, <i8 undef, i8 1, i8 42>
|
%cmp = icmp sge <3 x i8> %a, <i8 poison, i8 1, i8 42>
|
||||||
ret <3 x i1> %cmp
|
ret <3 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -218,8 +218,8 @@ define <4 x i1> @same_shuffle_inputs_icmp(<4 x i8> %x, <4 x i8> %y) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 3, i32 3, i32 2, i32 0>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 3, i32 3, i32 2, i32 0>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%shufx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> < i32 3, i32 3, i32 2, i32 0 >
|
%shufx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> < i32 3, i32 3, i32 2, i32 0 >
|
||||||
%shufy = shufflevector <4 x i8> %y, <4 x i8> undef, <4 x i32> < i32 3, i32 3, i32 2, i32 0 >
|
%shufy = shufflevector <4 x i8> %y, <4 x i8> poison, <4 x i32> < i32 3, i32 3, i32 2, i32 0 >
|
||||||
%cmp = icmp sgt <4 x i8> %shufx, %shufy
|
%cmp = icmp sgt <4 x i8> %shufx, %shufy
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
@ -232,8 +232,8 @@ define <5 x i1> @same_shuffle_inputs_fcmp(<4 x float> %x, <4 x float> %y) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <5 x i32> <i32 0, i32 1, i32 3, i32 2, i32 0>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <5 x i32> <i32 0, i32 1, i32 3, i32 2, i32 0>
|
||||||
; CHECK-NEXT: ret <5 x i1> [[CMP]]
|
; CHECK-NEXT: ret <5 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%shufx = shufflevector <4 x float> %x, <4 x float> undef, <5 x i32> < i32 0, i32 1, i32 3, i32 2, i32 0 >
|
%shufx = shufflevector <4 x float> %x, <4 x float> poison, <5 x i32> < i32 0, i32 1, i32 3, i32 2, i32 0 >
|
||||||
%shufy = shufflevector <4 x float> %y, <4 x float> undef, <5 x i32> < i32 0, i32 1, i32 3, i32 2, i32 0 >
|
%shufy = shufflevector <4 x float> %y, <4 x float> poison, <5 x i32> < i32 0, i32 1, i32 3, i32 2, i32 0 >
|
||||||
%cmp = fcmp oeq <5 x float> %shufx, %shufy
|
%cmp = fcmp oeq <5 x float> %shufx, %shufy
|
||||||
ret <5 x i1> %cmp
|
ret <5 x i1> %cmp
|
||||||
}
|
}
|
||||||
@ -248,8 +248,8 @@ define <4 x i1> @same_shuffle_inputs_icmp_extra_use1(<4 x i8> %x, <4 x i8> %y) {
|
|||||||
; CHECK-NEXT: call void @use_v4i8(<4 x i8> [[SHUFX]])
|
; CHECK-NEXT: call void @use_v4i8(<4 x i8> [[SHUFX]])
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%shufx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 >
|
%shufx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> < i32 3, i32 3, i32 3, i32 3 >
|
||||||
%shufy = shufflevector <4 x i8> %y, <4 x i8> undef, <4 x i32> < i32 3, i32 3, i32 3, i32 3 >
|
%shufy = shufflevector <4 x i8> %y, <4 x i8> poison, <4 x i32> < i32 3, i32 3, i32 3, i32 3 >
|
||||||
%cmp = icmp ugt <4 x i8> %shufx, %shufy
|
%cmp = icmp ugt <4 x i8> %shufx, %shufy
|
||||||
call void @use_v4i8(<4 x i8> %shufx)
|
call void @use_v4i8(<4 x i8> %shufx)
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
@ -265,8 +265,8 @@ define <2 x i1> @same_shuffle_inputs_icmp_extra_use2(<4 x i8> %x, <4 x i8> %y) {
|
|||||||
; CHECK-NEXT: call void @use_v2i8(<2 x i8> [[SHUFY]])
|
; CHECK-NEXT: call void @use_v2i8(<2 x i8> [[SHUFY]])
|
||||||
; CHECK-NEXT: ret <2 x i1> [[CMP]]
|
; CHECK-NEXT: ret <2 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%shufx = shufflevector <4 x i8> %x, <4 x i8> undef, <2 x i32> < i32 3, i32 2 >
|
%shufx = shufflevector <4 x i8> %x, <4 x i8> poison, <2 x i32> < i32 3, i32 2 >
|
||||||
%shufy = shufflevector <4 x i8> %y, <4 x i8> undef, <2 x i32> < i32 3, i32 2 >
|
%shufy = shufflevector <4 x i8> %y, <4 x i8> poison, <2 x i32> < i32 3, i32 2 >
|
||||||
%cmp = icmp eq <2 x i8> %shufx, %shufy
|
%cmp = icmp eq <2 x i8> %shufx, %shufy
|
||||||
call void @use_v2i8(<2 x i8> %shufy)
|
call void @use_v2i8(<2 x i8> %shufy)
|
||||||
ret <2 x i1> %cmp
|
ret <2 x i1> %cmp
|
||||||
@ -283,8 +283,8 @@ define <2 x i1> @same_shuffle_inputs_icmp_extra_use3(<4 x i8> %x, <4 x i8> %y) {
|
|||||||
; CHECK-NEXT: call void @use_v2i8(<2 x i8> [[SHUFY]])
|
; CHECK-NEXT: call void @use_v2i8(<2 x i8> [[SHUFY]])
|
||||||
; CHECK-NEXT: ret <2 x i1> [[CMP]]
|
; CHECK-NEXT: ret <2 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%shufx = shufflevector <4 x i8> %x, <4 x i8> undef, <2 x i32> < i32 0, i32 0 >
|
%shufx = shufflevector <4 x i8> %x, <4 x i8> poison, <2 x i32> < i32 0, i32 0 >
|
||||||
%shufy = shufflevector <4 x i8> %y, <4 x i8> undef, <2 x i32> < i32 0, i32 0 >
|
%shufy = shufflevector <4 x i8> %y, <4 x i8> poison, <2 x i32> < i32 0, i32 0 >
|
||||||
%cmp = icmp eq <2 x i8> %shufx, %shufy
|
%cmp = icmp eq <2 x i8> %shufx, %shufy
|
||||||
call void @use_v2i8(<2 x i8> %shufx)
|
call void @use_v2i8(<2 x i8> %shufx)
|
||||||
call void @use_v2i8(<2 x i8> %shufy)
|
call void @use_v2i8(<2 x i8> %shufy)
|
||||||
@ -297,19 +297,19 @@ define <4 x i1> @splat_icmp(<4 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||||
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
define <4 x i1> @splat_icmp_undef(<4 x i8> %x) {
|
define <4 x i1> @splat_icmp_poison(<4 x i8> %x) {
|
||||||
; CHECK-LABEL: @splat_icmp_undef(
|
; CHECK-LABEL: @splat_icmp_poison(
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i8> [[X:%.*]], <i8 42, i8 42, i8 42, i8 42>
|
; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <4 x i8> [[X:%.*]], <i8 42, i8 42, i8 42, i8 42>
|
||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <4 x i1> [[TMP1]], <4 x i1> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 2, i32 undef, i32 undef, i32 2>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 2, i32 poison, i32 poison, i32 2>
|
||||||
%cmp = icmp ult <4 x i8> %splatx, <i8 undef, i8 42, i8 undef, i8 42>
|
%cmp = icmp ult <4 x i8> %splatx, <i8 poison, i8 42, i8 poison, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -319,8 +319,8 @@ define <4 x i1> @splat_icmp_larger_size(<2 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <2 x i1> [[TMP1]], <2 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <2 x i1> [[TMP1]], <2 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <2 x i8> %x, <2 x i8> undef, <4 x i32> <i32 1, i32 undef, i32 1, i32 undef>
|
%splatx = shufflevector <2 x i8> %x, <2 x i8> poison, <4 x i32> <i32 1, i32 poison, i32 1, i32 poison>
|
||||||
%cmp = icmp eq <4 x i8> %splatx, <i8 42, i8 42, i8 undef, i8 42>
|
%cmp = icmp eq <4 x i8> %splatx, <i8 42, i8 42, i8 poison, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -330,8 +330,8 @@ define <4 x i1> @splat_fcmp_smaller_size(<5 x float> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <5 x i1> [[TMP1]], <5 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
; CHECK-NEXT: [[CMP:%.*]] = shufflevector <5 x i1> [[TMP1]], <5 x i1> poison, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <5 x float> %x, <5 x float> undef, <4 x i32> <i32 1, i32 undef, i32 1, i32 undef>
|
%splatx = shufflevector <5 x float> %x, <5 x float> poison, <4 x i32> <i32 1, i32 poison, i32 1, i32 poison>
|
||||||
%cmp = fcmp oeq <4 x float> %splatx, <float 42.0, float 42.0, float undef, float 42.0>
|
%cmp = fcmp oeq <4 x float> %splatx, <float 42.0, float 42.0, float poison, float 42.0>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -344,7 +344,7 @@ define <4 x i1> @splat_icmp_extra_use(<4 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 42, i8 42, i8 42, i8 42>
|
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 42, i8 42, i8 42, i8 42>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||||
call void @use_v4i8(<4 x i8> %splatx)
|
call void @use_v4i8(<4 x i8> %splatx)
|
||||||
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
@ -358,7 +358,7 @@ define <4 x i1> @not_splat_icmp(<4 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 42, i8 42, i8 42, i8 42>
|
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 42, i8 42, i8 42, i8 42>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 3, i32 2, i32 3, i32 3>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 3, i32 3>
|
||||||
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
%cmp = icmp sgt <4 x i8> %splatx, <i8 42, i8 42, i8 42, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
@ -371,7 +371,7 @@ define <4 x i1> @not_splat_icmp2(<4 x i8> %x) {
|
|||||||
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 43, i8 42, i8 42, i8 42>
|
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <4 x i8> [[SPLATX]], <i8 43, i8 42, i8 42, i8 42>
|
||||||
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
; CHECK-NEXT: ret <4 x i1> [[CMP]]
|
||||||
;
|
;
|
||||||
%splatx = shufflevector <4 x i8> %x, <4 x i8> undef, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
%splatx = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 2, i32 2, i32 2, i32 2>
|
||||||
%cmp = icmp sgt <4 x i8> %splatx, <i8 43, i8 42, i8 42, i8 42>
|
%cmp = icmp sgt <4 x i8> %splatx, <i8 43, i8 42, i8 42, i8 42>
|
||||||
ret <4 x i1> %cmp
|
ret <4 x i1> %cmp
|
||||||
}
|
}
|
||||||
@ -385,7 +385,7 @@ define <2 x i1> @icmp_logical_or_vec(<2 x i64> %x, <2 x i64> %y, <2 x i1> %false
|
|||||||
; CHECK-NEXT: ret <2 x i1> [[SEL]]
|
; CHECK-NEXT: ret <2 x i1> [[SEL]]
|
||||||
;
|
;
|
||||||
%cmp.ne = icmp ne <2 x i64> %x, zeroinitializer
|
%cmp.ne = icmp ne <2 x i64> %x, zeroinitializer
|
||||||
%sel = select <2 x i1> %cmp.ne, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), <2 x i1> %falseval
|
%sel = select <2 x i1> %cmp.ne, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> poison, i1 true, i32 0), <2 x i1> poison, <2 x i32> zeroinitializer), <2 x i1> %falseval
|
||||||
ret <2 x i1> %sel
|
ret <2 x i1> %sel
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -39,34 +39,37 @@ define <2 x i8> @t2_vec(<2 x i8> %x) {
 %r = ashr <2 x i8> %i0, <i8 7, i8 7>
 ret <2 x i8> %r
 }
-define <3 x i8> @t3_vec_undef0(<3 x i8> %x) {
-; CHECK-LABEL: @t3_vec_undef0(
+
+; TODO: The result constants should contain poison instead of undef.
+
+define <3 x i8> @t3_vec_poison0(<3 x i8> %x) {
+; CHECK-LABEL: @t3_vec_poison0(
 ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i8> [[X:%.*]], <i8 1, i8 undef, i8 1>
 ; CHECK-NEXT: [[R:%.*]] = sub <3 x i8> zeroinitializer, [[TMP1]]
 ; CHECK-NEXT: ret <3 x i8> [[R]]
 ;
-%i0 = shl <3 x i8> %x, <i8 7, i8 undef, i8 7>
+%i0 = shl <3 x i8> %x, <i8 7, i8 poison, i8 7>
 %r = ashr <3 x i8> %i0, <i8 7, i8 7, i8 7>
 ret <3 x i8> %r
 }
-define <3 x i8> @t4_vec_undef1(<3 x i8> %x) {
-; CHECK-LABEL: @t4_vec_undef1(
+define <3 x i8> @t4_vec_poison1(<3 x i8> %x) {
+; CHECK-LABEL: @t4_vec_poison1(
 ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i8> [[X:%.*]], <i8 1, i8 undef, i8 1>
 ; CHECK-NEXT: [[R:%.*]] = sub <3 x i8> zeroinitializer, [[TMP1]]
 ; CHECK-NEXT: ret <3 x i8> [[R]]
 ;
 %i0 = shl <3 x i8> %x, <i8 7, i8 7, i8 7>
-%r = ashr <3 x i8> %i0, <i8 7, i8 undef, i8 7>
+%r = ashr <3 x i8> %i0, <i8 7, i8 poison, i8 7>
 ret <3 x i8> %r
 }
-define <3 x i8> @t5_vec_undef2(<3 x i8> %x) {
-; CHECK-LABEL: @t5_vec_undef2(
+define <3 x i8> @t5_vec_poison2(<3 x i8> %x) {
+; CHECK-LABEL: @t5_vec_poison2(
 ; CHECK-NEXT: [[TMP1:%.*]] = and <3 x i8> [[X:%.*]], <i8 1, i8 undef, i8 1>
 ; CHECK-NEXT: [[R:%.*]] = sub <3 x i8> zeroinitializer, [[TMP1]]
 ; CHECK-NEXT: ret <3 x i8> [[R]]
 ;
-%i0 = shl <3 x i8> %x, <i8 7, i8 undef, i8 7>
-%r = ashr <3 x i8> %i0, <i8 7, i8 undef, i8 7>
+%i0 = shl <3 x i8> %x, <i8 7, i8 poison, i8 7>
+%r = ashr <3 x i8> %i0, <i8 7, i8 poison, i8 7>
 ret <3 x i8> %r
 }
|
@ -74,13 +74,13 @@ define <2 x i16> @t4_vec_splat(<2 x i8> %x) {
|
|||||||
ret <2 x i16> %c
|
ret <2 x i16> %c
|
||||||
}
|
}
|
||||||
|
|
||||||
define <2 x i16> @t5_vec_undef(<2 x i8> %x) {
|
define <2 x i16> @t5_vec_poison(<2 x i8> %x) {
|
||||||
; CHECK-LABEL: @t5_vec_undef(
|
; CHECK-LABEL: @t5_vec_poison(
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 4>
|
; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 4, i8 4>
|
||||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
|
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
|
||||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||||
;
|
;
|
||||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
%a = lshr <2 x i8> %x, <i8 4, i8 poison>
|
||||||
%b = trunc <2 x i8> %a to <2 x i4>
|
%b = trunc <2 x i8> %a to <2 x i4>
|
||||||
%c = sext <2 x i4> %b to <2 x i16>
|
%c = sext <2 x i4> %b to <2 x i16>
|
||||||
ret <2 x i16> %c
|
ret <2 x i16> %c
|
||||||
@ -105,15 +105,15 @@ define i16 @t6_extrause0(i8 %x) {
|
|||||||
|
|
||||||
; TODO: We could convert %a to ashr and eliminate 2nd use of %b.
|
; TODO: We could convert %a to ashr and eliminate 2nd use of %b.
|
||||||
|
|
||||||
define <2 x i16> @t7_extrause0_vec_undef(<2 x i8> %x) {
|
define <2 x i16> @t7_extrause0_vec_poison(<2 x i8> %x) {
|
||||||
; CHECK-LABEL: @t7_extrause0_vec_undef(
|
; CHECK-LABEL: @t7_extrause0_vec_poison(
|
||||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 poison>
|
||||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
; CHECK-NEXT: [[B:%.*]] = trunc nuw <2 x i8> [[A]] to <2 x i4>
|
||||||
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B]])
|
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B]])
|
||||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||||
;
|
;
|
||||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
%a = lshr <2 x i8> %x, <i8 4, i8 poison>
|
||||||
%b = trunc <2 x i8> %a to <2 x i4>
|
%b = trunc <2 x i8> %a to <2 x i4>
|
||||||
call void @usevec4(<2 x i4> %b)
|
call void @usevec4(<2 x i4> %b)
|
||||||
%c = sext <2 x i4> %b to <2 x i16>
|
%c = sext <2 x i4> %b to <2 x i16>
|
||||||
@ -139,15 +139,15 @@ define i16 @t8_extrause1(i8 %x) {
|
|||||||
|
|
||||||
; TODO: We could convert %a to ashr + mask (and) and eliminate %b.
|
; TODO: We could convert %a to ashr + mask (and) and eliminate %b.
|
||||||
|
|
||||||
define <2 x i16> @t9_extrause1_vec_undef(<2 x i8> %x) {
|
define <2 x i16> @t9_extrause1_vec_poison(<2 x i8> %x) {
|
||||||
; CHECK-LABEL: @t9_extrause1_vec_undef(
|
; CHECK-LABEL: @t9_extrause1_vec_poison(
|
||||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 poison>
|
||||||
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A]])
|
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A]])
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i8> [[X]], <i8 4, i8 4>
|
; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i8> [[X]], <i8 4, i8 4>
|
||||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
|
; CHECK-NEXT: [[C:%.*]] = sext <2 x i8> [[TMP1]] to <2 x i16>
|
||||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||||
;
|
;
|
||||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
%a = lshr <2 x i8> %x, <i8 4, i8 poison>
|
||||||
call void @usevec8(<2 x i8> %a)
|
call void @usevec8(<2 x i8> %a)
|
||||||
%b = trunc <2 x i8> %a to <2 x i4>
|
%b = trunc <2 x i8> %a to <2 x i4>
|
||||||
%c = sext <2 x i4> %b to <2 x i16>
|
%c = sext <2 x i4> %b to <2 x i16>
|
||||||
@ -169,16 +169,16 @@ define i16 @t10_extrause2(i8 %x) {
|
|||||||
%c = sext i4 %b to i16
|
%c = sext i4 %b to i16
|
||||||
ret i16 %c
|
ret i16 %c
|
||||||
}
|
}
|
||||||
define <2 x i16> @t11_extrause2_vec_undef(<2 x i8> %x) {
|
define <2 x i16> @t11_extrause2_vec_poison(<2 x i8> %x) {
|
||||||
; CHECK-LABEL: @t11_extrause2_vec_undef(
|
; CHECK-LABEL: @t11_extrause2_vec_poison(
|
||||||
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 undef>
|
; CHECK-NEXT: [[A:%.*]] = lshr <2 x i8> [[X:%.*]], <i8 4, i8 poison>
|
||||||
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A]])
|
; CHECK-NEXT: call void @usevec8(<2 x i8> [[A]])
|
||||||
; CHECK-NEXT: [[B:%.*]] = trunc <2 x i8> [[A]] to <2 x i4>
|
; CHECK-NEXT: [[B:%.*]] = trunc nuw <2 x i8> [[A]] to <2 x i4>
|
||||||
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B]])
|
; CHECK-NEXT: call void @usevec4(<2 x i4> [[B]])
|
||||||
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
; CHECK-NEXT: [[C:%.*]] = sext <2 x i4> [[B]] to <2 x i16>
|
||||||
; CHECK-NEXT: ret <2 x i16> [[C]]
|
; CHECK-NEXT: ret <2 x i16> [[C]]
|
||||||
;
|
;
|
||||||
%a = lshr <2 x i8> %x, <i8 4, i8 undef>
|
%a = lshr <2 x i8> %x, <i8 4, i8 poison>
|
||||||
call void @usevec8(<2 x i8> %a)
|
call void @usevec8(<2 x i8> %a)
|
||||||
%b = trunc <2 x i8> %a to <2 x i4>
|
%b = trunc <2 x i8> %a to <2 x i4>
|
||||||
call void @usevec4(<2 x i4> %b)
|
call void @usevec4(<2 x i4> %b)
|
||||||
|
@ -1665,37 +1665,37 @@ define float @copysign3(float %x) {
|
|||||||
ret float %r
|
ret float %r
|
||||||
}
|
}
|
||||||
|
|
||||||
define <2 x float> @copysign_vec_undef(<2 x float> %x) {
|
define <2 x float> @copysign_vec_poison(<2 x float> %x) {
|
||||||
; CHECK-LABEL: @copysign_vec_undef(
|
; CHECK-LABEL: @copysign_vec_poison(
|
||||||
; CHECK-NEXT: [[TMP1:%.*]] = fneg <2 x float> [[X:%.*]]
|
; CHECK-NEXT: [[TMP1:%.*]] = fneg <2 x float> [[X:%.*]]
|
||||||
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[TMP1]])
|
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[TMP1]])
|
||||||
; CHECK-NEXT: ret <2 x float> [[R]]
|
; CHECK-NEXT: ret <2 x float> [[R]]
|
||||||
;
|
;
|
||||||
%i = bitcast <2 x float> %x to <2 x i32>
|
%i = bitcast <2 x float> %x to <2 x i32>
|
||||||
%isneg = icmp ugt <2 x i32> %i, <i32 2147483647, i32 2147483647>
|
%isneg = icmp ugt <2 x i32> %i, <i32 2147483647, i32 2147483647>
|
||||||
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float 42.0, float undef>, <2 x float> <float -42.0, float -42.0>
|
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float 42.0, float poison>, <2 x float> <float -42.0, float -42.0>
|
||||||
ret <2 x float> %r
|
ret <2 x float> %r
|
||||||
}
|
}
|
||||||
|
|
||||||
define <2 x float> @copysign_vec_undef1(<2 x float> %x) {
|
define <2 x float> @copysign_vec_poison1(<2 x float> %x) {
|
||||||
; CHECK-LABEL: @copysign_vec_undef1(
|
; CHECK-LABEL: @copysign_vec_poison1(
|
||||||
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[X:%.*]])
|
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[X:%.*]])
|
||||||
; CHECK-NEXT: ret <2 x float> [[R]]
|
; CHECK-NEXT: ret <2 x float> [[R]]
|
||||||
;
|
;
|
||||||
%i = bitcast <2 x float> %x to <2 x i32>
|
%i = bitcast <2 x float> %x to <2 x i32>
|
||||||
%isneg = icmp ult <2 x i32> %i, <i32 2147483648, i32 2147483648>
|
%isneg = icmp ult <2 x i32> %i, <i32 2147483648, i32 2147483648>
|
||||||
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float 42.0, float 42.0>, <2 x float> <float undef, float -42.0>
|
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float 42.0, float 42.0>, <2 x float> <float poison, float -42.0>
|
||||||
ret <2 x float> %r
|
ret <2 x float> %r
|
||||||
}
|
}
|
||||||
|
|
||||||
define <2 x float> @copysign_vec_undef3(<2 x float> %x) {
|
define <2 x float> @copysign_vec_poison3(<2 x float> %x) {
|
||||||
; CHECK-LABEL: @copysign_vec_undef3(
|
; CHECK-LABEL: @copysign_vec_poison3(
|
||||||
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[X:%.*]])
|
; CHECK-NEXT: [[R:%.*]] = call <2 x float> @llvm.copysign.v2f32(<2 x float> <float 4.200000e+01, float 4.200000e+01>, <2 x float> [[X:%.*]])
|
||||||
; CHECK-NEXT: ret <2 x float> [[R]]
|
; CHECK-NEXT: ret <2 x float> [[R]]
|
||||||
;
|
;
|
||||||
%i = bitcast <2 x float> %x to <2 x i32>
|
%i = bitcast <2 x float> %x to <2 x i32>
|
||||||
%isneg = icmp ugt <2 x i32> %i, <i32 2147483647, i32 2147483647>
|
%isneg = icmp ugt <2 x i32> %i, <i32 2147483647, i32 2147483647>
|
||||||
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float -42.0, float undef>, <2 x float> <float +42.0, float undef>
|
%r = select arcp nnan <2 x i1> %isneg, <2 x float> <float -42.0, float poison>, <2 x float> <float +42.0, float poison>
|
||||||
ret <2 x float> %r
|
ret <2 x float> %r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -198,104 +198,104 @@ define <2 x i1> @positive_vec_nonsplat(<2 x i32> %arg) {
ret <2 x i1> %t4
}

- define <3 x i1> @positive_vec_undef0(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison0(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef0(
+ ; CHECK-LABEL: @positive_vec_poison0(
; CHECK-NEXT: [[T4_SIMPLIFIED:%.*]] = icmp ult <3 x i32> [[ARG:%.*]], <i32 128, i32 128, i32 128>
; CHECK-NEXT: ret <3 x i1> [[T4_SIMPLIFIED]]
;
- %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
+ %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 poison, i32 -1>
%t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
%t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef1(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison1(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef1(
+ ; CHECK-LABEL: @positive_vec_poison1(
; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
- ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
+ ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 poison, i32 128>
; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 256, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
%t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
- %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
+ %t2 = add <3 x i32> %arg, <i32 128, i32 poison, i32 128>
%t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef2(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison2(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef2(
+ ; CHECK-LABEL: @positive_vec_poison2(
; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 128, i32 128>
- ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
+ ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 poison, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
%t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
%t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
- %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
+ %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 poison, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef3(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison3(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef3(
+ ; CHECK-LABEL: @positive_vec_poison3(
- ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
+ ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 poison, i32 -1>
- ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
+ ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 poison, i32 128>
; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 256, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
- %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
+ %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 poison, i32 -1>
- %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
+ %t2 = add <3 x i32> %arg, <i32 128, i32 poison, i32 128>
%t3 = icmp ult <3 x i32> %t2, <i32 256, i32 256, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef4(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison4(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef4(
+ ; CHECK-LABEL: @positive_vec_poison4(
- ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
+ ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 poison, i32 -1>
; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 128, i32 128>
- ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
+ ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 poison, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
- %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
+ %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 poison, i32 -1>
%t2 = add <3 x i32> %arg, <i32 128, i32 128, i32 128>
- %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
+ %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 poison, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef5(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison5(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef5(
+ ; CHECK-LABEL: @positive_vec_poison5(
; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 -1, i32 -1>
- ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
+ ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 poison, i32 128>
- ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
+ ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 poison, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
%t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 -1, i32 -1>
- %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
+ %t2 = add <3 x i32> %arg, <i32 128, i32 poison, i32 128>
- %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
+ %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 poison, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}

- define <3 x i1> @positive_vec_undef6(<3 x i32> %arg) {
+ define <3 x i1> @positive_vec_poison6(<3 x i32> %arg) {
- ; CHECK-LABEL: @positive_vec_undef6(
+ ; CHECK-LABEL: @positive_vec_poison6(
- ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 undef, i32 -1>
+ ; CHECK-NEXT: [[T1:%.*]] = icmp sgt <3 x i32> [[ARG:%.*]], <i32 -1, i32 poison, i32 -1>
- ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 undef, i32 128>
+ ; CHECK-NEXT: [[T2:%.*]] = add <3 x i32> [[ARG]], <i32 128, i32 poison, i32 128>
- ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 undef, i32 256>
+ ; CHECK-NEXT: [[T3:%.*]] = icmp ult <3 x i32> [[T2]], <i32 256, i32 poison, i32 256>
; CHECK-NEXT: [[T4:%.*]] = and <3 x i1> [[T1]], [[T3]]
; CHECK-NEXT: ret <3 x i1> [[T4]]
;
- %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 undef, i32 -1>
+ %t1 = icmp sgt <3 x i32> %arg, <i32 -1, i32 poison, i32 -1>
- %t2 = add <3 x i32> %arg, <i32 128, i32 undef, i32 128>
+ %t2 = add <3 x i32> %arg, <i32 128, i32 poison, i32 128>
- %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 undef, i32 256>
+ %t3 = icmp ult <3 x i32> %t2, <i32 256, i32 poison, i32 256>
%t4 = and <3 x i1> %t1, %t3
ret <3 x i1> %t4
}
@@ -214,7 +214,7 @@ define <2 x i1> @low_bitmask_uge(<2 x i8> %x) {
; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[X:%.*]], zeroinitializer
; CHECK-NEXT: ret <2 x i1> [[R]]
;
- %a = add <2 x i8> %x, <i8 15, i8 undef>
+ %a = add <2 x i8> %x, <i8 15, i8 poison>
%m = and <2 x i8> %a, <i8 15, i8 15>
%r = icmp uge <2 x i8> %m, %x
ret <2 x i1> %r
@@ -63,15 +63,15 @@ define <4 x i8> @testv4i16i8(<4 x i16> %add) {
ret <4 x i8> %x
}

- define <4 x i8> @testv4i16i8_undef(<4 x i16> %add) {
+ define <4 x i8> @testv4i16i8_poison(<4 x i16> %add) {
- ; CHECK-LABEL: @testv4i16i8_undef(
+ ; CHECK-LABEL: @testv4i16i8_poison(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <4 x i16> [[ADD:%.*]], <i16 -1, i16 -1, i16 -1, i16 -1>
- ; CHECK-NEXT: [[X:%.*]] = select <4 x i1> [[TMP1]], <4 x i8> <i8 27, i8 27, i8 undef, i8 27>, <4 x i8> <i8 -28, i8 -28, i8 undef, i8 -28>
+ ; CHECK-NEXT: [[X:%.*]] = select <4 x i1> [[TMP1]], <4 x i8> <i8 27, i8 27, i8 poison, i8 27>, <4 x i8> <i8 -28, i8 -28, i8 poison, i8 -28>
; CHECK-NEXT: ret <4 x i8> [[X]]
;
- %sh = ashr <4 x i16> %add, <i16 15, i16 undef, i16 15, i16 15>
+ %sh = ashr <4 x i16> %add, <i16 15, i16 poison, i16 15, i16 15>
%t = trunc <4 x i16> %sh to <4 x i8>
- %x = xor <4 x i8> %t, <i8 27, i8 27, i8 undef, i8 27>
+ %x = xor <4 x i8> %t, <i8 27, i8 27, i8 poison, i8 27>
ret <4 x i8> %x
}
@@ -27,13 +27,13 @@ define <2 x i1> @i32_cast_cmp_slt_int_0_uitofp_float_vec(<2 x i32> %i) {
ret <2 x i1> %cmp
}

- define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_float_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_float_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_float_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_float_vec_poison(
; CHECK-NEXT: ret <3 x i1> zeroinitializer
;
%f = uitofp <3 x i32> %i to <3 x float>
%b = bitcast <3 x float> %f to <3 x i32>
- %cmp = icmp slt <3 x i32> %b, <i32 0, i32 undef, i32 0>
+ %cmp = icmp slt <3 x i32> %b, <i32 0, i32 poison, i32 0>
ret <3 x i1> %cmp
}

@@ -70,13 +70,13 @@ define i1 @i32_cast_cmp_sgt_int_m1_uitofp_float_vec_mismatch(<2 x i32> %i) {
ret i1 %cmp
}

- define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_float_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_float_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_float_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_float_vec_poison(
; CHECK-NEXT: ret <3 x i1> <i1 true, i1 true, i1 true>
;
%f = uitofp <3 x i32> %i to <3 x float>
%b = bitcast <3 x float> %f to <3 x i32>
- %cmp = icmp sgt <3 x i32> %b, <i32 -1, i32 undef, i32 -1>
+ %cmp = icmp sgt <3 x i32> %b, <i32 -1, i32 poison, i32 -1>
ret <3 x i1> %cmp
}

@@ -100,13 +100,13 @@ define <2 x i1> @i32_cast_cmp_slt_int_0_uitofp_double_vec(<2 x i32> %i) {
ret <2 x i1> %cmp
}

- define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_double_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_double_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_double_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_double_vec_poison(
; CHECK-NEXT: ret <3 x i1> zeroinitializer
;
%f = uitofp <3 x i32> %i to <3 x double>
%b = bitcast <3 x double> %f to <3 x i64>
- %cmp = icmp slt <3 x i64> %b, <i64 0, i64 undef, i64 0>
+ %cmp = icmp slt <3 x i64> %b, <i64 0, i64 poison, i64 0>
ret <3 x i1> %cmp
}

@@ -130,13 +130,13 @@ define <2 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_double_vec(<2 x i32> %i) {
ret <2 x i1> %cmp
}

- define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_double_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_double_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_double_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_double_vec_poison(
; CHECK-NEXT: ret <3 x i1> <i1 true, i1 true, i1 true>
;
%f = uitofp <3 x i32> %i to <3 x double>
%b = bitcast <3 x double> %f to <3 x i64>
- %cmp = icmp sgt <3 x i64> %b, <i64 -1, i64 undef, i64 -1>
+ %cmp = icmp sgt <3 x i64> %b, <i64 -1, i64 poison, i64 -1>
ret <3 x i1> %cmp
}

@@ -160,13 +160,13 @@ define <2 x i1> @i32_cast_cmp_slt_int_0_uitofp_half_vec(<2 x i32> %i) {
ret <2 x i1> %cmp
}

- define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_half_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_slt_int_0_uitofp_half_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_half_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_slt_int_0_uitofp_half_vec_poison(
; CHECK-NEXT: ret <3 x i1> zeroinitializer
;
%f = uitofp <3 x i32> %i to <3 x half>
%b = bitcast <3 x half> %f to <3 x i16>
- %cmp = icmp slt <3 x i16> %b, <i16 0, i16 undef, i16 0>
+ %cmp = icmp slt <3 x i16> %b, <i16 0, i16 poison, i16 0>
ret <3 x i1> %cmp
}

@@ -190,12 +190,12 @@ define <2 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_half_vec(<2 x i32> %i) {
ret <2 x i1> %cmp
}

- define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_half_vec_undef(<3 x i32> %i) {
+ define <3 x i1> @i32_cast_cmp_sgt_int_m1_uitofp_half_vec_poison(<3 x i32> %i) {
- ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_half_vec_undef(
+ ; CHECK-LABEL: @i32_cast_cmp_sgt_int_m1_uitofp_half_vec_poison(
; CHECK-NEXT: ret <3 x i1> <i1 true, i1 true, i1 true>
;
%f = uitofp <3 x i32> %i to <3 x half>
%b = bitcast <3 x half> %f to <3 x i16>
- %cmp = icmp sgt <3 x i16> %b, <i16 -1, i16 undef, i16 -1>
+ %cmp = icmp sgt <3 x i16> %b, <i16 -1, i16 poison, i16 -1>
ret <3 x i1> %cmp
}
@@ -19,11 +19,11 @@ define <2 x i1> @tautological_ule_vec(<2 x i8> %x) {
ret <2 x i1> %cmp
}

- define <2 x i1> @tautological_ule_vec_partial_undef(<2 x i8> %x) {
+ define <2 x i1> @tautological_ule_vec_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @tautological_ule_vec_partial_undef(
+ ; CHECK-LABEL: @tautological_ule_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
- %cmp = icmp ule <2 x i8> %x, <i8 255, i8 undef>
+ %cmp = icmp ule <2 x i8> %x, <i8 255, i8 poison>
ret <2 x i1> %cmp
}

@@ -43,11 +43,11 @@ define <2 x i1> @tautological_ugt_vec(<2 x i8> %x) {
ret <2 x i1> %cmp
}

- define <2 x i1> @tautological_ugt_vec_partial_undef(<2 x i8> %x) {
+ define <2 x i1> @tautological_ugt_vec_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @tautological_ugt_vec_partial_undef(
+ ; CHECK-LABEL: @tautological_ugt_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
- %cmp = icmp ugt <2 x i8> %x, <i8 undef, i8 255>
+ %cmp = icmp ugt <2 x i8> %x, <i8 poison, i8 255>
ret <2 x i1> %cmp
}
@@ -70,12 +70,12 @@ define <2 x i1> @urem3_vec(<2 x i32> %X) {
ret <2 x i1> %B
}

- define <2 x i1> @urem3_vec_partial_undef(<2 x i32> %X) {
+ define <2 x i1> @urem3_vec_partial_poison(<2 x i32> %X) {
- ; CHECK-LABEL: @urem3_vec_partial_undef(
+ ; CHECK-LABEL: @urem3_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%A = urem <2 x i32> %X, <i32 10, i32 10>
- %B = icmp ult <2 x i32> %A, <i32 undef, i32 15>
+ %B = icmp ult <2 x i32> %A, <i32 poison, i32 15>
ret <2 x i1> %B
}

@@ -98,12 +98,12 @@ define <2 x i1> @srem1_vec(<2 x i32> %X) {
ret <2 x i1> %B
}

- define <2 x i1> @srem1_vec_partial_undef(<2 x i32> %X) {
+ define <2 x i1> @srem1_vec_partial_poison(<2 x i32> %X) {
- ; CHECK-LABEL: @srem1_vec_partial_undef(
+ ; CHECK-LABEL: @srem1_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
%A = srem <2 x i32> %X, <i32 -5, i32 -5>
- %B = icmp sgt <2 x i32> %A, <i32 5, i32 undef>
+ %B = icmp sgt <2 x i32> %A, <i32 5, i32 poison>
ret <2 x i1> %B
}

@@ -203,12 +203,12 @@ define <2 x i1> @shl5_vec(<2 x i32> %X) {
ret <2 x i1> %cmp
}

- define <2 x i1> @shl5_vec_partial_undef(<2 x i32> %X) {
+ define <2 x i1> @shl5_vec_partial_poison(<2 x i32> %X) {
- ; CHECK-LABEL: @shl5_vec_partial_undef(
+ ; CHECK-LABEL: @shl5_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%sub = shl nuw <2 x i32> <i32 4, i32 4>, %X
- %cmp = icmp ugt <2 x i32> %sub, <i32 undef, i32 3>
+ %cmp = icmp ugt <2 x i32> %sub, <i32 poison, i32 3>
ret <2 x i1> %cmp
}

@@ -421,12 +421,12 @@ define <2 x i1> @or1_vec(<2 x i32> %X) {
ret <2 x i1> %B
}

- define <2 x i1> @or1_vec_partial_undef(<2 x i32> %X) {
+ define <2 x i1> @or1_vec_partial_poison(<2 x i32> %X) {
- ; CHECK-LABEL: @or1_vec_partial_undef(
+ ; CHECK-LABEL: @or1_vec_partial_poison(
; CHECK-NEXT: ret <2 x i1> zeroinitializer
;
%A = or <2 x i32> %X, <i32 62, i32 62>
- %B = icmp ult <2 x i32> %A, <i32 undef, i32 50>
+ %B = icmp ult <2 x i32> %A, <i32 poison, i32 50>
ret <2 x i1> %B
}
@@ -900,22 +900,22 @@ define <2 x i1> @mul_nuw_urem_cmp_constant_vec_splat(<2 x i8> %x) {

; Undefs in vector constants are ok.

- define <2 x i1> @mul_nuw_urem_cmp_constant_vec_splat_undef1(<2 x i8> %x) {
+ define <2 x i1> @mul_nuw_urem_cmp_constant_vec_splat_poison1(<2 x i8> %x) {
- ; CHECK-LABEL: @mul_nuw_urem_cmp_constant_vec_splat_undef1(
+ ; CHECK-LABEL: @mul_nuw_urem_cmp_constant_vec_splat_poison1(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%m = mul nuw <2 x i8> %x, <i8 45, i8 45>
- %r = icmp ne <2 x i8> %m, <i8 15, i8 undef>
+ %r = icmp ne <2 x i8> %m, <i8 15, i8 poison>
ret <2 x i1> %r
}

; Undefs in vector constants are ok.

- define <2 x i1> @mul_nuw_urem_cmp_constant_vec_splat_undef2(<2 x i8> %x) {
+ define <2 x i1> @mul_nuw_urem_cmp_constant_vec_splat_poison2(<2 x i8> %x) {
- ; CHECK-LABEL: @mul_nuw_urem_cmp_constant_vec_splat_undef2(
+ ; CHECK-LABEL: @mul_nuw_urem_cmp_constant_vec_splat_poison2(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
- %m = mul nuw <2 x i8> %x, <i8 undef, i8 45>
+ %m = mul nuw <2 x i8> %x, <i8 poison, i8 45>
%r = icmp ne <2 x i8> %m, <i8 15, i8 15>
ret <2 x i1> %r
}

@@ -1007,22 +1007,22 @@ define <2 x i1> @mul_nsw_srem_cmp_constant_vec_splat(<2 x i8> %x) {

; Undefs in vector constants are ok.

- define <2 x i1> @mul_nsw_srem_cmp_constant_vec_splat_undef1(<2 x i8> %x) {
+ define <2 x i1> @mul_nsw_srem_cmp_constant_vec_splat_poison1(<2 x i8> %x) {
- ; CHECK-LABEL: @mul_nsw_srem_cmp_constant_vec_splat_undef1(
+ ; CHECK-LABEL: @mul_nsw_srem_cmp_constant_vec_splat_poison1(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
%m = mul nsw <2 x i8> %x, <i8 45, i8 45>
- %r = icmp ne <2 x i8> %m, <i8 15, i8 undef>
+ %r = icmp ne <2 x i8> %m, <i8 15, i8 poison>
ret <2 x i1> %r
}

; Undefs in vector constants are ok.

- define <2 x i1> @mul_nsw_srem_cmp_constant_vec_splat_undef2(<2 x i8> %x) {
+ define <2 x i1> @mul_nsw_srem_cmp_constant_vec_splat_poison2(<2 x i8> %x) {
- ; CHECK-LABEL: @mul_nsw_srem_cmp_constant_vec_splat_undef2(
+ ; CHECK-LABEL: @mul_nsw_srem_cmp_constant_vec_splat_poison2(
; CHECK-NEXT: ret <2 x i1> <i1 true, i1 true>
;
- %m = mul nsw <2 x i8> %x, <i8 undef, i8 45>
+ %m = mul nsw <2 x i8> %x, <i8 poison, i8 45>
%r = icmp ne <2 x i8> %m, <i8 15, i8 15>
ret <2 x i1> %r
}
@@ -257,67 +257,67 @@ define <2 x i8> @umin_maxval_commute(<2 x i8> %x) {
ret <2 x i8> %r
}

- define <2 x i8> @smax_maxval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @smax_maxval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @smax_maxval_partial_undef(
+ ; CHECK-LABEL: @smax_maxval_partial_poison(
; CHECK-NEXT: ret <2 x i8> <i8 127, i8 127>
;
- %r = call <2 x i8> @llvm.smax.v2i8(<2 x i8> <i8 undef, i8 127>, <2 x i8> %x)
+ %r = call <2 x i8> @llvm.smax.v2i8(<2 x i8> <i8 poison, i8 127>, <2 x i8> %x)
ret <2 x i8> %r
}

- define <2 x i8> @smin_minval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @smin_minval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @smin_minval_partial_undef(
+ ; CHECK-LABEL: @smin_minval_partial_poison(
; CHECK-NEXT: ret <2 x i8> <i8 -128, i8 -128>
;
- %r = call <2 x i8> @llvm.smin.v2i8(<2 x i8> %x, <2 x i8> <i8 -128, i8 undef>)
+ %r = call <2 x i8> @llvm.smin.v2i8(<2 x i8> %x, <2 x i8> <i8 -128, i8 poison>)
ret <2 x i8> %r
}

- define <2 x i8> @umax_maxval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @umax_maxval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @umax_maxval_partial_undef(
+ ; CHECK-LABEL: @umax_maxval_partial_poison(
; CHECK-NEXT: ret <2 x i8> <i8 -1, i8 -1>
;
- %r = call <2 x i8> @llvm.umax.v2i8(<2 x i8> <i8 255, i8 undef>, <2 x i8> %x)
+ %r = call <2 x i8> @llvm.umax.v2i8(<2 x i8> <i8 255, i8 poison>, <2 x i8> %x)
ret <2 x i8> %r
}

- define <2 x i8> @umin_minval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @umin_minval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @umin_minval_partial_undef(
+ ; CHECK-LABEL: @umin_minval_partial_poison(
; CHECK-NEXT: ret <2 x i8> zeroinitializer
;
- %r = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %x, <2 x i8> <i8 undef, i8 0>)
+ %r = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %x, <2 x i8> <i8 poison, i8 0>)
ret <2 x i8> %r
}

- define <2 x i8> @smax_minval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @smax_minval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @smax_minval_partial_undef(
+ ; CHECK-LABEL: @smax_minval_partial_poison(
; CHECK-NEXT: ret <2 x i8> [[X:%.*]]
;
- %r = call <2 x i8> @llvm.smax.v2i8(<2 x i8> <i8 undef, i8 -128>, <2 x i8> %x)
+ %r = call <2 x i8> @llvm.smax.v2i8(<2 x i8> <i8 poison, i8 -128>, <2 x i8> %x)
ret <2 x i8> %r
}

- define <2 x i8> @smin_maxval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @smin_maxval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @smin_maxval_partial_undef(
+ ; CHECK-LABEL: @smin_maxval_partial_poison(
; CHECK-NEXT: ret <2 x i8> [[X:%.*]]
;
- %r = call <2 x i8> @llvm.smin.v2i8(<2 x i8> %x, <2 x i8> <i8 undef, i8 127>)
+ %r = call <2 x i8> @llvm.smin.v2i8(<2 x i8> %x, <2 x i8> <i8 poison, i8 127>)
ret <2 x i8> %r
}

- define <2 x i8> @umax_minval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @umax_minval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @umax_minval_partial_undef(
+ ; CHECK-LABEL: @umax_minval_partial_poison(
; CHECK-NEXT: ret <2 x i8> [[X:%.*]]
;
- %r = call <2 x i8> @llvm.umax.v2i8(<2 x i8> <i8 0, i8 undef>, <2 x i8> %x)
+ %r = call <2 x i8> @llvm.umax.v2i8(<2 x i8> <i8 0, i8 poison>, <2 x i8> %x)
ret <2 x i8> %r
}

- define <2 x i8> @umin_maxval_partial_undef(<2 x i8> %x) {
+ define <2 x i8> @umin_maxval_partial_poison(<2 x i8> %x) {
- ; CHECK-LABEL: @umin_maxval_partial_undef(
+ ; CHECK-LABEL: @umin_maxval_partial_poison(
; CHECK-NEXT: ret <2 x i8> [[X:%.*]]
;
- %r = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %x, <2 x i8> <i8 255, i8 undef>)
+ %r = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %x, <2 x i8> <i8 255, i8 poison>)
ret <2 x i8> %r
}

@@ -743,7 +743,7 @@ define <2 x i8> @umin_umin_constants_commute2(<2 x i8> %x) {
; CHECK-NEXT: ret <2 x i8> [[M]]
;
%m = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %x, <2 x i8> <i8 127, i8 127>)
- %m2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %m, <2 x i8> <i8 200, i8 undef>)
+ %m2 = call <2 x i8> @llvm.umin.v2i8(<2 x i8> %m, <2 x i8> <i8 200, i8 poison>)
ret <2 x i8> %m2
}

@@ -803,7 +803,7 @@ define <2 x i8> @smin_smin_constants(<2 x i8> %x) {
; CHECK-NEXT: ret <2 x i8> [[M]]
;
%m = call <2 x i8> @llvm.smin.v2i8(<2 x i8> %x, <2 x i8> <i8 7, i8 7>)
- %m2 = call <2 x i8> @llvm.smin.v2i8(<2 x i8> <i8 undef, i8 9>, <2 x i8> %m)
+ %m2 = call <2 x i8> @llvm.smin.v2i8(<2 x i8> <i8 poison, i8 9>, <2 x i8> %m)
ret <2 x i8> %m2
}
@@ -1410,6 +1410,7 @@ TEST(InstructionsTest, GetSplat) {
LLVMContext Ctx;
Type *Int32Ty = Type::getInt32Ty(Ctx);
Constant *CU = UndefValue::get(Int32Ty);
+ Constant *CP = PoisonValue::get(Int32Ty);
Constant *C0 = ConstantInt::get(Int32Ty, 0);
Constant *C1 = ConstantInt::get(Int32Ty, 1);

@@ -1419,30 +1420,42 @@ TEST(InstructionsTest, GetSplat) {
Constant *Splat1Undef = ConstantVector::get({CU, CU, C1, CU});
Constant *NotSplat = ConstantVector::get({C1, C1, C0, C1 ,C1});
Constant *NotSplatUndef = ConstantVector::get({CU, C1, CU, CU ,C0});
+ Constant *Splat0Poison = ConstantVector::get({C0, CP, C0, CP});
+ Constant *Splat1Poison = ConstantVector::get({CP, CP, C1, CP});
+ Constant *NotSplatPoison = ConstantVector::get({CP, C1, CP, CP, C0});

- // Default - undefs are not allowed.
+ // Default - undef/poison is not allowed.
EXPECT_EQ(Splat0->getSplatValue(), C0);
EXPECT_EQ(Splat1->getSplatValue(), C1);
EXPECT_EQ(Splat0Undef->getSplatValue(), nullptr);
EXPECT_EQ(Splat1Undef->getSplatValue(), nullptr);
+ EXPECT_EQ(Splat0Poison->getSplatValue(), nullptr);
+ EXPECT_EQ(Splat1Poison->getSplatValue(), nullptr);
EXPECT_EQ(NotSplat->getSplatValue(), nullptr);
EXPECT_EQ(NotSplatUndef->getSplatValue(), nullptr);
+ EXPECT_EQ(NotSplatPoison->getSplatValue(), nullptr);

- // Disallow undefs explicitly.
+ // Disallow poison explicitly.
EXPECT_EQ(Splat0->getSplatValue(false), C0);
EXPECT_EQ(Splat1->getSplatValue(false), C1);
EXPECT_EQ(Splat0Undef->getSplatValue(false), nullptr);
EXPECT_EQ(Splat1Undef->getSplatValue(false), nullptr);
+ EXPECT_EQ(Splat0Poison->getSplatValue(false), nullptr);
+ EXPECT_EQ(Splat1Poison->getSplatValue(false), nullptr);
EXPECT_EQ(NotSplat->getSplatValue(false), nullptr);
EXPECT_EQ(NotSplatUndef->getSplatValue(false), nullptr);
+ EXPECT_EQ(NotSplatPoison->getSplatValue(false), nullptr);

- // Allow undefs.
+ // Allow poison but not undef.
EXPECT_EQ(Splat0->getSplatValue(true), C0);
EXPECT_EQ(Splat1->getSplatValue(true), C1);
- EXPECT_EQ(Splat0Undef->getSplatValue(true), C0);
+ EXPECT_EQ(Splat0Undef->getSplatValue(true), nullptr);
- EXPECT_EQ(Splat1Undef->getSplatValue(true), C1);
+ EXPECT_EQ(Splat1Undef->getSplatValue(true), nullptr);
+ EXPECT_EQ(Splat0Poison->getSplatValue(true), C0);
+ EXPECT_EQ(Splat1Poison->getSplatValue(true), C1);
EXPECT_EQ(NotSplat->getSplatValue(true), nullptr);
EXPECT_EQ(NotSplatUndef->getSplatValue(true), nullptr);
+ EXPECT_EQ(NotSplatPoison->getSplatValue(true), nullptr);
}

TEST(InstructionsTest, SkipDebug) {
@@ -1222,40 +1222,51 @@ TEST_F(PatternMatchTest, VectorUndefInt) {
EXPECT_TRUE(match(VectorZeroPoison, m_Zero()));

const APInt *C;
- // Regardless of whether undefs are allowed,
+ // Regardless of whether poison is allowed,
- // a fully undef constant does not match.
+ // a fully undef/poison constant does not match.
EXPECT_FALSE(match(ScalarUndef, m_APInt(C)));
- EXPECT_FALSE(match(ScalarUndef, m_APIntForbidUndef(C)));
+ EXPECT_FALSE(match(ScalarUndef, m_APIntForbidPoison(C)));
- EXPECT_FALSE(match(ScalarUndef, m_APIntAllowUndef(C)));
+ EXPECT_FALSE(match(ScalarUndef, m_APIntAllowPoison(C)));
EXPECT_FALSE(match(VectorUndef, m_APInt(C)));
- EXPECT_FALSE(match(VectorUndef, m_APIntForbidUndef(C)));
+ EXPECT_FALSE(match(VectorUndef, m_APIntForbidPoison(C)));
- EXPECT_FALSE(match(VectorUndef, m_APIntAllowUndef(C)));
+ EXPECT_FALSE(match(VectorUndef, m_APIntAllowPoison(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APInt(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APIntForbidPoison(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APIntAllowPoison(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APInt(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APIntForbidPoison(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APIntAllowPoison(C)));

// We can always match simple constants and simple splats.
C = nullptr;
EXPECT_TRUE(match(ScalarZero, m_APInt(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(ScalarZero, m_APIntForbidUndef(C)));
+ EXPECT_TRUE(match(ScalarZero, m_APIntForbidPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(ScalarZero, m_APIntAllowUndef(C)));
+ EXPECT_TRUE(match(ScalarZero, m_APIntAllowPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
EXPECT_TRUE(match(VectorZero, m_APInt(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(VectorZero, m_APIntForbidUndef(C)));
+ EXPECT_TRUE(match(VectorZero, m_APIntForbidPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(VectorZero, m_APIntAllowUndef(C)));
+ EXPECT_TRUE(match(VectorZero, m_APIntAllowPoison(C)));
EXPECT_TRUE(C->isZero());

- // Whether splats with undef can be matched depends on the matcher.
+ // Splats with undef are never allowed.
+ // Whether splats with poison can be matched depends on the matcher.
EXPECT_FALSE(match(VectorZeroUndef, m_APInt(C)));
- EXPECT_FALSE(match(VectorZeroUndef, m_APIntForbidUndef(C)));
+ EXPECT_FALSE(match(VectorZeroUndef, m_APIntForbidPoison(C)));
+ EXPECT_FALSE(match(VectorZeroUndef, m_APIntAllowPoison(C)));

+ EXPECT_FALSE(match(VectorZeroPoison, m_APInt(C)));
+ EXPECT_FALSE(match(VectorZeroPoison, m_APIntForbidPoison(C)));
C = nullptr;
- EXPECT_TRUE(match(VectorZeroUndef, m_APIntAllowUndef(C)));
+ EXPECT_TRUE(match(VectorZeroPoison, m_APIntAllowPoison(C)));
EXPECT_TRUE(C->isZero());
}
@@ -1387,43 +1398,63 @@ TEST_F(PatternMatchTest, VectorUndefFloat) {
EXPECT_FALSE(match(VectorNaNPoison, m_Finite()));

const APFloat *C;
- // Regardless of whether undefs are allowed,
+ // Regardless of whether poison is allowed,
- // a fully undef constant does not match.
+ // a fully undef/poison constant does not match.
EXPECT_FALSE(match(ScalarUndef, m_APFloat(C)));
- EXPECT_FALSE(match(ScalarUndef, m_APFloatForbidUndef(C)));
+ EXPECT_FALSE(match(ScalarUndef, m_APFloatForbidPoison(C)));
- EXPECT_FALSE(match(ScalarUndef, m_APFloatAllowUndef(C)));
+ EXPECT_FALSE(match(ScalarUndef, m_APFloatAllowPoison(C)));
EXPECT_FALSE(match(VectorUndef, m_APFloat(C)));
- EXPECT_FALSE(match(VectorUndef, m_APFloatForbidUndef(C)));
+ EXPECT_FALSE(match(VectorUndef, m_APFloatForbidPoison(C)));
- EXPECT_FALSE(match(VectorUndef, m_APFloatAllowUndef(C)));
+ EXPECT_FALSE(match(VectorUndef, m_APFloatAllowPoison(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APFloat(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APFloatForbidPoison(C)));
+ EXPECT_FALSE(match(ScalarPoison, m_APFloatAllowPoison(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APFloat(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APFloatForbidPoison(C)));
+ EXPECT_FALSE(match(VectorPoison, m_APFloatAllowPoison(C)));

// We can always match simple constants and simple splats.
C = nullptr;
EXPECT_TRUE(match(ScalarZero, m_APFloat(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(ScalarZero, m_APFloatForbidUndef(C)));
+ EXPECT_TRUE(match(ScalarZero, m_APFloatForbidPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(ScalarZero, m_APFloatAllowUndef(C)));
+ EXPECT_TRUE(match(ScalarZero, m_APFloatAllowPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
EXPECT_TRUE(match(VectorZero, m_APFloat(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(VectorZero, m_APFloatForbidUndef(C)));
+ EXPECT_TRUE(match(VectorZero, m_APFloatForbidPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(VectorZero, m_APFloatAllowUndef(C)));
+ EXPECT_TRUE(match(VectorZero, m_APFloatAllowPoison(C)));
EXPECT_TRUE(C->isZero());

- // Whether splats with undef can be matched depends on the matcher.
+ // Splats with undef are never allowed.
+ // Whether splats with poison can be matched depends on the matcher.
EXPECT_FALSE(match(VectorZeroUndef, m_APFloat(C)));
- EXPECT_FALSE(match(VectorZeroUndef, m_APFloatForbidUndef(C)));
+ EXPECT_FALSE(match(VectorZeroUndef, m_APFloatForbidPoison(C)));
+ EXPECT_FALSE(match(VectorZeroUndef, m_APFloatAllowPoison(C)));
+ EXPECT_FALSE(match(VectorZeroUndef, m_Finite(C)));

+ EXPECT_FALSE(match(VectorZeroPoison, m_APFloat(C)));
+ EXPECT_FALSE(match(VectorZeroPoison, m_APFloatForbidPoison(C)));
C = nullptr;
- EXPECT_TRUE(match(VectorZeroUndef, m_APFloatAllowUndef(C)));
+ EXPECT_TRUE(match(VectorZeroPoison, m_APFloatAllowPoison(C)));
EXPECT_TRUE(C->isZero());
C = nullptr;
- EXPECT_TRUE(match(VectorZeroUndef, m_Finite(C)));
+ EXPECT_TRUE(match(VectorZeroPoison, m_Finite(C)));
+ EXPECT_TRUE(C->isZero());
+ EXPECT_FALSE(match(VectorZeroPoison, m_APFloat(C)));
+ EXPECT_FALSE(match(VectorZeroPoison, m_APFloatForbidPoison(C)));
+ C = nullptr;
+ EXPECT_TRUE(match(VectorZeroPoison, m_APFloatAllowPoison(C)));
+ EXPECT_TRUE(C->isZero());
+ C = nullptr;
+ EXPECT_TRUE(match(VectorZeroPoison, m_Finite(C)));
EXPECT_TRUE(C->isZero());
}