[msan][NFCI] Generalize handleIntrinsicByApplyingToShadow to allow alternative intrinsic for shadows (#124831)

https://github.com/llvm/llvm-project/pull/124159 uses
handleIntrinsicByApplyingToShadow for horizontal add/sub, but Vitaly
recommends always using the add version for the shadow to avoid false
negatives on fully uninitialized data
(https://github.com/llvm/llvm-project/issues/124662): horizontal sub
applied to an all-ones shadow subtracts equal adjacent elements,
yielding an all-zero shadow that looks fully initialized.

This patch lays the groundwork by generalizing
handleIntrinsicByApplyingToShadow to allow using a different intrinsic
(of the same type as the original intrinsic) for the shadow. Planned
work will apply it to horizontal sub.
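
As a sketch of the generalized interface: callers now pass the intrinsic ID
used for the shadow computation explicitly, so a handler can substitute a
related intrinsic. For example (hypothetical handler, assuming the
MemorySanitizerVisitor context; the phsub-to-phadd mapping is illustrative of
the planned follow-up, not part of this commit):

    // Hypothetical handler for llvm.x86.ssse3.phsub.w.128 (sketch only).
    void visitHorizontalSub(IntrinsicInst &I) {
      // Horizontal sub of a fully uninitialized vector subtracts equal
      // adjacent all-ones shadow elements, cancelling to an all-zero
      // (i.e., "initialized") shadow. Using the matching *add* intrinsic
      // for the shadow avoids that false negative.
      handleIntrinsicByApplyingToShadow(
          I, /*shadowIntrinsicID=*/Intrinsic::x86_ssse3_phadd_w_128,
          /*trailingVerbatimArgs=*/0);
    }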
Author: Thurston Dang, 2025-01-28 12:35:07 -08:00 (committed via GitHub)
Commit: 7bd9c780e3 (parent: bfefa15cc1)

@@ -4049,7 +4049,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     // consider this an acceptable tradeoff for performance.
     // To make shadow propagation precise, we want the equivalent of
     // "horizontal OR", but this is not available.
-    return handleIntrinsicByApplyingToShadow(I, /* trailingVerbatimArgs */ 0);
+    return handleIntrinsicByApplyingToShadow(
+        I, /*shadowIntrinsicID=*/I.getIntrinsicID(),
+        /*trailingVerbatimArgs*/ 0);
   }
 
   /// Handle Arm NEON vector store intrinsics (vst{2,3,4}, vst1x_{2,3,4},
@@ -4156,6 +4158,10 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///    shadow[out] =
   ///        intrinsic(shadow[var1], shadow[var2], opType) | shadow[opType]
   ///
+  /// Typically, shadowIntrinsicID will be specified by the caller to be
+  /// I.getIntrinsicID(), but the caller can choose to replace it with another
+  /// intrinsic of the same type.
+  ///
   /// CAUTION: this assumes that the intrinsic will handle arbitrary
   ///          bit-patterns (for example, if the intrinsic accepts floats for
   ///          var1, we require that it doesn't care if inputs are NaNs).
@@ -4165,6 +4171,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
   ///
   /// The origin is approximated using setOriginForNaryOp.
   void handleIntrinsicByApplyingToShadow(IntrinsicInst &I,
+                                         Intrinsic::ID shadowIntrinsicID,
                                          unsigned int trailingVerbatimArgs) {
     IRBuilder<> IRB(&I);
@@ -4188,7 +4195,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     }
 
     CallInst *CI =
-        IRB.CreateIntrinsic(I.getType(), I.getIntrinsicID(), ShadowArgs);
+        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
     Value *CombinedShadow = CI;
 
     // Combine the computed shadow with the shadow of trailing args
@@ -4664,7 +4671,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
     case Intrinsic::aarch64_neon_tbx3:
     case Intrinsic::aarch64_neon_tbx4: {
      // The last trailing argument (index register) should be handled verbatim
-      handleIntrinsicByApplyingToShadow(I, 1);
+      handleIntrinsicByApplyingToShadow(
+          I, /*shadowIntrinsicID=*/I.getIntrinsicID(),
+          /*trailingVerbatimArgs*/ 1);
       break;
     }
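
For context, a rough sketch of how the shadow call is now assembled under this
scheme (not the verbatim LLVM implementation: bitcasts, origin details, and
edge cases are omitted, and it assumes the MemorySanitizerVisitor members
getShadow/setShadow/setOriginForNaryOp):

    // Shadows are computed for the leading args; the last
    // trailingVerbatimArgs args are passed through unchanged, and their
    // shadows are OR'd into the result afterwards.
    SmallVector<Value *, 8> ShadowArgs;
    for (unsigned i = 0; i < I.arg_size() - trailingVerbatimArgs; i++)
      ShadowArgs.push_back(getShadow(&I, i));     // shadow of operand i
    for (unsigned i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size(); i++)
      ShadowArgs.push_back(I.getArgOperand(i));   // operand passed verbatim

    // The shadow intrinsic may differ from I's (e.g., hadd for hsub).
    CallInst *CI =
        IRB.CreateIntrinsic(I.getType(), shadowIntrinsicID, ShadowArgs);
    Value *CombinedShadow = CI;

    // A verbatim arg is still data: OR in its shadow so an uninitialized
    // trailing arg (e.g., a tbx index register) poisons the result.
    for (unsigned i = I.arg_size() - trailingVerbatimArgs; i < I.arg_size(); i++)
      CombinedShadow = IRB.CreateOr(getShadow(&I, i), CombinedShadow);

    setShadow(&I, CombinedShadow);
    setOriginForNaryOp(I);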