diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index ab16ec77be10..60619dbe2f58 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -804,9 +804,10 @@ public:
                        m_Value(TA), m_ConstantInt(R), m_ConstantInt(C))))
       return nullptr;
 
-    // Transpose of a transpose is a nop
+    // Transpose of a transpose is a nop when the shapes match.
     Value *TATA;
-    if (match(TA, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(TATA)))) {
+    if (match(TA, m_Intrinsic<Intrinsic::matrix_transpose>(
+                      m_Value(TATA), m_Specific(C), m_Specific(R)))) {
       updateShapeAndReplaceAllUsesWith(I, TATA);
       eraseFromParentAndMove(&I, II, BB);
       eraseFromParentAndMove(TA, II, BB);
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/transpose-fold.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/transpose-fold.ll
new file mode 100644
index 000000000000..ef2e224f69c5
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/transpose-fold.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s
+
+; We can only fold matching transposes.
+
+define void @reshape(ptr %in, ptr %out) {
+; CHECK-LABEL: define void @reshape(
+; CHECK-SAME: ptr [[IN:%.*]], ptr [[OUT:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <4 x double>, ptr [[IN]], align 8
+; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <4 x double> [[COL_LOAD]], <4 x double> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> [[TMP1]], double [[TMP2]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> poison, double [[TMP4]], i64 0
+; CHECK-NEXT:    [[TMP6:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x double> [[TMP5]], double [[TMP6]], i64 1
+; CHECK-NEXT:    store <2 x double> [[TMP3]], ptr [[OUT]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, ptr [[OUT]], i64 2
+; CHECK-NEXT:    store <2 x double> [[TMP7]], ptr [[VEC_GEP]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <4 x double>, ptr %in, align 8
+  %1 = tail call <4 x double> @llvm.matrix.transpose.v4f64(<4 x double> %0, i32 4, i32 1)
+  %2 = tail call <4 x double> @llvm.matrix.transpose.v4f64(<4 x double> %1, i32 1, i32 4)
+  %3 = tail call <4 x double> @llvm.matrix.transpose.v4f64(<4 x double> %2, i32 2, i32 2)
+  %4 = tail call <4 x double> @llvm.matrix.transpose.v4f64(<4 x double> %3, i32 2, i32 2)
+  %5 = tail call <4 x double> @llvm.matrix.transpose.v4f64(<4 x double> %4, i32 2, i32 2)
+  store <4 x double> %5, ptr %out, align 8
+  ret void
+}
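
For reference, a minimal sketch of the case the tightened pattern still folds (the function @fold_ok and its types are hypothetical and not part of the patch): the inner transpose's row/column operands must be the flipped operands of the outer transpose, so the pair is a true inverse rather than a reshape.

; Inner: 2x3 -> 3x2. Outer: 3x2 -> 2x3, with operands (i32 3, i32 2) matching
; m_Specific(C)/m_Specific(R), so the pass replaces %t2 with %x. Had %t2 instead
; used operands (i32 2, i32 3), reinterpreting the 3x2 value of %t1 as 2x3 as
; the test above does, the check would reject the fold and keep the transpose.
define <6 x float> @fold_ok(<6 x float> %x) {
  %t1 = call <6 x float> @llvm.matrix.transpose.v6f32(<6 x float> %x, i32 2, i32 3)
  %t2 = call <6 x float> @llvm.matrix.transpose.v6f32(<6 x float> %t1, i32 3, i32 2)
  ret <6 x float> %t2
}
declare <6 x float> @llvm.matrix.transpose.v6f32(<6 x float>, i32 immarg, i32 immarg)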