Reapplies #134068. The first patch was missing a check to prevent attempts to pair an SVE fill/spill with a Neon load/store instruction, which could happen specifically when the Neon instruction used an unscaled addressing form.
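For context, a minimal sketch of the kind of pattern the check described above guards against. This function is illustrative only and not part of the checked-in test below; the function name and the byte offset of 17 are hypothetical, and the comments describe the expected lowering rather than checked output: an SVE spill followed by a Neon store whose offset is not a multiple of 16 bytes, so the Neon access is expected to use the unscaled form and must not be considered for pairing with the SVE spill.

define void @sve_spill_next_to_unscaled_neon(ptr %p, <vscale x 16 x i8> %z, <16 x i8> %q) {
  ; A full scalable-vector store is expected to lower to an SVE spill (str z).
  store <vscale x 16 x i8> %z, ptr %p, align 16
  ; Offset 17 is not a multiple of 16, so a 128-bit Neon store here is expected to
  ; use the unscaled encoding (stur q) rather than a scaled str/stp, and should not
  ; be paired with the SVE spill above.
  %qaddr = getelementptr inbounds i8, ptr %p, i64 17
  store <16 x i8> %q, ptr %qaddr, align 1
  ret void
}
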
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=aarch64_be-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-BE
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,ldp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-LDPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve,stp-aligned-only -aarch64-sve-vector-bits-min=128 -aarch64-sve-vector-bits-max=128 < %s | FileCheck %s --check-prefixes=CHECK-STPALIGNEDONLY
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK-OFF
; RUN: llc -verify-machineinstrs -mtriple=aarch64-linux-gnu -mattr=+sve -aarch64-sve-vector-bits-min=256 -aarch64-sve-vector-bits-max=256 < %s | FileCheck %s --check-prefixes=CHECK-OFF

define void @nxv16i8(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #1, mul vl]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #1, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #1, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
  %vscale = tail call i64 @llvm.vscale()
  %vl = shl nuw nsw i64 %vscale, 4
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
  ret void
}

define void @nxv16i8_max_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_max_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-NEXT: stp q0, q1, [x1, #1008]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_max_range:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: rdvl x8, #1
; CHECK-BE-NEXT: mov x9, #-1008 // =0xfffffffffffffc10
; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
; CHECK-BE-NEXT: lsr x8, x8, #4
; CHECK-BE-NEXT: mov w11, #1008 // =0x3f0
; CHECK-BE-NEXT: mov w12, #1024 // =0x400
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: mul x9, x8, x9
; CHECK-BE-NEXT: mul x10, x8, x10
; CHECK-BE-NEXT: mul x11, x8, x11
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x9]
; CHECK-BE-NEXT: mul x8, x8, x12
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x10]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1, #1008]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_max_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0, #-1024]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #63, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_max_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-63, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #63, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #64, mul vl]
; CHECK-OFF-NEXT: ret
  %vscale = tail call i64 @llvm.vscale()
  %ldoff1 = mul i64 %vscale, -1024
  %ldoff2 = mul i64 %vscale, -1008
  %stoff1 = mul i64 %vscale, 1008
  %stoff2 = mul i64 %vscale, 1024
  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
  ret void
}

define void @nxv16i8_outside_range(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_outside_range:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-NEXT: str z0, [x1, #64, mul vl]
; CHECK-NEXT: str z1, [x1, #65, mul vl]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_outside_range:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: rdvl x8, #1
; CHECK-BE-NEXT: mov x9, #-1040 // =0xfffffffffffffbf0
; CHECK-BE-NEXT: mov x10, #-1024 // =0xfffffffffffffc00
; CHECK-BE-NEXT: lsr x8, x8, #4
; CHECK-BE-NEXT: mov w11, #1024 // =0x400
; CHECK-BE-NEXT: mov w12, #1040 // =0x410
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: mul x9, x8, x9
; CHECK-BE-NEXT: mul x10, x8, x10
; CHECK-BE-NEXT: mul x11, x8, x11
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0, x9]
; CHECK-BE-NEXT: mul x8, x8, x12
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, x10]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1, x11]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, x8]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_outside_range:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1, #64, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #65, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_outside_range:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0, #-65, mul vl]
; CHECK-OFF-NEXT: ldr z1, [x0, #-64, mul vl]
; CHECK-OFF-NEXT: str z0, [x1, #64, mul vl]
; CHECK-OFF-NEXT: str z1, [x1, #65, mul vl]
; CHECK-OFF-NEXT: ret
  %vscale = tail call i64 @llvm.vscale()
  %ldoff1 = mul i64 %vscale, -1040
  %ldoff2 = mul i64 %vscale, -1024
  %stoff1 = mul i64 %vscale, 1024
  %stoff2 = mul i64 %vscale, 1040
  %ldptr1 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff1
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %ldoff2
  %stptr1 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff1
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %stoff2
  %ld1 = load <vscale x 16 x i8>, ptr %ldptr1, align 1
  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
  store <vscale x 16 x i8> %ld1, ptr %stptr1, align 1
  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
  ret void
}

define void @nxv16i8_2vl_stride(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv16i8_2vl_stride:
; CHECK: // %bb.0:
; CHECK-NEXT: ldr z0, [x0]
; CHECK-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-NEXT: str z0, [x1]
; CHECK-NEXT: str z1, [x1, #2, mul vl]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv16i8_2vl_stride:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.b
; CHECK-BE-NEXT: ld1b { z0.b }, p0/z, [x0]
; CHECK-BE-NEXT: ld1b { z1.b }, p0/z, [x0, #2, mul vl]
; CHECK-BE-NEXT: st1b { z0.b }, p0, [x1]
; CHECK-BE-NEXT: st1b { z1.b }, p0, [x1, #2, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv16i8_2vl_stride:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldr z0, [x0]
; CHECK-STPALIGNEDONLY-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: str z0, [x1]
; CHECK-STPALIGNEDONLY-NEXT: str z1, [x1, #2, mul vl]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv16i8_2vl_stride:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #2, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #2, mul vl]
; CHECK-OFF-NEXT: ret
  %vscale = tail call i64 @llvm.vscale()
  %vl = shl nuw nsw i64 %vscale, 5
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
  %ld1 = load <vscale x 16 x i8>, ptr %ldptr, align 1
  %ld2 = load <vscale x 16 x i8>, ptr %ldptr2, align 1
  store <vscale x 16 x i8> %ld1, ptr %stptr, align 1
  store <vscale x 16 x i8> %ld2, ptr %stptr2, align 1
  ret void
}

define void @nxv2f64_32b_aligned(ptr %ldptr, ptr %stptr) {
; CHECK-LABEL: nxv2f64_32b_aligned:
; CHECK: // %bb.0:
; CHECK-NEXT: ldp q0, q1, [x0]
; CHECK-NEXT: stp q0, q1, [x1]
; CHECK-NEXT: ret
;
; CHECK-BE-LABEL: nxv2f64_32b_aligned:
; CHECK-BE: // %bb.0:
; CHECK-BE-NEXT: ptrue p0.d
; CHECK-BE-NEXT: ld1d { z0.d }, p0/z, [x0]
; CHECK-BE-NEXT: ld1d { z1.d }, p0/z, [x0, #1, mul vl]
; CHECK-BE-NEXT: st1d { z0.d }, p0, [x1]
; CHECK-BE-NEXT: st1d { z1.d }, p0, [x1, #1, mul vl]
; CHECK-BE-NEXT: ret
;
; CHECK-LDPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-LDPALIGNEDONLY: // %bb.0:
; CHECK-LDPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-LDPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-LDPALIGNEDONLY-NEXT: ret
;
; CHECK-STPALIGNEDONLY-LABEL: nxv2f64_32b_aligned:
; CHECK-STPALIGNEDONLY: // %bb.0:
; CHECK-STPALIGNEDONLY-NEXT: ldp q0, q1, [x0]
; CHECK-STPALIGNEDONLY-NEXT: stp q0, q1, [x1]
; CHECK-STPALIGNEDONLY-NEXT: ret
;
; CHECK-OFF-LABEL: nxv2f64_32b_aligned:
; CHECK-OFF: // %bb.0:
; CHECK-OFF-NEXT: ldr z0, [x0]
; CHECK-OFF-NEXT: ldr z1, [x0, #1, mul vl]
; CHECK-OFF-NEXT: str z0, [x1]
; CHECK-OFF-NEXT: str z1, [x1, #1, mul vl]
; CHECK-OFF-NEXT: ret
  %vscale = tail call i64 @llvm.vscale()
  %vl = shl nuw nsw i64 %vscale, 4
  %ldptr2 = getelementptr inbounds nuw i8, ptr %ldptr, i64 %vl
  %stptr2 = getelementptr inbounds nuw i8, ptr %stptr, i64 %vl
  %ld1 = load <vscale x 2 x double>, ptr %ldptr, align 32
  %ld2 = load <vscale x 2 x double>, ptr %ldptr2, align 32
  store <vscale x 2 x double> %ld1, ptr %stptr, align 32
  store <vscale x 2 x double> %ld2, ptr %stptr2, align 32
  ret void
}