// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -target-feature +bf16 -mvscale-min=4 -mvscale-max=4 -disable-llvm-passes -emit-llvm -o - %s | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));
typedef uint8_t uint8_vec_t __attribute__((vector_size(N / 64)));

fixed_bool_t global_pred;
fixed_int32_t global_vec;

// CHECK-LABEL: @foo(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    store <vscale x 16 x i1> [[PRED:%.*]], ptr [[PRED_ADDR]], align 2
// CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 16
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PRED_ADDR]], align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @global_pred, align 2
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr @global_pred, align 2
// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP3]], i64 0)
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE2]] to <vscale x 16 x i1>
// CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP4]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP5]], ptr [[PG]], align 2
// CHECK-NEXT:    [[TMP6:%.*]] = load <vscale x 16 x i1>, ptr [[PG]], align 2
// CHECK-NEXT:    [[TMP7:%.*]] = load <16 x i32>, ptr @global_vec, align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP7]], i64 0)
// CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 16
// CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP6]])
// CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP9]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP8]])
// CHECK-NEXT:    [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP10]], i64 0)
// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE3]], ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP11:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP11]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
//
fixed_int32_t foo(svbool_t pred, svint32_t vec) {
  svbool_t pg = svand_z(pred, global_pred, global_pred);
  return svadd_m(pg, global_vec, vec);
}

// CHECK-LABEL: @test_ptr_to_global(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[GLOBAL_VEC_PTR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr @global_vec, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[GLOBAL_VEC_PTR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[TMP0]], align 16
// CHECK-NEXT:    store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t test_ptr_to_global() {
  fixed_int32_t *global_vec_ptr;
  global_vec_ptr = &global_vec;
  return *global_vec_ptr;
}
//
// Test casting pointer from fixed-length array to scalable vector.
// CHECK-LABEL: @array_arg(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[ARR_ADDR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    store ptr [[ARR:%.*]], ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[ARR_ADDR]], align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds <16 x i32>, ptr [[TMP0]], i64 0
// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, ptr [[ARRAYIDX]], align 16
// CHECK-NEXT:    store <16 x i32> [[TMP1]], ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP2]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t array_arg(fixed_int32_t arr[]) {
  return arr[0];
}

// CHECK-LABEL: @address_of_array_idx(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <8 x i8>, align 2
// CHECK-NEXT:    [[ARR:%.*]] = alloca [3 x <8 x i8>], align 2
// CHECK-NEXT:    [[PARR:%.*]] = alloca ptr, align 8
// CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], ptr [[ARR]], i64 0, i64 0
// CHECK-NEXT:    store ptr [[ARRAYIDX]], ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PARR]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr [[TMP0]], align 2
// CHECK-NEXT:    store <8 x i8> [[TMP1]], ptr [[RETVAL]], align 2
// CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i8>, ptr [[RETVAL]], align 2
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP2]], i64 0)
// CHECK-NEXT:    [[TMP3:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP3]]
//
fixed_bool_t address_of_array_idx() {
  fixed_bool_t arr[3];
  fixed_bool_t *parr;
  parr = &arr[0];
  return *parr;
}

// CHECK-LABEL: @test_cast(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[RETVAL:%.*]] = alloca <16 x i32>, align 16
// CHECK-NEXT:    [[PRED_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca <vscale x 4 x i32>, align 16
// CHECK-NEXT:    [[XX:%.*]] = alloca <8 x i8>, align 8
// CHECK-NEXT:    [[YY:%.*]] = alloca <8 x i8>, align 8
// CHECK-NEXT:    [[PG:%.*]] = alloca <vscale x 16 x i1>, align 2
// CHECK-NEXT:    store <vscale x 16 x i1> [[PRED:%.*]], ptr [[PRED_ADDR]], align 2
// CHECK-NEXT:    store <vscale x 4 x i32> [[VEC:%.*]], ptr [[VEC_ADDR]], align 16
// CHECK-NEXT:    store <8 x i8> <i8 1, i8 2, i8 3, i8 4, i8 0, i8 0, i8 0, i8 0>, ptr [[XX]], align 8
// CHECK-NEXT:    store <8 x i8> <i8 2, i8 5, i8 4, i8 6, i8 0, i8 0, i8 0, i8 0>, ptr [[YY]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i1>, ptr [[PRED_ADDR]], align 2
// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, ptr @global_pred, align 2
// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[TMP1]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    [[TMP3:%.*]] = load <8 x i8>, ptr [[XX]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, ptr [[YY]], align 8
// CHECK-NEXT:    [[ADD:%.*]] = add <8 x i8> [[TMP3]], [[TMP4]]
// CHECK-NEXT:    [[CASTFIXEDSVE2:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> poison, <8 x i8> [[ADD]], i64 0)
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <vscale x 2 x i8> [[CASTFIXEDSVE2]] to <vscale x 16 x i1>
// CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.and.z.nxv16i1(<vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP5]])
// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], ptr [[PG]], align 2
// CHECK-NEXT:    [[TMP7:%.*]] = load <vscale x 16 x i1>, ptr [[PG]], align 2
// CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i32>, ptr @global_vec, align 16
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP8]], i64 0)
// CHECK-NEXT:    [[TMP9:%.*]] = load <vscale x 4 x i32>, ptr [[VEC_ADDR]], align 16
// CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[TMP7]])
// CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.add.nxv4i32(<vscale x 4 x i1> [[TMP10]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[TMP9]])
// CHECK-NEXT:    [[CASTFIXEDSVE3:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP11]], i64 0)
// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE3]], ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[TMP12:%.*]] = load <16 x i32>, ptr [[RETVAL]], align 16
// CHECK-NEXT:    [[CASTSCALABLESVE4:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> poison, <16 x i32> [[TMP12]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE4]]
//
fixed_int32_t test_cast(svbool_t pred, svint32_t vec) {
  uint8_vec_t xx = {1, 2, 3, 4};
  uint8_vec_t yy = {2, 5, 4, 6};
  svbool_t pg = svand_z(pred, global_pred, xx + yy);
  return svadd_m(pg, global_vec, vec);
}
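
// A minimal illustrative sketch (hypothetical helpers, not covered by the
// autogenerated assertions above): when __ARM_FEATURE_SVE_BITS matches the
// arm_sve_vector_bits attribute, the fixed-length and sizeless SVE types
// convert implicitly in both directions, which is what lets the functions
// above return intrinsic results as fixed_int32_t.
fixed_int32_t to_fixed(svint32_t v) { return v; }
svint32_t to_sizeless(fixed_int32_t v) { return v; }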