Paulo Matos 9571a28ee4 [WebAssembly] Add tests ensuring rotates persist
Due to the nature of WebAssembly, it's always better to keep
rotates instead of trying to optimize them. Commit 9485d983
disabled the generation of funnel shifts (fsh) for rotates;
these tests ensure that future changes do not alter that
behaviour for the Wasm backend, which tends to have different
optimization requirements than other architectures. Also see:
https://github.com/llvm/llvm-project/issues/62703

Differential Revision: https://reviews.llvm.org/D152126
2023-06-06 07:48:35 +02:00
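
The tests below pin down that clang keeps emitting the llvm.fshl intrinsic for the rotate builtins, which the Wasm backend can lower to its native i32.rotl/i64.rotl instructions. As a minimal standalone sketch (not part of the test, using a hypothetical rotl32 helper), the semantics __builtin_rotateleft32 implements are:

#include <stdint.h>
#include <stdio.h>

/* Rotate-left by n bits; the '& 31' on the right shift avoids an
 * undefined shift by 32 when n == 0. */
static uint32_t rotl32(uint32_t x, unsigned n) {
  n &= 31; /* the rotate count is taken modulo 32 */
  return (x << n) | (x >> ((32 - n) & 31));
}

int main(void) {
  uint32_t x = 0x12345678u & 0xFF00FF00u; /* masked input, as in test32 */
  printf("%08x\n", rotl32(x, 8));         /* prints 00560012 */
  return 0;
}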

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// RUN: %clang_cc1 -triple wasm32-unknown-unknown -o - -emit-llvm %s | FileCheck --check-prefix=WEBASSEMBLY32 %s
// RUN: %clang_cc1 -triple wasm64-unknown-unknown -o - -emit-llvm %s | FileCheck --check-prefix=WEBASSEMBLY64 %s
// WEBASSEMBLY32-LABEL: define i32 @test32
// WEBASSEMBLY32-SAME: (i32 noundef [[X:%.*]]) #[[ATTR0:[0-9]+]] {
// WEBASSEMBLY32-NEXT:  entry:
// WEBASSEMBLY32-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
// WEBASSEMBLY32-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
// WEBASSEMBLY32-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
// WEBASSEMBLY32-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -16711936
// WEBASSEMBLY32-NEXT:    [[TMP1:%.*]] = call i32 @llvm.fshl.i32(i32 [[AND]], i32 [[AND]], i32 8)
// WEBASSEMBLY32-NEXT:    ret i32 [[TMP1]]
//
// WEBASSEMBLY64-LABEL: define i32 @test32
// WEBASSEMBLY64-SAME: (i32 noundef [[X:%.*]]) #[[ATTR0:[0-9]+]] {
// WEBASSEMBLY64-NEXT:  entry:
// WEBASSEMBLY64-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
// WEBASSEMBLY64-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
// WEBASSEMBLY64-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
// WEBASSEMBLY64-NEXT:    [[AND:%.*]] = and i32 [[TMP0]], -16711936
// WEBASSEMBLY64-NEXT:    [[TMP1:%.*]] = call i32 @llvm.fshl.i32(i32 [[AND]], i32 [[AND]], i32 8)
// WEBASSEMBLY64-NEXT:    ret i32 [[TMP1]]
//
unsigned int test32(unsigned int x) {
  return __builtin_rotateleft32((x & 0xFF00FF00), 8);
}
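// Note: on wasm32 (an ILP32 target) 'unsigned long' is only 32 bits wide, so
// in test64 the 64-bit mask is applied to a zero-extended value and the
// rotated result is truncated back, which is why the WEBASSEMBLY32 checks
// below wrap the llvm.fshl.i64 call in a zext/trunc pair.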
// WEBASSEMBLY32-LABEL: define i32 @test64
// WEBASSEMBLY32-SAME: (i32 noundef [[X:%.*]]) #[[ATTR0]] {
// WEBASSEMBLY32-NEXT:  entry:
// WEBASSEMBLY32-NEXT:    [[X_ADDR:%.*]] = alloca i32, align 4
// WEBASSEMBLY32-NEXT:    store i32 [[X]], ptr [[X_ADDR]], align 4
// WEBASSEMBLY32-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X_ADDR]], align 4
// WEBASSEMBLY32-NEXT:    [[CONV:%.*]] = zext i32 [[TMP0]] to i64
// WEBASSEMBLY32-NEXT:    [[AND:%.*]] = and i64 [[CONV]], -71777214294589696
// WEBASSEMBLY32-NEXT:    [[TMP1:%.*]] = call i64 @llvm.fshl.i64(i64 [[AND]], i64 [[AND]], i64 8)
// WEBASSEMBLY32-NEXT:    [[CONV1:%.*]] = trunc i64 [[TMP1]] to i32
// WEBASSEMBLY32-NEXT:    ret i32 [[CONV1]]
//
// WEBASSEMBLY64-LABEL: define i64 @test64
// WEBASSEMBLY64-SAME: (i64 noundef [[X:%.*]]) #[[ATTR0]] {
// WEBASSEMBLY64-NEXT:  entry:
// WEBASSEMBLY64-NEXT:    [[X_ADDR:%.*]] = alloca i64, align 8
// WEBASSEMBLY64-NEXT:    store i64 [[X]], ptr [[X_ADDR]], align 8
// WEBASSEMBLY64-NEXT:    [[TMP0:%.*]] = load i64, ptr [[X_ADDR]], align 8
// WEBASSEMBLY64-NEXT:    [[AND:%.*]] = and i64 [[TMP0]], -71777214294589696
// WEBASSEMBLY64-NEXT:    [[TMP1:%.*]] = call i64 @llvm.fshl.i64(i64 [[AND]], i64 [[AND]], i64 8)
// WEBASSEMBLY64-NEXT:    ret i64 [[TMP1]]
//
unsigned long test64(unsigned long x) {
  return __builtin_rotateleft64((x & 0xFF00FF00FF00FF00L), 8);
}
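
If clang's codegen changes, the assertions above can be regenerated with the script named in the NOTE line; a typical invocation (the build directory and test path below are assumptions, not taken from this commit) is:

  llvm/utils/update_cc_test_checks.py --clang=build/bin/clang \
      clang/test/CodeGen/WebAssembly/wasm-rotate.c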