llvm-project/clang/test/CodeGen/atomic-ops-libcall.c
Logikable 5fdd094837
[clang][CodeGen] Emit atomic IR in place of optimized libcalls. (#73176)
In the beginning, Clang only emitted atomic IR for operations it knew the underlying microarch had instructions for, meaning it required significant knowledge of the target. Later, the backend acquired the ability to lower IR to libcalls. To avoid duplicating logic and improve logic locality, we'd like to move as much as possible to the backend.

There are many ways to describe this change. For example, it reduces the variables Clang uses to decide whether to emit libcalls or IR down to only the atomic's size.
2024-02-12 09:33:09 -08:00
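A minimal sketch of the new contract, assuming a 32-bit target like the one in the RUN line below (the function name here is illustrative, not part of the test): when the atomic's size permits, Clang now emits atomic IR and leaves the instruction-vs-libcall decision to the backend.

  // Sketch only; not part of the checked test.
  int fetch_add_one(_Atomic int *p) {
    return __c11_atomic_fetch_add(p, 1, memory_order_seq_cst);
    // Frontend IR: atomicrmw add ptr %p, i32 1 seq_cst, align 4
    // On armv5e the backend, not Clang, may then lower this to a
    // libcall, since the target has no atomic instructions.
  }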

// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// RUN: %clang_cc1 -triple armv5e-none-linux-gnueabi -emit-llvm %s -o - | FileCheck %s
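// armv5e predates ARMv6's exclusive load/store instructions (ldrex/strex), so
// none of the atomicrmw operations below have a direct instruction lowering on
// this target; Clang emits the atomic IR anyway and leaves any libcall
// decision to the backend.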
enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};
// CHECK-LABEL: define dso_local ptr @test_c11_atomic_fetch_add_int_ptr(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 12, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret ptr [[TMP3]]
//
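// Note: for pointer atomics the C11 builtins scale the operand by sizeof(T),
// which is why the 3 below is checked above as "store i32 12" (3 * sizeof(int)).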
int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local ptr @test_c11_atomic_fetch_sub_int_ptr(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 20, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret ptr [[TMP3]]
//
int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_c11_atomic_fetch_add_int(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 3, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_c11_atomic_fetch_sub_int(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 5, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local ptr @fp2a(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca ptr, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 4, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] monotonic, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load ptr, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret ptr [[TMP3]]
//
int *fp2a(int **p) {
  // Note: the GNU builtins do not multiply by sizeof(T)! Unlike the C11
  // pointer tests above, the 4 here stays "store i32 4" in the IR.
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_add(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_add(int *p) {
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_sub(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_sub(int *p) {
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_and(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_and(int *p) {
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_or(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_or(int *p) {
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_xor(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_xor(int *p) {
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}
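// GCC-style nand computes ~(old & val), not ~old & val; LLVM's atomicrmw nand
// below has the same semantics.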
// CHECK-LABEL: define dso_local i32 @test_atomic_fetch_nand(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw nand ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: store i32 [[TMP2]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP3]]
//
int test_atomic_fetch_nand(int *p) {
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}
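// The __atomic_<op>_fetch forms return the new value, while atomicrmw returns
// the old one, so each test below also checks the recomputation of the result,
// e.g. __atomic_add_fetch(p, v, o) == __atomic_fetch_add(p, v, o) + v. For
// nand_fetch that recomputation is the trailing "and" plus "xor -1".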
// CHECK-LABEL: define dso_local i32 @test_atomic_add_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw add ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP4]]
//
int test_atomic_add_fetch(int *p) {
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_sub_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw sub ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP4]]
//
int test_atomic_sub_fetch(int *p) {
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_and_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw and ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP4]]
//
int test_atomic_and_fetch(int *p) {
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_or_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw or ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP4]]
//
int test_atomic_or_fetch(int *p) {
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_xor_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xor ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = xor i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: store i32 [[TMP3]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP4]]
//
int test_atomic_xor_fetch(int *p) {
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}
// CHECK-LABEL: define dso_local i32 @test_atomic_nand_fetch(
// CHECK-SAME: ptr noundef [[P:%.*]]) #[[ATTR0]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[P_ADDR:%.*]] = alloca ptr, align 4
// CHECK-NEXT: [[DOTATOMICTMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store ptr [[P]], ptr [[P_ADDR]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[P_ADDR]], align 4
// CHECK-NEXT: store i32 55, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[DOTATOMICTMP]], align 4
// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw nand ptr [[TMP0]], i32 [[TMP1]] seq_cst, align 4
// CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[TMP1]]
// CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
// CHECK-NEXT: store i32 [[TMP4]], ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, ptr [[ATOMIC_TEMP]], align 4
// CHECK-NEXT: ret i32 [[TMP5]]
//
int test_atomic_nand_fetch(int *p) {
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}
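// A hedged sketch, not part of the checked output: the case the frontend still
// handles itself is an oversized atomic. The 16-byte type below is
// hypothetical; on this 32-bit target it has no atomicrmw lowering, so loads
// and stores of it would be expected to go through the generic __atomic_load /
// __atomic_store libcalls rather than atomic IR.
//
//   typedef struct { char bytes[16]; } Big16;  // hypothetical
//   void copy_big(_Atomic Big16 *dst, _Atomic Big16 *src) {
//     Big16 tmp = __c11_atomic_load(src, memory_order_seq_cst);
//     __c11_atomic_store(dst, tmp, memory_order_seq_cst);
//   }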