
In the beginning, Clang only emitted atomic IR for operations it knew the underlying microarchitecture had instructions for, which required significant knowledge of the target. Later, the backend acquired the ability to lower IR to libcalls. To avoid duplicating logic and to improve its locality, we'd like to move as much of this as possible to the backend. One way to describe this change: it reduces the information Clang uses to decide whether to emit a libcall or atomic IR to a single variable, the atomic operation's size.
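To illustrate the size-based split, here is a minimal sketch, separate from the test below: an atomic object wider than anything this 32-bit target can handle inline is expected to end up as a call into the __atomic_* runtime library, while the int and long long cases in the test stay as atomic IR that the backend either expands to instructions or lowers to libcalls itself. The pair16 type and load_pair function are hypothetical names used only for this example.

/* Hypothetical example, not part of the test file below: a 16-byte object is
 * wider than any atomic this target supports inline, so the generic
 * __atomic_load builtin is expected to become a call to the __atomic_load
 * library routine rather than atomic IR. */
typedef struct { long long a, b; } pair16;

void load_pair(pair16 *src, pair16 *dst) {
  __atomic_load(src, dst, __ATOMIC_SEQ_CST);
}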
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=thumbv7m-none--eabi -target-cpu cortex-m3 | FileCheck %s

int i;
long long l;

typedef enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
} memory_order;

void test_presence(void)
{
  // CHECK-LABEL: @test_presence
  // CHECK: atomicrmw add ptr {{.*}} seq_cst, align 4
  __atomic_fetch_add(&i, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub ptr {{.*}} seq_cst, align 4
  __atomic_fetch_sub(&i, 1, memory_order_seq_cst);
  // CHECK: load atomic i32, ptr {{.*}} seq_cst, align 4
  int r;
  __atomic_load(&i, &r, memory_order_seq_cst);
  // CHECK: store atomic i32 {{.*}} seq_cst, align 4
  r = 0;
  __atomic_store(&i, &r, memory_order_seq_cst);

  // CHECK: atomicrmw add ptr {{.*}} seq_cst, align 8
  __atomic_fetch_add(&l, 1, memory_order_seq_cst);
  // CHECK: atomicrmw sub ptr {{.*}} seq_cst, align 8
  __atomic_fetch_sub(&l, 1, memory_order_seq_cst);
  // CHECK: load atomic i64, ptr {{.*}} seq_cst, align 8
  long long rl;
  __atomic_load(&l, &rl, memory_order_seq_cst);
  // CHECK: store atomic i64 {{.*}}, ptr {{.*}} seq_cst, align 8
  rl = 0;
  __atomic_store(&l, &rl, memory_order_seq_cst);
}
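As with other files under clang/test, the RUN line above is what llvm-lit executes: it substitutes %clang_cc1 and %s, compiles this file to LLVM IR for the thumbv7m/cortex-m3 target, and pipes the output into FileCheck, which verifies the CHECK lines in order within the CHECK-LABEL block.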