CPU Atomic / Memory Barrier
fetch_add (seq_cst)
x64
; linux clang x64 (AT&T syntax)
; seq_cst fetch_add maps to a single LOCKed RMW on x86-64: the LOCK prefix
; makes XADD both atomic and a full memory barrier, so no separate fence is
; emitted. XADD also writes the previous value back into the source (%esi).
lock xaddl %esi, 8(%rsp)
; windows x64 (Intel/MASM syntax)
; identical mapping; the old value is returned in edx
lock xadd dword ptr [rbp-10h],edx
ARMv7A
@ seq_cst fetch_add on ARMv7-A: full barrier + LL/SC retry loop + full barrier
dmb ish            @ leading full barrier for seq_cst ordering
.LBB0_1:
ldrex r0, [r4]     @ load-exclusive the current value into r0
add r1, r0, #1     @ r1 = old + 1
strex r0, r1, [r4] @ attempt store; status -> r0 (0 = success; note old value is overwritten here)
cmp r0, #0
bne .LBB0_1        @ exclusive reservation lost -> retry the whole LL/SC sequence
dmb ish            @ trailing full barrier for seq_cst ordering
ARMv8.0A
// seq_cst fetch_add on ARMv8.0-A: acquire/release exclusives (ldaxr/stlxr)
// replace the explicit dmb barriers needed on ARMv7.
.LBB0_1:
ldaxr w9, [x8]      // load-acquire exclusive: old value -> w9
add w1, w9, #1      // w1 = old + 1
stlxr w9, w1, [x8]  // store-release exclusive; status -> w9 (0 = success; clobbers the old value)
cbnz w9, .LBB0_1    // retry on spurious/contended failure
ARMv8.1A (LSE: typically a single ldaddal instruction -- sequence not captured in this excerpt)
fetch_and (seq_cst)
x64
; linux clang (AT&T syntax)
; x86 has no atomic fetch-AND instruction, so the compiler emits a CMPXCHG
; loop: read the value, compute old & 4, and try to publish it.
; NOTE(review): clang generates this as a retry loop; the loop-back branch
; (jne to the top) is not shown in this excerpt -- confirm against full output.
movl (%rsp), %ecx
movl %ecx, %edx
andl $4, %edx              ; edx = old & 4 (desired new value)
movl %ecx, %eax            ; cmpxchg compares against eax (expected = old)
lock cmpxchgl %edx, (%rsp) ; if *mem == eax: *mem = edx; else eax = *mem
movl %eax, %ecx            ; carry the freshly observed value for the next attempt
; windows (Intel/MASM syntax) -- same CAS-loop shape
prefetchw [rbp-10h]        ; hint: fetch the cache line for writing
mov eax,dword ptr [rbp-10h]
nop
mov ecx,eax
and ecx,4                  ; ecx = old & 4 (desired new value)
lock cmpxchg dword ptr [rbp-10h],ecx
ARMv8.1A
fetch_or (seq_cst)
ARMv8.1A
fetch_xor (seq_cst)
ARMv8.1A
exchange (seq_cst)
x64
; linux clang (AT&T syntax)
; XCHG with a memory operand is implicitly LOCKed, so a single instruction
; gives an atomic seq_cst exchange; the old value lands in the register.
xchgl %esi, (%rsp)
; windows (Intel/MASM syntax) -- same implicit-LOCK mapping
xchg edx,dword ptr [rbp-10h]
ARMv8.1A (LSE: typically a single swpal instruction -- sequence not captured in this excerpt)
compare_exchange_weak (seq_cst)
x64
; x86 CAS: expected value in eax; ZF set on success, and eax receives the
; observed value on failure. x86's CMPXCHG cannot fail spuriously, so
; compare_exchange_weak and _strong compile to the same instruction.
lock cmpxchgl %ecx, 8(%rsp)
lock cmpxchg dword ptr [rbp-10h],edi
ARMv7A
@ compare_exchange_weak on ARMv7-A, expected = 0, desired = 3.
@ "weak" = a single strex attempt with no retry loop: a lost reservation
@ simply reports failure to the caller.
ldrex r2, [r4]     @ load-exclusive current value
cmp r2, #0         @ compare against expected (0)
beq .LBB0_44
clrex              @ mismatch: drop the exclusive reservation
b .LBB0_45         @ take the failure path
.LBB0_44:
dmb ish            @ barrier from the seq_cst mapping (placement as emitted by the compiler)
mov r0, #3         @ desired value
strex r1, r0, [r4] @ single attempt; status -> r1 (0 = success)
cmp r1, #0
beq .LBB0_53       @ success path (label outside this excerpt)
.LBB0_45:
ARMv8.0A
// compare_exchange_weak on ARMv8.0-A, expected = 0, desired = 3.
// Single stlxr attempt (no retry loop) -- that is what makes it "weak".
ldaxr w2, [x19]      // load-acquire exclusive current value
cbz w2, .LBB0_18     // matches expected (0)?
clrex                // mismatch: release the exclusive reservation
b .LBB0_19           // failure path
.LBB0_18:
orr w8, wzr, #0x3    // materialize desired value 3 (orr-with-wzr = mov immediate)
stlxr w9, w8, [x19]  // store-release exclusive; status -> w9 (0 = success)
cbz w9, .LBB0_51     // success path (label outside this excerpt)
.LBB0_19:
ARMv8.1A (LSE: typically a single casal instruction -- sequence not captured in this excerpt)
compare_exchange_strong (seq_cst)
x64
; Identical instruction to the weak variant: x86 CMPXCHG never fails
; spuriously, so no retry loop is needed for compare_exchange_strong.
lock cmpxchgl %ebx, 8(%rsp)
lock cmpxchg dword ptr [rbp-10h],r13d
ARMv7A
@ compare_exchange_strong on ARMv7-A, expected = 3, desired in r0.
@ "strong" = the LL/SC loop retries on spurious strex failure; it only
@ reports failure when the loaded value really differs from expected.
@ r1 is used as the boolean result (1 = exchanged, 0 = not).
.LBB0_47:
ldrex r2, [r4]     @ load-exclusive current value
cmp r2, #3         @ compare against expected (3)
bne .LBB0_50       @ genuine mismatch -> failure path
strex r1, r0, [r4] @ try to store desired; status -> r1 (0 = success)
cmp r1, #0
bne .LBB0_47       @ spurious failure -> retry the loop (strong semantics)
dmb ish            @ trailing barrier on success (seq_cst)
mov r1, #1         @ result = true
b .LBB0_51
.LBB0_50:
clrex              @ drop the exclusive reservation
mov r1, #0         @ result = false
dmb ish            @ trailing barrier on failure (seq_cst)
.LBB0_51:
ARMv8.0A
// compare_exchange_strong on ARMv8.0-A, expected = 3, desired = 0.
// Spurious stlxr failures loop back (strong semantics); w1 holds the
// boolean result (1 = exchanged, 0 = not).
.LBB0_21:
ldaxr w2, [x19]      // load-acquire exclusive current value
cmp w2, #3           // compare against expected (3)
b.ne .LBB0_24        // genuine mismatch -> failure path
stlxr w8, wzr, [x19] // store-release desired (0 via wzr); status -> w8
cbnz w8, .LBB0_21    // spurious failure -> retry the loop
orr w1, wzr, #0x1    // result = true (orr-with-wzr = mov immediate)
b .LBB0_25
.LBB0_24:
clrex                // drop the exclusive reservation
mov w1, wzr          // result = false
.LBB0_25:
ARMv8.1A
load
x64 (relaxed/acquire/consume/seq_cst)
ARMv7A (relaxed)
ARMv7A (acquire/consume/seq_cst)
ARMv8.xA (relaxed)
ARMv8.xA (acquire/consume/seq_cst)
store
x64 (relaxed/release)
x64 (seq_cst)
ARMv7A (relaxed)
ARMv7A (release)
ARMv7A (seq_cst)
@ seq_cst store on ARMv7-A: plain store bracketed by full barriers --
@ leading dmb orders prior accesses, trailing dmb prevents reordering
@ with subsequent seq_cst operations.
dmb ish
str r5, [r4]
dmb ish
ARMv8.xA (relaxed)
ARMv8.xA (release/seq_cst)