locking/atomic: make atomic*_{cmp,}xchg optional
Most architectures define the atomic/atomic64 xchg and cmpxchg operations in terms of arch_xchg and arch_cmpxchg respectively. Add fallbacks for these cases and remove the trivial cases from arch code. On some architectures the existing definitions are kept, as these are used to build other arch_atomic*() operations.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-5-mark.rutland@arm.com
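For context, the generic fallback this commit relies on takes roughly the following shape. This is a sketch of the kernel's generated arch-fallback pattern; the exact code emitted into include/linux/atomic/atomic-arch-fallback.h may differ in detail.

/*
 * Sketch of the fallback pattern: when an architecture does not provide
 * its own arch_atomic_{cmp,}xchg(), define it in terms of the plain
 * arch_{cmp,}xchg() on the atomic_t counter field.  The #ifndef guards
 * let architectures that still need their own definitions keep them.
 */
#ifndef arch_atomic_xchg
static __always_inline int
arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg
#endif

#ifndef arch_atomic_cmpxchg
static __always_inline int
arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
#endif

With these fallbacks in place, a per-arch header only needs to supply arch_xchg()/arch_cmpxchg(); trivial wrappers such as the ones deleted in the diff below become redundant.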
Commit d12157efc8 (parent a7bafa7969): 23 changed files with 179 additions and 265 deletions
@@ -207,13 +207,6 @@ ATOMIC64_FETCH_OP(xor, ^)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
-#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
-#define arch_atomic64_cmpxchg(v, old, new) \
-	(arch_cmpxchg(&((v)->counter), old, new))
-#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #define arch_atomic_add(i,v)	(void)arch_atomic_add_return((i), (v))
 #define arch_atomic_sub(i,v)	(void)arch_atomic_sub_return((i), (v))
 
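With the generic fallback present, trivial per-architecture definitions like the ones removed in this hunk are generated automatically, so deleting them is behavior-preserving. As the commit message notes, architectures whose other arch_atomic*() operations are built on top of these definitions keep them.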