We'd like all architectures to convert to ARCH_ATOMIC, as once all architectures are converted it will be possible to make significant cleanups to the atomics headers, and this will make it much easier to generically enable atomic functionality (e.g. debug logic in the instrumented wrappers).

As a step towards that, this patch migrates csky to ARCH_ATOMIC. The arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common code wraps these with optional instrumentation to provide the regular functions.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Guo Ren <guoren@kernel.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-17-mark.rutland@arm.com
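For context, the "common code wraps these with optional instrumentation" part refers to the generic instrumented atomic wrappers. A simplified sketch of that pattern (not the actual generated header, which covers every atomic operation and ordering variant) might look roughly like this:

#include <linux/compiler.h>     /* __always_inline */
#include <linux/instrumented.h> /* instrument_atomic_read_write() */
#include <linux/types.h>        /* atomic_t */

/*
 * Sketch only: the real wrappers are generated for each operation.
 * The generic layer adds KASAN/KCSAN instrumentation, then calls the
 * arch-provided arch_*() primitive.
 */
static __always_inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_cmpxchg(v, old, new);
}

The arch-side primitives that this patch renames to arch_*() for csky live in arch/csky/include/asm/cmpxchg.h, reproduced below.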
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CMPXCHG_H
#define __ASM_CSKY_CMPXCHG_H

#ifdef CONFIG_SMP
#include <asm/barrier.h>

extern void __bad_xchg(void);

#define __xchg_relaxed(new, ptr, size)                          \
({                                                              \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
        __typeof__(*(ptr)) __ret;                               \
        unsigned long tmp;                                      \
        switch (size) {                                         \
        case 4:                                                 \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       mov             %1, %2   \n"            \
                "       stex.w          %1, (%3) \n"            \
                "       bez             %1, 1b   \n"            \
                        : "=&r" (__ret), "=&r" (tmp)            \
                        : "r" (__new), "r"(__ptr)               \
                        :);                                     \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
        }                                                       \
        __ret;                                                  \
})

#define arch_xchg_relaxed(ptr, x) \
                (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))

#define __cmpxchg_relaxed(ptr, old, new, size)                  \
({                                                              \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
        __typeof__(new) __tmp;                                  \
        __typeof__(old) __old = (old);                          \
        __typeof__(*(ptr)) __ret;                               \
        switch (size) {                                         \
        case 4:                                                 \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       cmpne           %0, %4   \n"            \
                "       bt              2f       \n"            \
                "       mov             %1, %2   \n"            \
                "       stex.w          %1, (%3) \n"            \
                "       bez             %1, 1b   \n"            \
                "2:                              \n"            \
                        : "=&r" (__ret), "=&r" (__tmp)          \
                        : "r" (__new), "r"(__ptr), "r"(__old)   \
                        :);                                     \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
        }                                                       \
        __ret;                                                  \
})

#define arch_cmpxchg_relaxed(ptr, o, n) \
        (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))

#define arch_cmpxchg(ptr, o, n)                                 \
({                                                              \
        __typeof__(*(ptr)) __ret;                               \
        __smp_release_fence();                                  \
        __ret = arch_cmpxchg_relaxed(ptr, o, n);                \
        __smp_acquire_fence();                                  \
        __ret;                                                  \
})

#else
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASM_CSKY_CMPXCHG_H */
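The size-4 cases above are standard load-exclusive/store-exclusive retry loops: ldex.w reads the current value, stex.w attempts the store and leaves a success flag in its source register, and bez branches back to label 1 to retry while that flag is zero. In __cmpxchg_relaxed(), cmpne/bt skip the store entirely when the observed value does not match the expected one, and the fully ordered arch_cmpxchg() is built by bracketing the relaxed operation with __smp_release_fence() and __smp_acquire_fence(). As a hypothetical illustration of how the primitive behaves (kernel callers normally go through the generic cmpxchg()/atomic_cmpxchg() wrappers rather than arch_cmpxchg() directly), a sketch assuming a 32-bit flag word:

/* Hypothetical sketch: try to claim a flag word, nonzero if we won. */
static inline int try_claim(unsigned int *flag)
{
	/*
	 * arch_cmpxchg() returns the value observed before the attempt;
	 * seeing 0 means our 1 was installed.
	 */
	return arch_cmpxchg(flag, 0U, 1U) == 0U;
}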