Currently, there is a mess with the prototypes of the non-atomic
bitops across the different architectures:

	ret	bool, int, unsigned long
	nr	int, long, unsigned int, unsigned long
	addr	volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but can sometimes make
the compiler angry when it's not handy at all.
Adjust all the prototypes to the following standard:

	ret	bool				retval can be only 0 or 1
	nr	unsigned long			native; signed makes no sense
	addr	volatile unsigned long *	bitmaps are arrays of ulongs

Next, some architectures don't define 'arch_' versions as they
don't support instrumentation, others do. To make sure there is
always the same set of callables present and to ease any potential
future changes, make them all follow the rule:

 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;

and place the non-prefixed definitions into a new file in
asm-generic to be included by non-instrumented architectures.

Finally, add some static assertions in order to prevent people
from making a mess in this room again.
I also used the %__always_inline attribute consistently, so that
they always get resolved to the actual operations.

Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Alexander Lobakin <alexandr.lobakin@intel.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Yury Norov <yury.norov@gmail.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Yury Norov <yury.norov@gmail.com>
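
For reference, the standardized prototype set looks like this (a
sketch distilled from the rules above, not a verbatim quote from the
tree; the arch_-prefixed variants in the file below use the same
signatures):

	void __set_bit(unsigned long nr, volatile unsigned long *addr);
	void __clear_bit(unsigned long nr, volatile unsigned long *addr);
	void __change_bit(unsigned long nr, volatile unsigned long *addr);
	bool __test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
	bool __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
	bool __test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
	bool test_bit(unsigned long nr, const volatile unsigned long *addr);
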
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H

#include <linux/bits.h>

/*
 * The bit modifying instructions on SH-2A are only capable of working
 * with a 3-bit immediate, which signifies the shift position for the bit
 * being worked on.
 */
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr)		((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr)		((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		((nr) % BITS_PER_BYTE)
#endif
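
/*
 * Worked example (little-endian, for illustration): nr = 21 gives
 * BYTE_NUMBER(21) == 2 and BYTE_OFFSET(21) == 5, i.e. bit 5 of the
 * third byte of the bitmap. The offset is always in the range 0..7,
 * so it fits the 3-bit immediate that bset.b/bclr.b/bxor.b accept.
 */

/**
 * arch___set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */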
static __always_inline void
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bset.b %1, @(%O2,%0)	! __set_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p |= mask;
	}
}
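
/**
 * arch___clear_bit - Clear a bit in memory
 * @nr: the bit to clear
 * @addr: the address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */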
static __always_inline void
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bclr.b %1, @(%O2,%0)	! __clear_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p &= ~mask;
	}
}

/**
 * arch___change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __always_inline void
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (__builtin_constant_p(nr)) {
		__asm__ __volatile__ (
			"bxor.b %1, @(%O2,%0)	! __change_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p ^= mask;
	}
}

/**
 * arch___test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}
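
/*
 * Illustrative usage (names are hypothetical): the non-atomic
 * test-and-* helpers here rely on external serialization, e.g.
 *
 *	spin_lock(&map_lock);
 *	was_set = __test_and_set_bit(nr, bitmap);
 *	spin_unlock(&map_lock);
 */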

/**
 * arch___test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __always_inline bool
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/* WARNING: non-atomic and it can be reordered! */
static __always_inline bool
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * arch_test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __always_inline bool
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
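
/*
 * sh doesn't support instrumentation, so the non-prefixed versions of
 * the helpers above are picked up from the shared asm-generic file
 * included below (see the commit message at the top of this page).
 */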
#include <asm-generic/bitops/non-instrumented-non-atomic.h>

#endif /* __ASM_SH_BITOPS_OP32_H */