futex: Ensure futex_atomic_cmpxchg_inatomic() is present

The boot-time detection of futex_atomic_cmpxchg_inatomic() has a bug on
some 32-bit arm builds, and Thomas Gleixner suggested that setting
CONFIG_HAVE_FUTEX_CMPXCHG would avoid the problem, as it is always present
anyway.
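
For context, the detection in question is a boot-time probe along these
lines, paraphrased from kernel/futex/core.c (a sketch, not necessarily
the exact code being fixed):

  static void __init futex_detect_cmpxchg(void)
  {
  #ifndef CONFIG_HAVE_FUTEX_CMPXCHG
  	u32 curval;

  	/*
  	 * Probe with a NULL user address, which is expected to fault:
  	 * -EFAULT means futex_atomic_cmpxchg_inatomic() is implemented,
  	 * -ENOSYS means the architecture stubbed it out.
  	 */
  	if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
  		futex_cmpxchg_enabled = 1;
  #endif
  }

With CONFIG_HAVE_FUTEX_CMPXCHG set, this probe is compiled out entirely.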

Looking into which other architectures could do the same showed that almost
all architectures have it, the exceptions being:

 - some old 32-bit MIPS uniprocessor cores without ll/sc
 - one xtensa variant with no SMP
 - 32-bit SPARC when built for SMP

Fix MIPS and Xtensa by rearranging the generic code so it can be used
as a fallback.
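
The fallback relies on the usual asm-generic override pattern; a minimal
sketch, using the names that appear in the diff below:

  /* arch header (e.g. arch/mips/include/asm/futex.h): announce the
   * arch-specific implementations before pulling in the generic ones */
  #define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
  #define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
  #include <asm-generic/futex.h>

  /* the arch code can then call the generic preempt_disable()-based
   * helpers as its non-SMP fallback instead of returning -ENOSYS: */
  ret = arch_futex_atomic_op_inuser_local(op, oparg, oval, uaddr);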

For SPARC, the SMP definition just ends up turning off futex anyway, so
this can be done at Kconfig time instead. Note that sparc32 glibc
requires the CASA instruction for its mutexes anyway; it is only
available when running on SPARCv9 or LEON CPUs, and would need to be
implemented in the sparc32 kernel for those.
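
For illustration, the stub that a sparc32 SMP kernel previously picked
up from asm-generic/futex.h (deleted by this patch) was effectively:

  static inline int
  futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
  			      u32 oldval, u32 newval)
  {
  	return -ENOSYS;	/* such futex operations always fail at run time */
  }

so disabling FUTEX at Kconfig time for that configuration does not take
away anything that worked before.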

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Max Filippov <jcmvbkbc@gmail.com>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Rich Felker <dalias@libc.org>
Link: https://lore.kernel.org/r/20211026100432.1730393-1-arnd@kernel.org
commit 3f2bedabb6
parent 2202e15b2b
Author:    Arnd Bergmann, 2021-10-26 12:03:47 +02:00
Committer: Thomas Gleixner

 4 files changed, 36 insertions(+), 33 deletions(-)

diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h

@@ -19,7 +19,11 @@
 #include <asm/sync.h>
 #include <asm/war.h>
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)		\
+#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
+#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
+#include <asm-generic/futex.h>
+
+#define __futex_atomic_op(op, insn, ret, oldval, uaddr, oparg)		\
 {									\
 	if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {	\
 		__asm__ __volatile__(					\
@@ -80,9 +84,11 @@
 		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)						\
 		: "memory");						\
-	} else								\
-		ret = -ENOSYS;						\
+	} else {							\
+		/* fallback for non-SMP */				\
+		ret = arch_futex_atomic_op_inuser_local(op, oparg, oval,\
+							uaddr);		\
+	}								\
 }
 
 static inline int
 arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
@@ -94,23 +100,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+		__futex_atomic_op(op, "move $1, %z5", ret, oldval, uaddr, oparg);
 		break;
 
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("addu $1, %1, %z5",
+		__futex_atomic_op(op, "addu $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("or $1, %1, %z5",
+		__futex_atomic_op(op, "or $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and $1, %1, %z5",
+		__futex_atomic_op(op, "and $1, %1, %z5",
 				ret, oldval, uaddr, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("xor $1, %1, %z5",
+		__futex_atomic_op(op, "xor $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	default:
@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		  : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		    "i" (-EFAULT)
 		  : "memory");
-	} else
-		return -ENOSYS;
+	} else {
+		return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
+	}
 
 	*uval = val;
 	return ret;

diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h

@@ -16,6 +16,10 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
+#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
+#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
+#include <asm-generic/futex.h>
+
 #if XCHAL_HAVE_EXCLUSIVE
 #define __futex_atomic_op(insn, ret, old, uaddr, arg)	\
 	__asm__ __volatile(				\
@@ -105,7 +109,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 
 	return ret;
 #else
-	return -ENOSYS;
+	return arch_futex_atomic_op_inuser_local(op, oparg, oval, uaddr);
 #endif
 }
@@ -156,7 +160,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 	return ret;
 #else
-	return -ENOSYS;
+	return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
 #endif
 }

diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h

@@ -6,15 +6,22 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifndef futex_atomic_cmpxchg_inatomic
 #ifndef CONFIG_SMP
 /*
  * The following implementation only for uniprocessor machines.
  * It relies on preempt_disable() ensuring mutual exclusion.
  *
  */
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval) \
+	futex_atomic_cmpxchg_inatomic_local_generic(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr) \
+	arch_futex_atomic_op_inuser_local_generic(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
 
 /**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
  *			  argument and comparison of the previous
  *			  futex value with another constant.
  *
@@ -28,7 +35,7 @@
  * -ENOSYS - Operation not supported
  */
 static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
 	int oldval, ret;
 	u32 tmp;
@@ -75,7 +82,7 @@ out_pagefault_enable:
 }
 
 /**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
  *				     uaddr with newval if the current value is
  *				     oldval.
  * @uval:	pointer to store content of @uaddr
@@ -87,10 +94,9 @@ out_pagefault_enable:
  * 0 - On success
  * -EFAULT - User access resulted in a page fault
  * -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
 	u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return 0;
 }
 
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
-	return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-			      u32 oldval, u32 newval)
-{
-	return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
 #endif

diff --git a/init/Kconfig b/init/Kconfig
--- a/init/Kconfig
+++ b/init/Kconfig

@@ -1579,6 +1579,7 @@ config BASE_FULL
 
 config FUTEX
 	bool "Enable futex support" if EXPERT
+	depends on !(SPARC32 && SMP)
 	default y
 	imply RT_MUTEXES
 	help