Mirror of https://gitee.com/bianbu-linux/linux-6.6, synced 2025-04-24 14:07:52 -04:00
SMP cross-CPU function-call updates for v6.4:
 - Remove diagnostics and adjust config for CSD lock diagnostics

 - Add a generic IPI-sending tracepoint, as currently there's no easy
   way to instrument IPI origins: it's arch dependent and for some major
   architectures it's not even consistently available.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-----BEGIN PGP SIGNATURE-----
iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmRK438RHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1jJ5Q/5AZ0HGpyqwdFK8GmGznyu5qjP5HwV9pPq
gZQScqSy4tZEeza4TFMi83CoXSg9uJ7GlYJqqQMKm78LGEPomnZtXXC7oWvTA9M5
M/jAvzytmvZloSCXV6kK7jzSejMHhag97J/BjTYhZYQpJ9T+hNC87XO6J6COsKr9
lPIYqkFrIkQNr6B0U11AQfFejRYP1ics2fnbnZL86G/zZAc6x8EveM3KgSer2iHl
KbrO+xcYyGY8Ef9P2F72HhEGFfM3WslpT1yzqR3sm4Y+fuMG0oW3qOQuMJx0ZhxT
AloterY0uo6gJwI0P9k/K4klWgz81Tf/zLb0eBAtY2uJV9Fo3YhPHuZC7jGPGAy3
JusW2yNYqc8erHVEMAKDUsl/1KN4TE2uKlkZy98wno+KOoMufK5MA2e2kPPqXvUi
Jk9RvFolnWUsexaPmCftti0OCv3YFiviVAJ/t0pchfmvvJA2da0VC9hzmEXpLJVF
25nBTV/1uAOrWvOpCyo3ElrC2CkQVkFmK5rXMDdvf6ib0Nid4vFcCkCSLVfu+ePB
11mi7QYro+CcnOug1K+yKogUDmsZgV/u1kUwgQzTIpZ05Kkb49gUiXw9L2RGcBJh
yoDoiI66KPR7PWQ2qBdQoXug4zfEEtWG0O9HNLB0FFRC3hu7I+HHyiUkBWs9jasK
PA5+V7HcQRk=
=Wp7f
-----END PGP SIGNATURE-----

Merge tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull SMP cross-CPU function-call updates from Ingo Molnar:

 - Remove diagnostics and adjust config for CSD lock diagnostics

 - Add a generic IPI-sending tracepoint, as currently there's no easy
   way to instrument IPI origins: it's arch dependent and for some major
   architectures it's not even consistently available.

* tag 'smp-core-2023-04-27' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  trace,smp: Trace all smp_function_call*() invocations
  trace: Add trace_ipi_send_cpu()
  sched, smp: Trace smp callback causing an IPI
  smp: reword smp call IPI comment
  treewide: Trace IPIs sent via smp_send_reschedule()
  irq_work: Trace self-IPIs sent via arch_irq_work_raise()
  smp: Trace IPIs sent via arch_send_call_function_ipi_mask()
  sched, smp: Trace IPIs sent via send_call_function_single_ipi()
  trace: Add trace_ipi_send_cpumask()
  kernel/smp: Make csdlock_debug= resettable
  locking/csd_lock: Remove per-CPU data indirection from CSD lock debugging
  locking/csd_lock: Remove added data from CSD lock debugging
  locking/csd_lock: Add Kconfig option for csd_debug default
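The tracepoints introduced below belong to the "ipi" trace event group, so on a kernel carrying these patches they can be enabled from user space through tracefs (assuming it is mounted at the usual /sys/kernel/tracing), for example:

    echo 1 > /sys/kernel/tracing/events/ipi/ipi_send_cpu/enable
    echo 1 > /sys/kernel/tracing/events/ipi/ipi_send_cpumask/enable
    cat /sys/kernel/tracing/trace_pipe

Each emitted event carries the target cpu (or cpumask), the callsite and the callback, matching the TRACE_EVENT definitions in the include/trace/events/ipi.h hunk further down.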
Commit f20730efbd: 33 changed files with 216 additions and 280 deletions.
@@ -912,15 +912,14 @@
     cs89x0_media=   [HW,NET]
                     Format: { rj45 | aui | bnc }
 
-    csdlock_debug=  [KNL] Enable debug add-ons of cross-CPU function call
-                    handling. When switched on, additional debug data is
-                    printed to the console in case a hanging CPU is
-                    detected, and that CPU is pinged again in order to try
-                    to resolve the hang situation.
-                    0: disable csdlock debugging (default)
-                    1: enable basic csdlock debugging (minor impact)
-                    ext: enable extended csdlock debugging (more impact,
-                         but more data)
+    csdlock_debug=  [KNL] Enable or disable debug add-ons of cross-CPU
+                    function call handling. When switched on,
+                    additional debug data is printed to the console
+                    in case a hanging CPU is detected, and that
+                    CPU is pinged again in order to try to resolve
+                    the hang situation. The default value of this
+                    option depends on the CSD_LOCK_WAIT_DEBUG_DEFAULT
+                    Kconfig option.
 
     dasd=           [HW,NET]
                     See header of drivers/s390/block/dasd_devmap.c.
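In practice the parameter now behaves as a plain boolean that can both enable and disable the diagnostics. Illustrative command-line values, following from the get_option() parsing added in kernel/smp.c below: booting with csdlock_debug=1 turns the basic CSD-lock debugging on, and csdlock_debug=0 turns it back off on a kernel whose CSD_LOCK_WAIT_DEBUG_DEFAULT Kconfig option defaults it to on. The old "ext" value is gone along with the extended debug code it used to enable.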
@@ -562,7 +562,7 @@ handle_ipi(struct pt_regs *regs)
 }
 
 void
-smp_send_reschedule(int cpu)
+arch_smp_send_reschedule(int cpu)
 {
 #ifdef DEBUG_IPI_MSG
     if (cpu == hard_smp_processor_id())

@@ -292,7 +292,7 @@ static void ipi_send_msg(const struct cpumask *callmap, enum ipi_msg_type msg)
         ipi_send_msg_one(cpu, msg);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     ipi_send_msg_one(cpu, IPI_RESCHEDULE);
 }

@@ -48,7 +48,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mpu.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 /*

@@ -749,7 +748,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
     ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -20,6 +20,8 @@
 #include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 
+#include <trace/events/ipi.h>
+
 #define OWL_CPU1_ADDR 0x50
 #define OWL_CPU1_FLAG 0x5c
 

@@ -51,7 +51,6 @@
 #include <asm/ptrace.h>
 #include <asm/virt.h>
 
-#define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);

@@ -979,7 +978,7 @@ void __init set_smp_ipi_range(int ipi_base, int n)
     ipi_setup(smp_processor_id());
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -140,7 +140,7 @@ void smp_send_stop(void)
     on_each_cpu(ipi_stop, NULL, 1);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -217,7 +217,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
     }
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     send_ipi(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -220,11 +220,11 @@ kdump_smp_send_init(void)
  * Called with preemption disabled.
  */
 void
-smp_send_reschedule (int cpu)
+arch_smp_send_reschedule (int cpu)
 {
     ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 /*
  * Called with preemption disabled.

@@ -155,11 +155,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
 {

@@ -66,7 +66,7 @@ extern void calculate_cpu_foreign_map(void);
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
     extern const struct plat_smp_ops *mp_ops; /* private */
 

@@ -173,7 +173,7 @@ void handle_IPI(unsigned int ipi_msg)
     }
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -246,8 +246,8 @@ void kgdb_roundup_cpus(void)
 inline void
 smp_send_stop(void) { send_IPI_allbutself(IPI_CPU_STOP); }
 
 void
-smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
+arch_smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
 
 void
 smp_send_all_nop(void)

@@ -61,6 +61,8 @@
 #include <asm/kup.h>
 #include <asm/fadump.h>
 
+#include <trace/events/ipi.h>
+
 #ifdef DEBUG
 #include <asm/udbg.h>
 #define DBG(fmt...) udbg_printf(fmt)

@@ -364,12 +366,12 @@ static inline void do_message_pass(int cpu, int msg)
 #endif
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     if (likely(smp_ops))
         do_message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {

@@ -43,6 +43,7 @@
 #include <linux/compiler.h>
 #include <linux/of.h>
 #include <linux/irqdomain.h>
+#include <linux/smp.h>
 
 #include <asm/ftrace.h>
 #include <asm/reg.h>

@@ -80,6 +81,8 @@
 #include <asm/dtl.h>
 #include <asm/plpar_wrappers.h>
 
+#include <trace/events/ipi.h>
+
 #include "book3s.h"
 #include "book3s_hv.h"
 

@@ -20,6 +20,8 @@
 #include <asm/opal.h>
 #include <asm/smp.h>
 
+#include <trace/events/ipi.h>
+
 #include "subcore.h"
 #include "powernv.h"
 

@@ -333,8 +333,8 @@ bool smp_crash_stop_failed(void)
 }
 #endif
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     send_ipi_single(cpu, IPI_RESCHEDULE);
 }
-EXPORT_SYMBOL_GPL(smp_send_reschedule);
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);

@@ -553,7 +553,7 @@ void arch_send_call_function_single_ipi(int cpu)
  * it goes straight through and wastes no time serializing
  * anything. Worst case is that we lose a reschedule ...
  */
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
 }

@@ -256,7 +256,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
         (bogosum / (5000/HZ)) % 100);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     mp_ops->send_ipi(cpu, SMP_MSG_RESCHEDULE);
 }

@@ -120,7 +120,7 @@ void cpu_panic(void)
 
 struct linux_prom_registers smp_penguin_ctable = { 0 };
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     /*
      * CPU model dependent way of implementing IPI generation targeting

@@ -1430,7 +1430,7 @@ static unsigned long send_cpu_poke(int cpu)
     return hv_err;
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     if (cpu == smp_processor_id()) {
         WARN_ON_ONCE(preemptible());

@@ -99,7 +99,7 @@ static inline void __noreturn play_dead(void)
     BUG();
 }
 
-static inline void smp_send_reschedule(int cpu)
+static inline void arch_smp_send_reschedule(int cpu)
 {
     smp_ops.smp_send_reschedule(cpu);
 }

@@ -27,6 +27,7 @@
 #include <linux/swap.h>
 #include <linux/rwsem.h>
 #include <linux/cc_platform.h>
+#include <linux/smp.h>
 
 #include <asm/apic.h>
 #include <asm/perf_event.h>

@@ -41,6 +42,9 @@
 #include <asm/fpu/api.h>
 
 #include <asm/virtext.h>
+
+#include <trace/events/ipi.h>
+
 #include "trace.h"
 
 #include "svm.h"

@@ -60,7 +60,9 @@
 #include <linux/mem_encrypt.h>
 #include <linux/entry-kvm.h>
 #include <linux/suspend.h>
+#include <linux/smp.h>
 
+#include <trace/events/ipi.h>
 #include <trace/events/kvm.h>
 
 #include <asm/debugreg.h>

@@ -391,7 +391,7 @@ void arch_send_call_function_single_ipi(int cpu)
     send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
-void smp_send_reschedule(int cpu)
+void arch_smp_send_reschedule(int cpu)
 {
     send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }

@@ -125,8 +125,15 @@ extern void smp_send_stop(void);
 /*
  * sends a 'reschedule' event to another CPU:
  */
-extern void smp_send_reschedule(int cpu);
+extern void arch_smp_send_reschedule(int cpu);
+/*
+ * scheduler_ipi() is inline so can't be passed as callback reason, but the
+ * callsite IP should be sufficient for root-causing IPIs sent from here.
+ */
+#define smp_send_reschedule(cpu) ({           \
+    trace_ipi_send_cpu(cpu, _RET_IP_, NULL);  \
+    arch_smp_send_reschedule(cpu);            \
+})
 
 /*
  * Prepare machine for booting other CPUs.
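The wrapper macro added above is what makes the treewide rename transparent to callers. As a rough illustration (an expansion sketch, not code taken from this series):

/* An unmodified call site such as:
 *
 *     smp_send_reschedule(cpu);
 *
 * now effectively becomes:
 *
 *     trace_ipi_send_cpu(cpu, _RET_IP_, NULL);
 *     arch_smp_send_reschedule(cpu);
 *
 * so every reschedule IPI is attributed to its caller via _RET_IP_
 * without touching the individual call sites.
 */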
@@ -35,6 +35,50 @@ TRACE_EVENT(ipi_raise,
     TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
 );
 
+TRACE_EVENT(ipi_send_cpu,
+
+    TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
+
+    TP_ARGS(cpu, callsite, callback),
+
+    TP_STRUCT__entry(
+        __field(unsigned int, cpu)
+        __field(void *, callsite)
+        __field(void *, callback)
+    ),
+
+    TP_fast_assign(
+        __entry->cpu = cpu;
+        __entry->callsite = (void *)callsite;
+        __entry->callback = callback;
+    ),
+
+    TP_printk("cpu=%u callsite=%pS callback=%pS",
+          __entry->cpu, __entry->callsite, __entry->callback)
+);
+
+TRACE_EVENT(ipi_send_cpumask,
+
+    TP_PROTO(const struct cpumask *cpumask, unsigned long callsite, void *callback),
+
+    TP_ARGS(cpumask, callsite, callback),
+
+    TP_STRUCT__entry(
+        __cpumask(cpumask)
+        __field(void *, callsite)
+        __field(void *, callback)
+    ),
+
+    TP_fast_assign(
+        __assign_cpumask(cpumask, cpumask_bits(cpumask));
+        __entry->callsite = (void *)callsite;
+        __entry->callback = callback;
+    ),
+
+    TP_printk("cpumask=%s callsite=%pS callback=%pS",
+          __get_cpumask(cpumask), __entry->callsite, __entry->callback)
+);
+
 DECLARE_EVENT_CLASS(ipi_handler,
 
     TP_PROTO(const char *reason),
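Since kernel/sched/core.c (further down) exports these tracepoints with EXPORT_TRACEPOINT_SYMBOL_GPL, a module can also attach a probe to them directly instead of going through tracefs. A minimal sketch, assuming the standard probe-registration helpers generated by TRACE_EVENT; the module structure and messages here are illustrative and not part of this series:

#include <linux/module.h>
#include <linux/tracepoint.h>
#include <trace/events/ipi.h>

/* Probe called on every ipi_send_cpu event; the first argument is the
 * private data passed at registration time (unused here), the rest
 * mirrors the TP_PROTO() of the tracepoint above. */
static void probe_ipi_send_cpu(void *ignore, const unsigned int cpu,
                               unsigned long callsite, void *callback)
{
    pr_info("IPI to CPU%u from %pS (callback %pS)\n",
            cpu, (void *)callsite, callback);
}

static int __init ipi_probe_init(void)
{
    /* register_trace_ipi_send_cpu() is generated by the TRACE_EVENT macro. */
    return register_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
}

static void __exit ipi_probe_exit(void)
{
    unregister_trace_ipi_send_cpu(probe_ipi_send_cpu, NULL);
    tracepoint_synchronize_unregister();
}

module_init(ipi_probe_init);
module_exit(ipi_probe_exit);
MODULE_LICENSE("GPL");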
@@ -22,6 +22,8 @@
 #include <asm/processor.h>
 #include <linux/kasan.h>
 
+#include <trace/events/ipi.h>
+
 static DEFINE_PER_CPU(struct llist_head, raised_list);
 static DEFINE_PER_CPU(struct llist_head, lazy_list);
 static DEFINE_PER_CPU(struct task_struct *, irq_workd);

@@ -74,6 +76,14 @@ void __weak arch_irq_work_raise(void)
      */
 }
 
+static __always_inline void irq_work_raise(struct irq_work *work)
+{
+    if (trace_ipi_send_cpu_enabled() && arch_irq_work_has_interrupt())
+        trace_ipi_send_cpu(smp_processor_id(), _RET_IP_, work->func);
+
+    arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {

@@ -99,7 +109,7 @@ static void __irq_work_queue_local(struct irq_work *work)
 
     /* If the work is "lazy", handle it from next tick if any */
     if (!lazy_work || tick_nohz_tick_stopped())
-        arch_irq_work_raise();
+        irq_work_raise(work);
 }
 
 /* Enqueue the irq work @work on the current CPU */
@@ -80,6 +80,7 @@
 #define CREATE_TRACE_POINTS
 #include <linux/sched/rseq_api.h>
 #include <trace/events/sched.h>
+#include <trace/events/ipi.h>
 #undef CREATE_TRACE_POINTS
 
 #include "sched.h"

@@ -95,6 +96,9 @@
 #include "../../io_uring/io-wq.h"
 #include "../smpboot.h"
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
+
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.

@@ -3848,14 +3852,20 @@ void sched_ttwu_pending(void *arg)
     rq_unlock_irqrestore(rq, &rf);
 }
 
-void send_call_function_single_ipi(int cpu)
+/*
+ * Prepare the scene for sending an IPI for a remote smp_call
+ *
+ * Returns true if the caller can proceed with sending the IPI.
+ * Returns false otherwise.
+ */
+bool call_function_single_prep_ipi(int cpu)
 {
-    struct rq *rq = cpu_rq(cpu);
-
-    if (!set_nr_if_polling(rq->idle))
-        arch_send_call_function_single_ipi(cpu);
-    else
+    if (set_nr_if_polling(cpu_rq(cpu)->idle)) {
         trace_sched_wake_idle_without_ipi(cpu);
+        return false;
+    }
+
+    return true;
 }
 
 /*
@@ -6,7 +6,7 @@
 
 extern void sched_ttwu_pending(void *arg);
 
-extern void send_call_function_single_ipi(int cpu);
+extern bool call_function_single_prep_ipi(int cpu);
 
 #ifdef CONFIG_SMP
 extern void flush_smp_call_function_queue(void);
kernel/smp.c (311 changed lines)
@@ -26,68 +26,15 @@
 #include <linux/sched/debug.h>
 #include <linux/jump_label.h>
 
+#include <trace/events/ipi.h>
+
 #include "smpboot.h"
 #include "sched/smp.h"
 
 #define CSD_TYPE(_csd) ((_csd)->node.u_flags & CSD_FLAG_TYPE_MASK)
 
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-union cfd_seq_cnt {
-    u64 val;
-    struct {
-        u64 src:16;
-        u64 dst:16;
-#define CFD_SEQ_NOCPU 0xffff
-        u64 type:4;
-#define CFD_SEQ_QUEUE   0
-#define CFD_SEQ_IPI     1
-#define CFD_SEQ_NOIPI   2
-#define CFD_SEQ_PING    3
-#define CFD_SEQ_PINGED  4
-#define CFD_SEQ_HANDLE  5
-#define CFD_SEQ_DEQUEUE 6
-#define CFD_SEQ_IDLE    7
-#define CFD_SEQ_GOTIPI  8
-#define CFD_SEQ_HDLEND  9
-        u64 cnt:28;
-    } u;
-};
-
-static char *seq_type[] = {
-    [CFD_SEQ_QUEUE]   = "queue",
-    [CFD_SEQ_IPI]     = "ipi",
-    [CFD_SEQ_NOIPI]   = "noipi",
-    [CFD_SEQ_PING]    = "ping",
-    [CFD_SEQ_PINGED]  = "pinged",
-    [CFD_SEQ_HANDLE]  = "handle",
-    [CFD_SEQ_DEQUEUE] = "dequeue (src CPU 0 == empty)",
-    [CFD_SEQ_IDLE]    = "idle",
-    [CFD_SEQ_GOTIPI]  = "gotipi",
-    [CFD_SEQ_HDLEND]  = "hdlend (src CPU 0 == early)",
-};
-
-struct cfd_seq_local {
-    u64 ping;
-    u64 pinged;
-    u64 handle;
-    u64 dequeue;
-    u64 idle;
-    u64 gotipi;
-    u64 hdlend;
-};
-#endif
-
-struct cfd_percpu {
-    call_single_data_t csd;
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-    u64 seq_queue;
-    u64 seq_ipi;
-    u64 seq_noipi;
-#endif
-};
-
 struct call_function_data {
-    struct cfd_percpu __percpu *pcpu;
+    call_single_data_t __percpu *csd;
     cpumask_var_t cpumask;
     cpumask_var_t cpumask_ipi;
 };

@@ -110,8 +57,8 @@ int smpcfd_prepare_cpu(unsigned int cpu)
         free_cpumask_var(cfd->cpumask);
         return -ENOMEM;
     }
-    cfd->pcpu = alloc_percpu(struct cfd_percpu);
-    if (!cfd->pcpu) {
+    cfd->csd = alloc_percpu(call_single_data_t);
+    if (!cfd->csd) {
         free_cpumask_var(cfd->cpumask);
         free_cpumask_var(cfd->cpumask_ipi);
         return -ENOMEM;

@@ -126,7 +73,7 @@ int smpcfd_dead_cpu(unsigned int cpu)
 
     free_cpumask_var(cfd->cpumask);
     free_cpumask_var(cfd->cpumask_ipi);
-    free_percpu(cfd->pcpu);
+    free_percpu(cfd->csd);
     return 0;
 }
 

@@ -156,23 +103,49 @@ void __init call_function_init(void)
     smpcfd_prepare_cpu(smp_processor_id());
 }
 
+static __always_inline void
+send_call_function_single_ipi(int cpu)
+{
+    if (call_function_single_prep_ipi(cpu)) {
+        trace_ipi_send_cpu(cpu, _RET_IP_,
+                   generic_smp_call_function_single_interrupt);
+        arch_send_call_function_single_ipi(cpu);
+    }
+}
+
+static __always_inline void
+send_call_function_ipi_mask(struct cpumask *mask)
+{
+    trace_ipi_send_cpumask(mask, _RET_IP_,
+                   generic_smp_call_function_single_interrupt);
+    arch_send_call_function_ipi_mask(mask);
+}
+
 #ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
 
-static DEFINE_STATIC_KEY_FALSE(csdlock_debug_enabled);
-static DEFINE_STATIC_KEY_FALSE(csdlock_debug_extended);
+static DEFINE_STATIC_KEY_MAYBE(CONFIG_CSD_LOCK_WAIT_DEBUG_DEFAULT, csdlock_debug_enabled);
 
+/*
+ * Parse the csdlock_debug= kernel boot parameter.
+ *
+ * If you need to restore the old "ext" value that once provided
+ * additional debugging information, reapply the following commits:
+ *
+ * de7b09ef658d ("locking/csd_lock: Prepare more CSD lock debugging")
+ * a5aabace5fb8 ("locking/csd_lock: Add more data to CSD lock debugging")
+ */
 static int __init csdlock_debug(char *str)
 {
+    int ret;
     unsigned int val = 0;
 
-    if (str && !strcmp(str, "ext")) {
-        val = 1;
-        static_branch_enable(&csdlock_debug_extended);
-    } else
-        get_option(&str, &val);
-
-    if (val)
-        static_branch_enable(&csdlock_debug_enabled);
+    ret = get_option(&str, &val);
+    if (ret) {
+        if (val)
+            static_branch_enable(&csdlock_debug_enabled);
+        else
+            static_branch_disable(&csdlock_debug_enabled);
+    }
 
     return 1;
 }

@@ -181,36 +154,11 @@ __setup("csdlock_debug=", csdlock_debug);
 static DEFINE_PER_CPU(call_single_data_t *, cur_csd);
 static DEFINE_PER_CPU(smp_call_func_t, cur_csd_func);
 static DEFINE_PER_CPU(void *, cur_csd_info);
-static DEFINE_PER_CPU(struct cfd_seq_local, cfd_seq_local);
 
 static ulong csd_lock_timeout = 5000;  /* CSD lock timeout in milliseconds. */
 module_param(csd_lock_timeout, ulong, 0444);
 
 static atomic_t csd_bug_count = ATOMIC_INIT(0);
-static u64 cfd_seq;
-
-#define CFD_SEQ(s, d, t, c) \
-    (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
-
-static u64 cfd_seq_inc(unsigned int src, unsigned int dst, unsigned int type)
-{
-    union cfd_seq_cnt new, old;
-
-    new = CFD_SEQ(src, dst, type, 0);
-
-    do {
-        old.val = READ_ONCE(cfd_seq);
-        new.u.cnt = old.u.cnt + 1;
-    } while (cmpxchg(&cfd_seq, old.val, new.val) != old.val);
-
-    return old.val;
-}
-
-#define cfd_seq_store(var, src, dst, type)                              \
-    do {                                                                \
-        if (static_branch_unlikely(&csdlock_debug_extended))            \
-            var = cfd_seq_inc(src, dst, type);                          \
-    } while (0)
-
 /* Record current CSD work for current CPU, NULL to erase. */
 static void __csd_lock_record(struct __call_single_data *csd)

@@ -244,80 +192,6 @@ static int csd_lock_wait_getcpu(struct __call_single_data *csd)
     return -1;
 }
 
-static void cfd_seq_data_add(u64 val, unsigned int src, unsigned int dst,
-                 unsigned int type, union cfd_seq_cnt *data,
-                 unsigned int *n_data, unsigned int now)
-{
-    union cfd_seq_cnt new[2];
-    unsigned int i, j, k;
-
-    new[0].val = val;
-    new[1] = CFD_SEQ(src, dst, type, new[0].u.cnt + 1);
-
-    for (i = 0; i < 2; i++) {
-        if (new[i].u.cnt <= now)
-            new[i].u.cnt |= 0x80000000U;
-        for (j = 0; j < *n_data; j++) {
-            if (new[i].u.cnt == data[j].u.cnt) {
-                /* Direct read value trumps generated one. */
-                if (i == 0)
-                    data[j].val = new[i].val;
-                break;
-            }
-            if (new[i].u.cnt < data[j].u.cnt) {
-                for (k = *n_data; k > j; k--)
-                    data[k].val = data[k - 1].val;
-                data[j].val = new[i].val;
-                (*n_data)++;
-                break;
-            }
-        }
-        if (j == *n_data) {
-            data[j].val = new[i].val;
-            (*n_data)++;
-        }
-    }
-}
-
-static const char *csd_lock_get_type(unsigned int type)
-{
-    return (type >= ARRAY_SIZE(seq_type)) ? "?" : seq_type[type];
-}
-
-static void csd_lock_print_extended(struct __call_single_data *csd, int cpu)
-{
-    struct cfd_seq_local *seq = &per_cpu(cfd_seq_local, cpu);
-    unsigned int srccpu = csd->node.src;
-    struct call_function_data *cfd = per_cpu_ptr(&cfd_data, srccpu);
-    struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-    unsigned int now;
-    union cfd_seq_cnt data[2 * ARRAY_SIZE(seq_type)];
-    unsigned int n_data = 0, i;
-
-    data[0].val = READ_ONCE(cfd_seq);
-    now = data[0].u.cnt;
-
-    cfd_seq_data_add(pcpu->seq_queue, srccpu, cpu, CFD_SEQ_QUEUE, data, &n_data, now);
-    cfd_seq_data_add(pcpu->seq_ipi, srccpu, cpu, CFD_SEQ_IPI, data, &n_data, now);
-    cfd_seq_data_add(pcpu->seq_noipi, srccpu, cpu, CFD_SEQ_NOIPI, data, &n_data, now);
-
-    cfd_seq_data_add(per_cpu(cfd_seq_local.ping, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PING, data, &n_data, now);
-    cfd_seq_data_add(per_cpu(cfd_seq_local.pinged, srccpu), srccpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED, data, &n_data, now);
-
-    cfd_seq_data_add(seq->idle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_IDLE, data, &n_data, now);
-    cfd_seq_data_add(seq->gotipi, CFD_SEQ_NOCPU, cpu, CFD_SEQ_GOTIPI, data, &n_data, now);
-    cfd_seq_data_add(seq->handle, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HANDLE, data, &n_data, now);
-    cfd_seq_data_add(seq->dequeue, CFD_SEQ_NOCPU, cpu, CFD_SEQ_DEQUEUE, data, &n_data, now);
-    cfd_seq_data_add(seq->hdlend, CFD_SEQ_NOCPU, cpu, CFD_SEQ_HDLEND, data, &n_data, now);
-
-    for (i = 0; i < n_data; i++) {
-        pr_alert("\tcsd: cnt(%07x): %04x->%04x %s\n",
-             data[i].u.cnt & ~0x80000000U, data[i].u.src,
-             data[i].u.dst, csd_lock_get_type(data[i].u.type));
-    }
-    pr_alert("\tcsd: cnt now: %07x\n", now);
-}
-
 /*
  * Complain if too much time spent waiting. Note that only
  * the CSD_TYPE_SYNC/ASYNC types provide the destination CPU,

@@ -368,8 +242,6 @@ static bool csd_lock_wait_toolong(struct __call_single_data *csd, u64 ts0, u64 *
              *bug_id, !cpu_cur_csd ? "unresponsive" : "handling this request");
     }
     if (cpu >= 0) {
-        if (static_branch_unlikely(&csdlock_debug_extended))
-            csd_lock_print_extended(csd, cpu);
         dump_cpu_task(cpu);
         if (!cpu_cur_csd) {
             pr_alert("csd: Re-sending CSD lock (#%d) IPI from CPU#%02d to CPU#%02d\n", *bug_id, raw_smp_processor_id(), cpu);

@@ -412,27 +284,7 @@ static __always_inline void csd_lock_wait(struct __call_single_data *csd)
 
     smp_cond_load_acquire(&csd->node.u_flags, !(VAL & CSD_FLAG_LOCK));
 }
 
-static void __smp_call_single_queue_debug(int cpu, struct llist_node *node)
-{
-    unsigned int this_cpu = smp_processor_id();
-    struct cfd_seq_local *seq = this_cpu_ptr(&cfd_seq_local);
-    struct call_function_data *cfd = this_cpu_ptr(&cfd_data);
-    struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-
-    cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
-    if (llist_add(node, &per_cpu(call_single_queue, cpu))) {
-        cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
-        cfd_seq_store(seq->ping, this_cpu, cpu, CFD_SEQ_PING);
-        send_call_function_single_ipi(cpu);
-        cfd_seq_store(seq->pinged, this_cpu, cpu, CFD_SEQ_PINGED);
-    } else {
-        cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
-    }
-}
-
 #else
-#define cfd_seq_store(var, src, dst, type)
 
 static void csd_lock_record(struct __call_single_data *csd)
 {
 }

@@ -470,23 +322,29 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(call_single_data_t, csd_data);
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
-#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG
-    if (static_branch_unlikely(&csdlock_debug_extended)) {
-        unsigned int type;
-
-        type = CSD_TYPE(container_of(node, call_single_data_t,
-                         node.llist));
-        if (type == CSD_TYPE_SYNC || type == CSD_TYPE_ASYNC) {
-            __smp_call_single_queue_debug(cpu, node);
-            return;
-        }
+    /*
+     * We have to check the type of the CSD before queueing it, because
+     * once queued it can have its flags cleared by
+     *   flush_smp_call_function_queue()
+     * even if we haven't sent the smp_call IPI yet (e.g. the stopper
+     * executes migration_cpu_stop() on the remote CPU).
+     */
+    if (trace_ipi_send_cpu_enabled()) {
+        call_single_data_t *csd;
+        smp_call_func_t func;
+
+        csd = container_of(node, call_single_data_t, node.llist);
+        func = CSD_TYPE(csd) == CSD_TYPE_TTWU ?
+            sched_ttwu_pending : csd->func;
+
+        trace_ipi_send_cpu(cpu, _RET_IP_, func);
     }
-#endif
 
     /*
-     * The list addition should be visible before sending the IPI
-     * handler locks the list to pull the entry off it because of
-     * normal cache coherency rules implied by spinlocks.
+     * The list addition should be visible to the target CPU when it pops
+     * the head of the list to pull the entry off it in the IPI handler
+     * because of normal cache coherency rules implied by the underlying
+     * llist ops.
      *
      * If IPIs can go out of order to the cache coherency protocol
     * in an architecture, sufficient synchronisation should be added

@@ -541,8 +399,6 @@ static int generic_exec_single(int cpu, struct __call_single_data *csd)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-    cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
-              smp_processor_id(), CFD_SEQ_GOTIPI);
     __flush_smp_call_function_queue(true);
 }
 

@@ -570,13 +426,7 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
     lockdep_assert_irqs_disabled();
 
     head = this_cpu_ptr(&call_single_queue);
-    cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->handle, CFD_SEQ_NOCPU,
-              smp_processor_id(), CFD_SEQ_HANDLE);
     entry = llist_del_all(head);
-    cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->dequeue,
-              /* Special meaning of source cpu: 0 == queue empty */
-              entry ? CFD_SEQ_NOCPU : 0,
-              smp_processor_id(), CFD_SEQ_DEQUEUE);
     entry = llist_reverse_order(entry);
 
     /* There shouldn't be any pending callbacks on an offline CPU. */

@@ -635,12 +485,8 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
         }
     }
 
-    if (!entry) {
-        cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend,
-                  0, smp_processor_id(),
-                  CFD_SEQ_HDLEND);
+    if (!entry)
         return;
-    }
 
     /*
      * Second; run all !SYNC callbacks.

@@ -678,9 +524,6 @@ static void __flush_smp_call_function_queue(bool warn_cpu_offline)
      */
     if (entry)
         sched_ttwu_pending(entry);
-
-    cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->hdlend, CFD_SEQ_NOCPU,
-              smp_processor_id(), CFD_SEQ_HDLEND);
 }
 

@@ -704,8 +547,6 @@ void flush_smp_call_function_queue(void)
     if (llist_empty(this_cpu_ptr(&call_single_queue)))
         return;
 
-    cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
-              smp_processor_id(), CFD_SEQ_IDLE);
     local_irq_save(flags);
     /* Get the already pending soft interrupts for RT enabled kernels */
     was_pending = local_softirq_pending();

@@ -887,9 +728,9 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
     int cpu, last_cpu, this_cpu = smp_processor_id();
     struct call_function_data *cfd;
     bool wait = scf_flags & SCF_WAIT;
+    int nr_cpus = 0, nr_queued = 0;
     bool run_remote = false;
     bool run_local = false;
-    int nr_cpus = 0;
 
     lockdep_assert_preemption_disabled();
 

@@ -929,11 +770,12 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
 
         cpumask_clear(cfd->cpumask_ipi);
         for_each_cpu(cpu, cfd->cpumask) {
-            struct cfd_percpu *pcpu = per_cpu_ptr(cfd->pcpu, cpu);
-            call_single_data_t *csd = &pcpu->csd;
+            call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
-            if (cond_func && !cond_func(cpu, info))
+            if (cond_func && !cond_func(cpu, info)) {
+                __cpumask_clear_cpu(cpu, cfd->cpumask);
                 continue;
+            }
 
             csd_lock(csd);
             if (wait)

@@ -944,19 +786,20 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
             csd->node.src = smp_processor_id();
             csd->node.dst = cpu;
 #endif
-            cfd_seq_store(pcpu->seq_queue, this_cpu, cpu, CFD_SEQ_QUEUE);
             if (llist_add(&csd->node.llist, &per_cpu(call_single_queue, cpu))) {
                 __cpumask_set_cpu(cpu, cfd->cpumask_ipi);
                 nr_cpus++;
                 last_cpu = cpu;
-
-                cfd_seq_store(pcpu->seq_ipi, this_cpu, cpu, CFD_SEQ_IPI);
-            } else {
-                cfd_seq_store(pcpu->seq_noipi, this_cpu, cpu, CFD_SEQ_NOIPI);
             }
+            nr_queued++;
         }
 
-        cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->ping, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PING);
+        /*
+         * Trace each smp_function_call_*() as an IPI, actual IPIs
+         * will be traced with func==generic_smp_call_function_single_ipi().
+         */
+        if (nr_queued)
+            trace_ipi_send_cpumask(cfd->cpumask, _RET_IP_, func);
 
         /*
          * Choose the most efficient way to send an IPI. Note that the

@@ -966,9 +809,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
         if (nr_cpus == 1)
             send_call_function_single_ipi(last_cpu);
         else if (likely(nr_cpus > 1))
-            arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
-
-        cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->pinged, this_cpu, CFD_SEQ_NOCPU, CFD_SEQ_PINGED);
+            send_call_function_ipi_mask(cfd->cpumask_ipi);
     }
 
     if (run_local && (!cond_func || cond_func(this_cpu, info))) {

@@ -983,7 +824,7 @@ static void smp_call_function_many_cond(const struct cpumask *mask,
         for_each_cpu(cpu, cfd->cpumask) {
             call_single_data_t *csd;
 
-            csd = &per_cpu_ptr(cfd->pcpu, cpu)->csd;
+            csd = per_cpu_ptr(cfd->csd, cpu);
             csd_lock_wait(csd);
         }
     }

@@ -1490,6 +1490,15 @@ config CSD_LOCK_WAIT_DEBUG
       include the IPI handler function currently executing (if any)
       and relevant stack traces.
 
+config CSD_LOCK_WAIT_DEBUG_DEFAULT
+    bool "Default csd_lock_wait() debugging on at boot time"
+    depends on CSD_LOCK_WAIT_DEBUG
+    depends on 64BIT
+    default n
+    help
+      This option causes the csdlock_debug= kernel boot parameter to
+      default to 1 (basic debugging) instead of 0 (no debugging).
+
 endmenu # lock debugging
 
 config TRACE_IRQFLAGS

@@ -62,11 +62,14 @@
 #include "kvm_mm.h"
 #include "vfio.h"
 
+#include <trace/events/ipi.h>
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/kvm.h>
 
 #include <linux/kvm_dirty_ring.h>
 
 /* Worst case buffer size needed for holding an integer. */
 #define ITOA_MAX_LEN 12