mirror of
https://gitee.com/bianbu-linux/linux-6.6
synced 2025-07-22 01:43:37 -04:00
Each scheduler domain should consist of a distinct hierarchy of cores sharing a similar property. Currently, no scheduler domain is defined separately for the cores that share the last-level cache. As a result, the scheduler fails to take advantage of cache locality while migrating tasks during load balancing. Here are the CPU masks currently present for sparc that are/can be used in scheduler domain construction. cpu_core_map: set based on the cores that share the L1 cache. cpu_core_sib_map: set based on the socket id. The prior SPARC notion of socket was defined as the highest level of shared cache. However, the MD record on T7 platforms now describes the CPUs that share the physical socket, and this is no longer tied to shared cache. That is why a separate CPU mask needs to be created that truly represents the highest level of shared cache for all platforms. Signed-off-by: Atish Patra <atish.patra@oracle.com> Reviewed-by: Chris Hyser <chris.hyser@oracle.com> Signed-off-by: David S. Miller <davem@davemloft.net>
63 lines
1.5 KiB
C
63 lines
1.5 KiB
C
#ifndef _ASM_SPARC64_TOPOLOGY_H
|
|
#define _ASM_SPARC64_TOPOLOGY_H
|
|
|
|
#ifdef CONFIG_NUMA
|
|
|
|
#include <asm/mmzone.h>
|
|
|
|
/* Map a CPU number to its NUMA node via the MD-derived lookup table. */
static inline int cpu_to_node(int cpu)
{
	return numa_cpu_lookup_table[cpu];
}
|
|
|
|
/* sparc64 has a flat node hierarchy; every node is its own parent. */
#define parent_node(node)	(node)

/*
 * CPUs belonging to @node.  A node of -1 means "no specific node",
 * so fall back to the mask of all CPUs.
 */
#define cpumask_of_node(node) ((node) == -1 ? \
			       cpu_all_mask : \
			       &numa_cpumask_lookup_table[node])
|
struct pci_bus;
#ifdef CONFIG_PCI
/* NUMA node a PCI bus is attached to; implemented by the PCI code. */
int pcibus_to_node(struct pci_bus *pbus);
#else
/* Without PCI there is no bus/node affinity; -1 means "no node". */
static inline int pcibus_to_node(struct pci_bus *pbus)
{
	return -1;
}
#endif
|
|
|
|
/*
 * CPUs local to @bus; when the bus has no known node (-1),
 * fall back to the mask of all CPUs.
 */
#define cpumask_of_pcibus(bus)	\
	(pcibus_to_node(bus) == -1 ? \
	 cpu_all_mask : \
	 cpumask_of_node(pcibus_to_node(bus)))

/* Distance between two NUMA nodes. */
int __node_distance(int, int);
#define node_distance(a, b) __node_distance(a, b)
|
|
#else /* CONFIG_NUMA */
|
|
|
|
#include <asm-generic/topology.h>
|
|
|
|
#endif /* !(CONFIG_NUMA) */
|
|
|
|
#ifdef CONFIG_SMP
|
|
#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
|
|
#define topology_core_id(cpu) (cpu_data(cpu).core_id)
|
|
#define topology_core_cpumask(cpu) (&cpu_core_sib_map[cpu])
|
|
#define topology_core_cache_cpumask(cpu) (&cpu_core_sib_cache_map[cpu])
|
|
#define topology_sibling_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
|
|
#endif /* CONFIG_SMP */
|
|
|
|
/* CPUs that share an L1 cache with the indexed CPU. */
extern cpumask_t cpu_core_map[NR_CPUS];
/* CPUs in the same physical socket (per the Machine Description). */
extern cpumask_t cpu_core_sib_map[NR_CPUS];
/* CPUs that share the highest (last) level of cache. */
extern cpumask_t cpu_core_sib_cache_map[NR_CPUS];
|
|
|
/*
 * Return the set of cores that share the last-level cache with @cpu,
 * used for scheduler domain construction.
 */
static inline const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_core_sib_cache_map[cpu];
}
|
|
|
|
#endif /* _ASM_SPARC64_TOPOLOGY_H */
|