From f78eae2e6f5d1eb05f76a45486286445b916bd92 Mon Sep 17 00:00:00 2001
From: "David S. Miller" <davem@davemloft.net>
Date: Mon, 4 Jun 2007 17:01:39 -0700
Subject: [PATCH] [SPARC64]: Proper multi-core scheduling support.

The scheduling domain hierarchy is:

	all cpus -->
	cpus that share an instruction cache -->
	cpus that share an integer execution unit

Signed-off-by: David S. Miller <davem@davemloft.net>
---
 arch/sparc64/Kconfig           |  9 +++++++
 arch/sparc64/kernel/mdesc.c    | 49 ++++++++++++++++++++++++++++++++++
 arch/sparc64/kernel/prom.c     |  1 +
 arch/sparc64/kernel/smp.c      | 19 ++++++++++++-
 include/asm-sparc64/cpudata.h  |  2 +-
 include/asm-sparc64/smp.h      |  1 +
 include/asm-sparc64/topology.h | 13 ++++++---
 7 files changed, 89 insertions(+), 5 deletions(-)

diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index bd00f89eed..89a1b469b9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -396,6 +396,15 @@ config SCHED_SMT
 	  when dealing with UltraSPARC cpus at a cost of slightly increased
 	  overhead in some places. If unsure say N here.
 
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	depends on SMP
+	default y
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
 source "kernel/Kconfig.preempt"
 
 config CMDLINE_BOOL
diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c
index 9246c2cf95..1b5db4bc6b 100644
--- a/arch/sparc64/kernel/mdesc.c
+++ b/arch/sparc64/kernel/mdesc.c
@@ -473,6 +473,53 @@ static void __init set_core_ids(void)
 	}
 }
 
+static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id)
+{
+	int i;
+
+	for (i = 0; i < mp->num_arcs; i++) {
+		struct mdesc_node *t = mp->arcs[i].arc;
+		const u64 *id;
+
+		if (strcmp(mp->arcs[i].name, "back"))
+			continue;
+
+		if (strcmp(t->name, "cpu"))
+			continue;
+
+		id = md_get_property(t, "id", NULL);
+		if (*id < NR_CPUS)
+			cpu_data(*id).proc_id = proc_id;
+	}
+}
+
+static void __init __set_proc_ids(const char *exec_unit_name)
+{
+	struct mdesc_node *mp;
+	int idx;
+
+	idx = 0;
+	md_for_each_node_by_name(mp, exec_unit_name) {
+		const char *type;
+		int len;
+
+		type = md_get_property(mp, "type", &len);
+		if (!find_in_proplist(type, "int", len) &&
+		    !find_in_proplist(type, "integer", len))
+			continue;
+
+		mark_proc_ids(mp, idx);
+
+		idx++;
+	}
+}
+
+static void __init set_proc_ids(void)
+{
+	__set_proc_ids("exec_unit");
+	__set_proc_ids("exec-unit");
+}
+
 static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask,
 				      unsigned char def)
 {
@@ -574,9 +621,11 @@ static void __init mdesc_fill_in_cpu_data(void)
 #endif
 
 		c->core_id = 0;
+		c->proc_id = -1;
 	}
 
 	set_core_ids();
+	set_proc_ids();
 
 	smp_fill_in_sib_core_maps();
 }
diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c
index dad4b3ba70..928aba3d0d 100644
--- a/arch/sparc64/kernel/prom.c
+++ b/arch/sparc64/kernel/prom.c
@@ -1800,6 +1800,7 @@ static void __init of_fill_in_cpu_data(void)
 
 			cpu_data(cpuid).core_id = 0;
 		}
+		cpu_data(cpuid).proc_id = -1;
 
 #ifdef CONFIG_SMP
 		cpu_set(cpuid, cpu_present_map);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c550bba349..68a45ac933 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -51,6 +51,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
+	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 static cpumask_t smp_commenced_mask;
 static cpumask_t cpu_callout_map;
@@ -1217,13 +1219,28 @@ void __devinit smp_fill_in_sib_core_maps(void)
 		unsigned int j;
 
 		if (cpu_data(i).core_id == 0) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, cpu_core_map[i]);
 			continue;
 		}
 
 		for_each_possible_cpu(j) {
 			if (cpu_data(i).core_id ==
 			    cpu_data(j).core_id)
+				cpu_set(j, cpu_core_map[i]);
+		}
+	}
+
+	for_each_possible_cpu(i) {
+		unsigned int j;
+
+		if (cpu_data(i).proc_id == -1) {
+			cpu_set(i, cpu_sibling_map[i]);
+			continue;
+		}
+
+		for_each_possible_cpu(j) {
+			if (cpu_data(i).proc_id ==
+			    cpu_data(j).proc_id)
 				cpu_set(j, cpu_sibling_map[i]);
 		}
 	}
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 03c385de76..445026fbec 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -31,7 +31,7 @@ typedef struct {
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
 	int		core_id;
-	unsigned int	__pad3;
+	int		proc_id;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index f76e1492ad..5400212686 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -33,6 +33,7 @@ extern cpumask_t phys_cpu_present_map;
 #define cpu_possible_map phys_cpu_present_map
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
+extern cpumask_t cpu_core_map[NR_CPUS];
 
 /*
  * General functions that each host system must provide.
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index e0d450d600..4880f7ca0b 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,12 +1,19 @@
 #ifndef _ASM_SPARC64_TOPOLOGY_H
 #define _ASM_SPARC64_TOPOLOGY_H
 
+#ifdef CONFIG_SMP
 #include <asm/spitfire.h>
-#define smt_capable()	(tlb_type == hypervisor)
-
-#include <asm-generic/topology.h>
 
+#define topology_physical_package_id(cpu)	(cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)			(cpu_data(cpu).core_id)
+#define topology_core_siblings(cpu)		(cpu_core_map[cpu])
 #define topology_thread_siblings(cpu)		(cpu_sibling_map[cpu])
+#define mc_capable()				(tlb_type == hypervisor)
+#define smt_capable()				(tlb_type == hypervisor)
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/topology.h>
+
+#define cpu_coregroup_map(cpu)			(cpu_core_map[cpu])
 
 #endif /* _ASM_SPARC64_TOPOLOGY_H */
-- 
2.20.1
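
Note (not part of the patch): once a kernel with this change is running, the
new proc_id and cpu_core_map values become visible through the generic
topology sysfs files that drivers/base/topology.c builds from the
topology_*() macros touched above. The program below is a hypothetical
user-space sketch for eyeballing that, assuming sysfs is mounted at /sys and
the standard file names used by the generic topology driver
(physical_package_id, core_id, core_siblings, thread_siblings).

/*
 * Hypothetical checker, not part of the patch: dump the topology files
 * for one cpu so the effect of proc_id/core_id/cpu_core_map/cpu_sibling_map
 * can be inspected from user space.
 */
#include <stdio.h>
#include <stdlib.h>

static void show(int cpu, const char *file)
{
	char path[128], buf[256];
	FILE *fp;

	/* Path layout assumed from the generic topology sysfs driver. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, file);
	fp = fopen(path, "r");
	if (!fp)
		return;
	if (fgets(buf, sizeof(buf), fp))
		printf("cpu%d %-21s %s", cpu, file, buf);
	fclose(fp);
}

int main(int argc, char **argv)
{
	int cpu = argc > 1 ? atoi(argv[1]) : 0;

	show(cpu, "physical_package_id");	/* cpu_data(cpu).proc_id */
	show(cpu, "core_id");			/* cpu_data(cpu).core_id */
	show(cpu, "core_siblings");		/* cpu_core_map[cpu] */
	show(cpu, "thread_siblings");		/* cpu_sibling_map[cpu] */
	return 0;
}

Usage would be "./topo 4" to print the four values for cpu4; cpus sharing an
integer execution unit should then report identical thread_siblings masks,
and cpus sharing an instruction cache identical core_siblings masks.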