1 #include <linux/init.h>
2 #include <linux/sched.h>
/* Don't do any NUMA setup on Opteron right now. They seem to be
   better off with flat scheduling. This is just for SMT. */
7 #ifdef CONFIG_SCHED_SMT
/*
 * One sched_group per possible CPU for each of the two domain levels:
 * sched_group_cpus[] backs the SMT-sibling level (one single-CPU group
 * per sibling), sched_group_phys[] backs the physical level, indexed by
 * the first CPU of each sibling set (see arch_init_sched_domains).
 */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
/*
 * Per-CPU sched_domain instances: cpu_domains is the bottom (sibling)
 * level; phys_domains is wired in as its ->parent below.
 */
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
/*
 * arch_init_sched_domains -- build the boot-time scheduler-domain tree
 * for SMT: every CPU gets a sibling-level domain (SD_SIBLING_INIT,
 * spanning its cpu_sibling_map entry) whose parent is a machine-wide
 * physical-level domain (SD_CPU_INIT, spanning cpu_possible_map), then
 * each CPU's bottom-level domain is attached to the scheduler.
 *
 * NOTE(review): this chunk appears truncated -- the function braces,
 * the per-CPU loop headers, the loop-index declarations, and the
 * sched_group next-pointer linking via first/last are not visible
 * here.  Verify against the complete file before restructuring.
 */
__init void arch_init_sched_domains(void)
/* Cursor pair for chaining sched_groups into a list; the linking code
   itself is not visible in this chunk. */
struct sched_group *first = NULL, *last = NULL;
/* --- Per-CPU domain setup (body of a per-CPU loop; header missing) --- */
struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
struct sched_domain *phys_domain = &per_cpu(phys_domains, i);

/* Sibling level: start from the SD_SIBLING_INIT template. */
*cpu_domain = SD_SIBLING_INIT;
/* Disable SMT NICE for CMP */
/* RED-PEN use a generic flag */
/* On AMD the siblings are treated as full cores: clear the
   shared-CPU-power flag set by the sibling template. */
if (cpu_data[i].x86_vendor == X86_VENDOR_AMD)
	cpu_domain->flags &= ~SD_SHARE_CPUPOWER;
cpu_domain->span = cpu_sibling_map[i];	/* this CPU's sibling set */
cpu_domain->parent = phys_domain;
cpu_domain->groups = &sched_group_cpus[i];

/* Physical level: spans all possible CPUs; its groups pointer starts at
   the group owned by the first CPU of this sibling set. */
*phys_domain = SD_CPU_INIT;
phys_domain->span = cpu_possible_map;
phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

/* Set up CPU (sibling) groups */
struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
/* Only the first CPU of each sibling set builds the groups (the
   skip statement for the others is not visible here). */
if (i != first_cpu(cpu_domain->span))
/* One single-CPU group per sibling, each at full scheduling power. */
for_each_cpu_mask(j, cpu_domain->span) {
	struct sched_group *cpu = &sched_group_cpus[j];

	cpus_clear(cpu->cpumask);
	cpu_set(j, cpu->cpumask);
	cpu->cpu_power = SCHED_LOAD_SCALE;

/* Set up physical groups */
struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
struct sched_group *cpu = &sched_group_phys[i];

/* Again, only the sibling set's first CPU owns its physical group. */
if (i != first_cpu(cpu_domain->span))
/* The physical group covers the whole sibling set. */
cpu->cpumask = cpu_domain->span;
/*
 * Make each extra sibling increase power by 10% of
 * the basic CPU. This is very arbitrary.
 */
cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

/* Finally hand each CPU its bottom-level domain. */
struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
cpu_attach_domain(cpu_domain, i);