linux-2.6.git: arch/x86_64/kernel/domain.c
#include <linux/init.h>
#include <linux/sched.h>

/* Don't do any NUMA setup on Opteron right now. They seem to be
   better off with flat scheduling. This is just for SMT. */

#ifdef CONFIG_SCHED_SMT

static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
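/*
 * Build a two-level scheduler domain hierarchy: a per-CPU SMT (sibling)
 * domain at the bottom, nested under a physical domain that spans all
 * possible CPUs.
 */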
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);

		*cpu_domain = SD_SIBLING_INIT;
		/* Disable SMT NICE for CMP */
		/* RED-PEN use a generic flag */
		if (cpu_data[i].x86_vendor == X86_VENDOR_AMD)
			cpu_domain->flags &= ~SD_SHARE_CPUPOWER;
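		/* The SMT domain covers this CPU and its hyperthread siblings. */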
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

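		/*
		 * The physical domain spans every possible CPU. Its group
		 * pointer is anchored at the first sibling of this CPU's
		 * span; the groups themselves are filled in below.
		 */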
		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

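		/* Build each sibling group list only once, from the first CPU in the span. */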
		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		/* Close the ring: the group list is circular. */
		last->next = first;
	}

	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		/* One physical group per sibling span, represented by its first CPU. */
		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/*
		 * Make each extra sibling increase power by 10% of
		 * the basic CPU. This is very arbitrary.
		 */
		cpu->cpu_power = SCHED_LOAD_SCALE +
			SCHED_LOAD_SCALE * (cpus_weight(cpu->cpumask) - 1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;
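	/*
	 * Make sure the domain and group setup above is globally visible
	 * before any CPU is attached to its domain.
	 */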
	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}

#endif