/* arch/ppc64/kernel/sysfs.c */
#include <linux/config.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>


/* SMT stuff */

#ifndef CONFIG_PPC_ISERIES

/* default to snooze disabled */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay);

static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
                                      size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);
        ssize_t ret;
        unsigned long snooze;

        ret = sscanf(buf, "%lu", &snooze);
        if (ret != 1)
                return -EINVAL;

        per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;

        return count;
}

static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);

        return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
}

static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
                   store_smt_snooze_delay);
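
/*
 * Userspace example (a sketch, assuming sysfs is mounted at /sys): the
 * attribute appears as /sys/devices/system/cpu/cpuN/smt_snooze_delay and
 * is only created for SMT-capable CPUs (see topology_init below), e.g.
 *
 *   cat /sys/devices/system/cpu/cpu0/smt_snooze_delay
 *   echo 100 > /sys/devices/system/cpu/cpu0/smt_snooze_delay
 */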
47
48 /* Only parse OF options if the matching cmdline option was not specified */
49 static int smt_snooze_cmdline;
50
51 static int __init smt_setup(void)
52 {
53         struct device_node *options;
54         unsigned int *val;
55         unsigned int cpu;
56
        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SMT))
                return 1;

        options = find_path_device("/options");
        if (!options)
                return 1;

        val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
                                           NULL);
        if (!smt_snooze_cmdline && val) {
                for_each_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = *val;
        }

        return 1;
}
__initcall(smt_setup);

static int __init setup_smt_snooze_delay(char *str)
{
        unsigned int cpu;
        int snooze;

        if (!(cur_cpu_spec->cpu_features & CPU_FTR_SMT))
                return 1;

        smt_snooze_cmdline = 1;

        if (get_option(&str, &snooze)) {
                for_each_cpu(cpu)
                        per_cpu(smt_snooze_delay, cpu) = snooze;
        }

        return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
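
/*
 * Boot-time example: passing "smt-snooze-delay=100" on the kernel command
 * line sets the delay for every CPU and takes precedence over the firmware
 * "ibm,smt-snooze-delay" property, since smt_snooze_cmdline makes
 * smt_setup() skip the Open Firmware value.
 */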

#endif


/* PMC stuff */

/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */

static DEFINE_PER_CPU(char, pmcs_enabled);

#ifdef CONFIG_PPC_ISERIES
void ppc64_enable_pmcs(void)
{
        /* XXX Implement for iseries */
}
#else
void ppc64_enable_pmcs(void)
{
        unsigned long hid0;
        unsigned long set, reset;
        int ret;
        unsigned int ctrl;

        /* Only need to enable them once */
        if (__get_cpu_var(pmcs_enabled))
                return;

        __get_cpu_var(pmcs_enabled) = 1;

        switch (systemcfg->platform) {
                case PLATFORM_PSERIES:
                        hid0 = mfspr(HID0);
                        hid0 |= 1UL << (63 - 20);       /* HID0 bit 20, IBM bit numbering */

                        /* POWER4 requires the following sequence */
                        asm volatile(
                                "sync\n"
                                "mtspr  %1, %0\n"
                                "mfspr  %0, %1\n"
                                "mfspr  %0, %1\n"
                                "mfspr  %0, %1\n"
                                "mfspr  %0, %1\n"
                                "mfspr  %0, %1\n"
                                "mfspr  %0, %1\n"
                                "isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
                                "memory");
                        break;

                case PLATFORM_PSERIES_LPAR:
                        set = 1UL << 63;
                        reset = 0;
                        ret = plpar_hcall_norets(H_PERFMON, set, reset);
                        if (ret)
                                printk(KERN_ERR "H_PERFMON call returned %d\n",
                                       ret);
                        break;

                default:
                        break;
        }

        /* instruct hypervisor to maintain PMCs */
        if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
                char *ptr = (char *)&paca[smp_processor_id()].xLpPaca;
                ptr[0xBB] = 1;  /* flag byte in the lppaca that marks the PMCs as in use */
        }

        /*
         * On SMT machines we have to set the run latch in the ctrl register
         * in order to make PMC6 spin.
         */
        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT) {
                ctrl = mfspr(CTRLF);
                ctrl |= RUNLATCH;
                mtspr(CTRLT, ctrl);
        }
}
#endif

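/*
 * The PMC-related SPRs are per-CPU state, so a read or write requested
 * through a given CPU's sysfs file has to execute on that CPU.  The helper
 * below does this by temporarily pinning the calling task to the target
 * CPU with set_cpus_allowed(), running the callback, and then restoring
 * the original affinity mask.
 */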
/* XXX convert to rusty's on_one_cpu */
static unsigned long run_on_cpu(unsigned long cpu,
                                unsigned long (*func)(unsigned long),
                                unsigned long arg)
{
        cpumask_t old_affinity = current->cpus_allowed;
        unsigned long ret;

        /* should return -EINVAL to userspace */
        if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
                return 0;

        ret = func(arg);

        set_cpus_allowed(current, old_affinity);

        return ret;
}

#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static unsigned long read_##NAME(unsigned long junk) \
{ \
        return mfspr(ADDRESS); \
} \
static unsigned long write_##NAME(unsigned long val) \
{ \
        ppc64_enable_pmcs(); \
        mtspr(ADDRESS, val); \
        return 0; \
} \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
        struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
        unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
        return sprintf(buf, "%lx\n", val); \
} \
static ssize_t store_##NAME(struct sys_device *dev, const char *buf, \
                            size_t count) \
{ \
        struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
        unsigned long val; \
        int ret = sscanf(buf, "%lx", &val); \
        if (ret != 1) \
                return -EINVAL; \
        run_on_cpu(cpu->sysdev.id, write_##NAME, val); \
        return count; \
}
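
/*
 * Each SYSFS_PMCSETUP(name, spr) invocation below expands to read_name/
 * write_name helpers plus show_name/store_name sysfs methods, making the
 * named SPR visible as a per-CPU file.  Example (a sketch, assuming sysfs
 * is mounted at /sys):
 *
 *   cat /sys/devices/system/cpu/cpu0/mmcr0
 *   echo 0 > /sys/devices/system/cpu/cpu0/mmcr0
 */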

SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);

static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1);
static SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2);
static SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3);
static SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4);
static SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5);
static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0400, show_purr, NULL);

static void __init register_cpu_pmc(struct sys_device *s)
{
        sysdev_create_file(s, &attr_mmcr0);
        sysdev_create_file(s, &attr_mmcr1);

        if (cur_cpu_spec->cpu_features & CPU_FTR_MMCRA)
                sysdev_create_file(s, &attr_mmcra);

        sysdev_create_file(s, &attr_pmc1);
        sysdev_create_file(s, &attr_pmc2);
        sysdev_create_file(s, &attr_pmc3);
        sysdev_create_file(s, &attr_pmc4);
        sysdev_create_file(s, &attr_pmc5);
        sysdev_create_file(s, &attr_pmc6);

        if (cur_cpu_spec->cpu_features & CPU_FTR_PMC8) {
                sysdev_create_file(s, &attr_pmc7);
                sysdev_create_file(s, &attr_pmc8);
        }

        if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                sysdev_create_file(s, &attr_purr);
}


/* NUMA stuff */

#ifdef CONFIG_NUMA
static struct node node_devices[MAX_NUMNODES];

static void register_nodes(void)
{
        int i;

        for (i = 0; i < MAX_NUMNODES; i++) {
                if (node_online(i)) {
                        int p_node = parent_node(i);
                        struct node *parent = NULL;

                        if (p_node != i)
                                parent = &node_devices[p_node];

                        register_node(&node_devices[i], i, parent);
                }
        }
}
#else
static void register_nodes(void)
{
        return;
}
#endif


/* Only valid if CPU is online. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, sysdev);

        return sprintf(buf, "%u\n", get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
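
/*
 * Example (a sketch, assuming sysfs is mounted at /sys): reading
 * /sys/devices/system/cpu/cpu0/physical_id returns the hardware CPU
 * number that get_hard_smp_processor_id() reports for logical cpu 0.
 */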


static DEFINE_PER_CPU(struct cpu, cpu_devices);

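/*
 * Rough sketch of the sysfs layout this init call produces (assuming
 * sysfs is mounted at /sys):
 *
 *   /sys/devices/system/node/nodeN                  NUMA nodes (CONFIG_NUMA)
 *   /sys/devices/system/cpu/cpuN/physical_id
 *   /sys/devices/system/cpu/cpuN/mmcr0, mmcr1, pmc1..pmc8, purr, ...
 *   /sys/devices/system/cpu/cpuN/smt_snooze_delay   SMT CPUs only
 */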
static int __init topology_init(void)
{
        int cpu;
        struct node *parent = NULL;

        register_nodes();

        for_each_cpu(cpu) {
                struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
                parent = &node_devices[cpu_to_node(cpu)];
#endif
                register_cpu(c, cpu, parent);

                register_cpu_pmc(&c->sysdev);

                sysdev_create_file(&c->sysdev, &attr_physical_id);

#ifndef CONFIG_PPC_ISERIES
                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                        sysdev_create_file(&c->sysdev, &attr_smt_snooze_delay);
#endif
        }

        return 0;
}
__initcall(topology_init);