/*
 * Imported from linux-2.6.git: kernel/cpu.c
 * (snapshot from the "upgrade to linux 2.6.9-1.11_FC2" update)
 */
1 /* CPU control.
2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
3  *
4  * This code is licenced under the GPL.
5  */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/unistd.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/kmod.h>         /* for hotplug_path */
15 #include <linux/kthread.h>
16 #include <linux/stop_machine.h>
17 #include <asm/semaphore.h>
18
/* Serializes all CPU hotplug transitions (cpu_up/cpu_down) and
 * notifier (un)registration below. */
DECLARE_MUTEX(cpucontrol);

/* Notifier chain run for CPU hotplug events; guarded by cpucontrol. */
static struct notifier_block *cpu_chain;
24 /* Need to know about CPUs going up/down? */
25 int register_cpu_notifier(struct notifier_block *nb)
26 {
27         int ret;
28
29         if ((ret = down_interruptible(&cpucontrol)) != 0)
30                 return ret;
31         ret = notifier_chain_register(&cpu_chain, nb);
32         up(&cpucontrol);
33         return ret;
34 }
35 EXPORT_SYMBOL(register_cpu_notifier);
36
/* Remove a previously registered hotplug callback.  Uninterruptible:
 * unregistration must not fail, so we wait for the lock unconditionally. */
void unregister_cpu_notifier(struct notifier_block *nb)
{
	down(&cpucontrol);
	notifier_chain_unregister(&cpu_chain, nb);
	up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
44
45 #ifdef CONFIG_HOTPLUG_CPU
46 static inline void check_for_tasks(int cpu)
47 {
48         struct task_struct *p;
49
50         write_lock_irq(&tasklist_lock);
51         for_each_process(p) {
52                 if (task_cpu(p) == cpu && (p->utime != 0 || p->stime != 0))
53                         printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
54                                 (state = %ld, flags = %lx) \n",
55                                  p->comm, p->pid, cpu, p->state, p->flags);
56         }
57         write_unlock_irq(&tasklist_lock);
58 }
59
60 /* Notify userspace when a cpu event occurs, by running '/sbin/hotplug
61  * cpu' with certain environment variables set.  */
62 static int cpu_run_sbin_hotplug(unsigned int cpu, const char *action)
63 {
64         char *argv[3], *envp[6], cpu_str[12], action_str[32], devpath_str[40];
65         int i;
66
67         sprintf(cpu_str, "CPU=%d", cpu);
68         sprintf(action_str, "ACTION=%s", action);
69         sprintf(devpath_str, "DEVPATH=devices/system/cpu/cpu%d", cpu);
70         
71         i = 0;
72         argv[i++] = hotplug_path;
73         argv[i++] = "cpu";
74         argv[i] = NULL;
75
76         i = 0;
77         /* minimal command environment */
78         envp[i++] = "HOME=/";
79         envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
80         envp[i++] = cpu_str;
81         envp[i++] = action_str;
82         envp[i++] = devpath_str;
83         envp[i] = NULL;
84
85         return call_usermodehelper(argv[0], argv, envp, 0);
86 }
87
88 /* Take this CPU down. */
89 static int take_cpu_down(void *unused)
90 {
91         int err;
92
93         /* Take offline: makes arch_cpu_down somewhat easier. */
94         cpu_clear(smp_processor_id(), cpu_online_map);
95
96         /* Ensure this CPU doesn't handle any more interrupts. */
97         err = __cpu_disable();
98         if (err < 0)
99                 cpu_set(smp_processor_id(), cpu_online_map);
100         else
101                 /* Force idle task to run as soon as we yield: it should
102                    immediately notice cpu is offline and die quickly. */
103                 sched_idle_next();
104
105         return err;
106 }
107
108 int cpu_down(unsigned int cpu)
109 {
110         int err;
111         struct task_struct *p;
112         cpumask_t old_allowed, tmp;
113
114         if ((err = lock_cpu_hotplug_interruptible()) != 0)
115                 return err;
116
117         if (num_online_cpus() == 1) {
118                 err = -EBUSY;
119                 goto out;
120         }
121
122         if (!cpu_online(cpu)) {
123                 err = -EINVAL;
124                 goto out;
125         }
126
127         err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
128                                                 (void *)(long)cpu);
129         if (err == NOTIFY_BAD) {
130                 printk("%s: attempt to take down CPU %u failed\n",
131                                 __FUNCTION__, cpu);
132                 err = -EINVAL;
133                 goto out;
134         }
135
136         /* Ensure that we are not runnable on dying cpu */
137         old_allowed = current->cpus_allowed;
138         tmp = CPU_MASK_ALL;
139         cpu_clear(cpu, tmp);
140         set_cpus_allowed(current, tmp);
141
142         p = __stop_machine_run(take_cpu_down, NULL, cpu);
143         if (IS_ERR(p)) {
144                 /* CPU didn't die: tell everyone.  Can't complain. */
145                 if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
146                                 (void *)(long)cpu) == NOTIFY_BAD)
147                         BUG();
148
149                 err = PTR_ERR(p);
150                 goto out_allowed;
151         }
152
153         if (cpu_online(cpu))
154                 goto out_thread;
155
156         /* Wait for it to sleep (leaving idle task). */
157         while (!idle_cpu(cpu))
158                 yield();
159
160         /* This actually kills the CPU. */
161         __cpu_die(cpu);
162
163         /* Move it here so it can run. */
164         kthread_bind(p, smp_processor_id());
165
166         /* CPU is completely dead: tell everyone.  Too late to complain. */
167         if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
168             == NOTIFY_BAD)
169                 BUG();
170
171         check_for_tasks(cpu);
172
173         cpu_run_sbin_hotplug(cpu, "offline");
174
175 out_thread:
176         err = kthread_stop(p);
177 out_allowed:
178         set_cpus_allowed(current, old_allowed);
179 out:
180         unlock_cpu_hotplug();
181         return err;
182 }
183 #else
/* CPU hotplug not configured: nothing to tell userspace, always succeed. */
static inline int cpu_run_sbin_hotplug(unsigned int cpu, const char *action)
{
	return 0;
}
188 #endif /*CONFIG_HOTPLUG_CPU*/
189
190 int __devinit cpu_up(unsigned int cpu)
191 {
192         int ret;
193         void *hcpu = (void *)(long)cpu;
194
195         if ((ret = down_interruptible(&cpucontrol)) != 0)
196                 return ret;
197
198         if (cpu_online(cpu) || !cpu_present(cpu)) {
199                 ret = -EINVAL;
200                 goto out;
201         }
202         ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
203         if (ret == NOTIFY_BAD) {
204                 printk("%s: attempt to bring up CPU %u failed\n",
205                                 __FUNCTION__, cpu);
206                 ret = -EINVAL;
207                 goto out_notify;
208         }
209
210         /* Arch-specific enabling code. */
211         ret = __cpu_up(cpu);
212         if (ret != 0)
213                 goto out_notify;
214         if (!cpu_online(cpu))
215                 BUG();
216
217         /* Now call notifier in preparation. */
218         notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
219
220 out_notify:
221         if (ret != 0)
222                 notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
223 out:
224         up(&cpucontrol);
225         return ret;
226 }