1 #include <linux/module.h>
2 #include <linux/spinlock.h>
3 #include <linux/list.h>
4 #include <asm/alternative.h>
5 #include <asm/sections.h>
/* Debug printout helper: a real printk when debugging is enabled, a no-op
 * otherwise.  NOTE(review): the surrounding #ifdef DEBUG / #else / #endif
 * lines appear to be missing from this view of the file; as shown, the
 * second definition would redefine the first. */
9 # define DPRINTK(fmt, args...) printk(fmt, args)
11 # define DPRINTK(fmt, args...)
14 /* Use inline assembly to define this because the nops are defined
15 as inline assembly strings in the include files and we cannot
16 get them easily into strings. */
/* Emit the raw NOP byte sequences (lengths 1..8) for each CPU flavour into
 * .data, so patch-time code can memcpy from them by address. */
17 asm("\t.data\nintelnops: "
18 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
19 GENERIC_NOP7 GENERIC_NOP8);
/* NOTE(review): the k8 and k7 asm statements below appear truncated in this
 * view — the trailing NOP7/NOP8 operands and the closing ");" are missing. */
20 asm("\t.data\nk8nops: "
21 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
23 asm("\t.data\nk7nops: "
24 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
27 extern unsigned char intelnops[], k8nops[], k7nops[];
/*
 * Lookup tables: entry [len] points at a NOP sequence of exactly `len`
 * bytes inside the corresponding .data blob emitted above (each offset is
 * the sum of the lengths of the shorter sequences that precede it).
 * NOTE(review): the leading initializer entries and the closing "};" of
 * each array are missing from this view of the file.
 */
28 static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
33 intelnops + 1 + 2 + 3,
34 intelnops + 1 + 2 + 3 + 4,
35 intelnops + 1 + 2 + 3 + 4 + 5,
36 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
37 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
39 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
45 k8nops + 1 + 2 + 3 + 4,
46 k8nops + 1 + 2 + 3 + 4 + 5,
47 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
48 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
50 static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
56 k7nops + 1 + 2 + 3 + 4,
57 k7nops + 1 + 2 + 3 + 4 + 5,
58 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
59 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/* Maps a CPU feature bit to the NOP table to use on CPUs with that
 * feature; scanned by find_nop_table() below.
 * NOTE(review): the struct/array head (presumably something like
 * "static struct nop { int cpuid; ... } noptypes[] = {") and the
 * terminating sentinel entry are missing from this view of the file. */
63 unsigned char **noptable;
65 { X86_FEATURE_K8, k8_nops },
66 { X86_FEATURE_K7, k7_nops },
/* Symbols provided by the linker script: the boot-time alternatives table,
 * the SMP alternatives table, the list of recorded lock-prefix byte
 * addresses, and the bounds of the freeable SMP-alternatives section. */
71 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
72 extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
73 extern u8 *__smp_locks[], *__smp_locks_end[];
75 extern u8 __smp_alt_begin[], __smp_alt_end[];
/*
 * Select the NOP table for the boot CPU: default to the generic
 * intel_nops, then scan noptypes[] and take the table of the first entry
 * whose CPU feature bit the boot CPU has.
 * NOTE(review): the loop's closing braces and the final
 * "return noptable;" are missing from this view of the file.
 */
78 static unsigned char** find_nop_table(void)
80 unsigned char **noptable = intel_nops;
83 for (i = 0; noptypes[i].cpuid >= 0; i++) {
84 if (boot_cpu_has(noptypes[i].cpuid)) {
85 noptable = noptypes[i].noptable;
92 /* Replace instructions with better alternatives for this CPU type.
93 This runs before SMP is initialized to avoid SMP problems with
94 self modifying code. This implies that assymetric systems where
95 APs have less capabilities than the boot processor are not handled.
96 Tough. Make sure you disable such features by hand. */
98 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
100 unsigned char **noptable = find_nop_table();
104 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
/* Walk every alt_instr record in [start, end). */
105 for (a = start; a < end; a++) {
/* A replacement must never be longer than the site it patches. */
106 BUG_ON(a->replacementlen > a->instrlen);
/* Skip entries whose required CPU feature this CPU lacks. */
107 if (!boot_cpu_has(a->cpuid))
109 memcpy(a->instr, a->replacement, a->replacementlen);
110 diff = a->instrlen - a->replacementlen;
111 /* Pad the rest with nops */
/* Fill the leftover bytes with NOP sequences from the per-CPU table;
 * `k` is presumably clamped to min(diff, ASM_NOP_MAX) on lines missing
 * from this view — TODO confirm against the full file. */
112 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
116 memcpy(a->instr + i, noptable[k], k);
/*
 * Save instruction bytes for each alternative site so they can be
 * re-applied later by alternatives_smp_apply(); from the visible memcpy,
 * the spare space after the replacement in the alt table is used as the
 * save area.  NOTE(review): the memcpy's source/length arguments and the
 * function's closing lines are missing from this view of the file.
 */
122 static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
126 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
127 for (a = start; a < end; a++) {
128 memcpy(a->replacement + a->replacementlen,
/*
 * Copy the bytes stashed by alternatives_smp_save() back (inverse
 * operation).  NOTE(review): the memcpy call around the visible argument
 * spans lines missing from this view of the file.
 */
134 static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
138 for (a = start; a < end; a++) {
140 a->replacement + a->replacementlen,
/*
 * Re-enable SMP locking: write the 0xf0 LOCK prefix byte at every
 * recorded address in [start, end).  text/text_end presumably bound the
 * patchable region (the bounds-check lines are missing from this view —
 * TODO confirm against the full file).
 */
145 static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
149 for (ptr = start; ptr < end; ptr++) {
154 **ptr = 0xf0; /* lock prefix */
/*
 * Disable SMP locking for UP operation: overwrite each recorded lock
 * site with this CPU's 1-byte NOP (noptable[1][0]).
 * NOTE(review): the bounds checks against text/text_end are missing from
 * this view of the file.
 */
158 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
160 unsigned char **noptable = find_nop_table();
163 for (ptr = start; ptr < end; ptr++) {
168 **ptr = noptable[1][0];
/* Bookkeeping for one module's SMP-alternative data, kept on the
 * smp_alt_modules list so lock prefixes can be patched in or out when
 * the system switches between UP and SMP mode.
 * NOTE(review): the member declarations under each section comment
 * (presumably mod/name, locks/locks_end, text/text_end) are missing
 * from this view of the file. */
172 struct smp_alt_module {
173 /* what is this ??? */
177 /* ptrs to lock prefixes */
181 /* .text segment, needed to avoid patching init code ;) */
185 struct list_head next;
/* Global registry of the structs above; guarded by the smp_alt spinlock. */
187 static LIST_HEAD(smp_alt_modules);
188 static DEFINE_SPINLOCK(smp_alt);
/* When non-zero, alternatives are applied once at boot only; toggled by
 * the "smp-alt-boot" kernel command-line option below.
 * NOTE(review): bootonly()'s body (presumably "smp_alt_once = 1;
 * return 1;") is missing from this view of the file. */
190 static int smp_alt_once = 0;
191 static int __init bootonly(char *str)
196 __setup("smp-alt-boot", bootonly);
/*
 * Register a module's lock-prefix list for later UP/SMP switching.
 * On a known-UP system (X86_FEATURE_UP set) the lock prefixes are
 * patched out immediately; the module is also recorded on
 * smp_alt_modules under the smp_alt spinlock, unlocking again there if
 * X86_FEATURE_UP is set.  A failed kzalloc() is tolerated: we simply
 * keep running the safe-but-slower SMP code for that module.
 * NOTE(review): several lines are missing from this view of the file —
 * early smp_alt_once handling, the NULL check after kzalloc, the
 * assignments of smp->mod/name/locks/text, and closing braces.
 */
198 void alternatives_smp_module_add(struct module *mod, char *name,
199 void *locks, void *locks_end,
200 void *text, void *text_end)
202 struct smp_alt_module *smp;
206 if (boot_cpu_has(X86_FEATURE_UP))
207 alternatives_smp_unlock(locks, locks_end,
212 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
214 return; /* we'll run the (safe but slow) SMP code then ... */
219 smp->locks_end = locks_end;
221 smp->text_end = text_end;
222 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
223 __FUNCTION__, smp->locks, smp->locks_end,
224 smp->text, smp->text_end, smp->name);
226 spin_lock_irqsave(&smp_alt, flags);
227 list_add_tail(&smp->next, &smp_alt_modules);
228 if (boot_cpu_has(X86_FEATURE_UP))
229 alternatives_smp_unlock(smp->locks, smp->locks_end,
230 smp->text, smp->text_end);
231 spin_unlock_irqrestore(&smp_alt, flags);
/*
 * Unregister a module: scan smp_alt_modules under the smp_alt spinlock
 * for the entry matching `mod`, unlink it, and drop the lock.  The lock
 * is released on both the found and the not-found exit paths.
 * NOTE(review): lines are missing from this view of the file — the early
 * smp_alt_once return, the `continue` under the mismatch test, the
 * kfree() of the removed item, and closing braces.
 */
234 void alternatives_smp_module_del(struct module *mod)
236 struct smp_alt_module *item;
242 spin_lock_irqsave(&smp_alt, flags);
243 list_for_each_entry(item, &smp_alt_modules, next) {
244 if (mod != item->mod)
246 list_del(&item->next);
247 spin_unlock_irqrestore(&smp_alt, flags);
248 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
252 spin_unlock_irqrestore(&smp_alt, flags);
/*
 * Switch the kernel text between its SMP and UP variants (e.g. at CPU
 * hotplug).  smp != 0: clear X86_FEATURE_UP, restore the saved SMP
 * instructions and re-insert LOCK prefixes in every registered module.
 * smp == 0: set X86_FEATURE_UP, apply the UP alternatives and NOP out
 * the LOCK prefixes.  All patching happens under the smp_alt spinlock;
 * the BUG_ON refuses to switch to UP code while more than one CPU is
 * online.  NOTE(review): the if (smp) / else lines that select between
 * the two branches are missing from this view of the file.
 */
255 void alternatives_smp_switch(int smp)
257 struct smp_alt_module *mod;
262 BUG_ON(!smp && (num_online_cpus() > 1));
264 spin_lock_irqsave(&smp_alt, flags);
266 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
267 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
268 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
269 alternatives_smp_apply(__smp_alt_instructions,
270 __smp_alt_instructions_end);
271 list_for_each_entry(mod, &smp_alt_modules, next)
272 alternatives_smp_lock(mod->locks, mod->locks_end,
273 mod->text, mod->text_end);
275 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
276 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
277 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
278 apply_alternatives(__smp_alt_instructions,
279 __smp_alt_instructions_end);
280 list_for_each_entry(mod, &smp_alt_modules, next)
281 alternatives_smp_unlock(mod->locks, mod->locks_end,
282 mod->text, mod->text_end);
284 spin_unlock_irqrestore(&smp_alt, flags);
/*
 * Boot-time entry point.  First apply the CPU-specific alternatives,
 * then set up UP/SMP switching: if only one CPU can ever be present
 * (num_possible_cpus() == 1), patch to the UP code immediately, NOP out
 * the core kernel's lock prefixes, and free the SMP-alternatives section;
 * otherwise save the SMP instruction bytes, register the core kernel's
 * lock list via alternatives_smp_module_add(), and start in UP mode with
 * alternatives_smp_switch(0).  NOTE(review): the smp_alt_once handling,
 * several condition/else lines, some call arguments, and closing braces
 * are missing from this view of the file.
 */
289 void __init alternative_instructions(void)
291 apply_alternatives(__alt_instructions, __alt_instructions_end);
295 /* switch to patch-once-at-boottime-only mode and free the
296 * tables in case we know the number of CPUs will never ever
298 #ifdef CONFIG_HOTPLUG_CPU
299 if (num_possible_cpus() < 2)
306 if (1 == num_possible_cpus()) {
307 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
308 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
309 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
310 apply_alternatives(__smp_alt_instructions,
311 __smp_alt_instructions_end);
312 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
315 free_init_pages("SMP alternatives",
316 (unsigned long)__smp_alt_begin,
317 (unsigned long)__smp_alt_end);
319 alternatives_smp_save(__smp_alt_instructions,
320 __smp_alt_instructions_end);
321 alternatives_smp_module_add(NULL, "core kernel",
322 __smp_locks, __smp_locks_end,
324 alternatives_smp_switch(0);