#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <asm/alternative.h>
#include <asm/sections.h>

#ifdef CONFIG_X86_64_XEN
static int no_replacement = 1;
#else
static int no_replacement = 0;
#endif
static int smp_alt_once = 0;
static int debug_alternative = 0;

static int __init noreplacement_setup(char *s)
{
	no_replacement = 1;
	return 1;
}
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}

__setup("noreplacement", noreplacement_setup);
__setup("smp-alt-boot", bootonly);
__setup("debug-alternative", debug_alt);

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif
#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

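/*
 * Note the indexing scheme of the tables above: *_nops[n]
 * (1 <= n <= ASM_NOP_MAX) points at an n-byte nop sequence inside the
 * concatenated nop string emitted into .data, so an optimal n-byte nop
 * is a single table lookup away; slot 0 is unused and left NULL.
 */
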
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

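/* Pad a region of code with nops, using the nop flavour this CPU
   prefers (at most ASM_NOP_MAX bytes per chunk). */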
static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

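/* The symbols below are defined by the architecture linker script and
   bracket the build-time tables of alternative instructions and
   lock-prefix locations. */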
extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

extern u8 __smp_alt_begin[], __smp_alt_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */

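/*
 * Entries in these tables are generated by macros such as alternative()
 * in <asm/alternative.h>. A sketch of a typical use (modeled on the
 * mb() definition of this era; the instruction strings are illustrative):
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2);
 *
 * The macro emits the old instruction inline and records an alt_instr
 * entry in .altinstructions; apply_alternatives() below copies the
 * replacement over the original when the boot CPU has the feature bit.
 */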
void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
#ifdef CONFIG_XEN
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)phys_to_machine(__pa_symbol(&__vsyscall_0)));
#else
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
#endif
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}

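/*
 * UP/SMP self-patching support: the replacement buffer of each entry in
 * the .smp_altinstructions table is sized to hold both the replacement
 * and the original (SMP) instruction bytes, so the SMP code can be
 * parked behind the replacement and restored if a second CPU appears.
 */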
#ifdef CONFIG_SMP

static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		memcpy(a->replacement + a->replacementlen,
		       a->instr,
		       a->instrlen);
	}
}

static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;

	for (a = start; a < end; a++) {
		memcpy(a->instr,
		       a->replacement + a->replacementlen,
		       a->instrlen);
	}
}

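/*
 * The __smp_locks table records the address of every lock prefix in the
 * text. On UP the 0xf0 prefix is superfluous, so it is overwritten with
 * a one-byte nop; switching back to SMP restores the prefix.
 */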
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = 0xf0; /* lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	unsigned char **noptable = find_nop_table();
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		**ptr = noptable[1][0]; /* one-byte nop */
	}
}

struct smp_alt_module {
	/* the module owning these lock prefixes (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

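/*
 * Register a region containing lock prefixes (core kernel or module) so
 * that a later UP <-> SMP transition can re-patch it; with smp_alt_once
 * set, the patching happens exactly once here and nothing is tracked.
 */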
void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (no_replacement)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (no_replacement || smp_alt_once)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

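/*
 * Flip all registered text between its UP and SMP variants. A sketch of
 * the expected callers (based on the CPU hotplug code of this era):
 * smp=1 when a second CPU is brought up, smp=0 when the last sibling
 * goes away.
 */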
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (no_replacement || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		alternatives_smp_apply(__smp_alt_instructions,
				       __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		apply_alternatives(__smp_alt_instructions,
				   __smp_alt_instructions_end);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch *start, struct paravirt_patch *end)
{
	struct paravirt_patch *p;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);
#ifdef CONFIG_DEBUG_PARAVIRT
		{
			int i;
			/* Deliberately clobber regs using "not %reg" to find bugs. */
			for (i = 0; i < 3; i++) {
				if (p->len - used >= 2 && (p->clobbers & (1 << i))) {
					memcpy(p->instr + used, "\xf7\xd0", 2);
					p->instr[used+1] |= i;
					used += 2;
				}
			}
		}
#endif
		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}

	/* Sync to be conservative, in case we patched following instructions */
	sync_core();
}
extern struct paravirt_patch __start_parainstructions[],
	__stop_parainstructions[];
#endif /* CONFIG_PARAVIRT */

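/*
 * Boot-time entry point (called once from check_bugs() in the setup code
 * of this era): apply the feature alternatives, then either patch the
 * UP/SMP variant for good and free the tables, or register the core
 * kernel for later alternatives_smp_switch() calls.
 */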
void __init alternative_instructions(void)
{
	unsigned long flags;

	if (no_replacement) {
		printk(KERN_INFO "(SMP-)alternatives turned off\n");
#ifndef CONFIG_X86_64
		/* ToDo: x86_64 put something strange there, not sure what yet */
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
#endif
		return;
	}

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#else
	smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			apply_alternatives(__smp_alt_instructions,
					   __smp_alt_instructions_end);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_alt_begin,
				(unsigned long)__smp_alt_end);
	} else {
		alternatives_smp_save(__smp_alt_instructions,
				      __smp_alt_instructions_end);
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__start_parainstructions, __stop_parainstructions);
	local_irq_restore(flags);
}