1 #include <linux/module.h>
2 #include <linux/spinlock.h>
3 #include <linux/list.h>
4 #include <asm/alternative.h>
5 #include <asm/sections.h>
7 #ifdef CONFIG_X86_64_XEN
/* Xen builds default to replacement disabled (presumably because live
 * text patching is unsafe under the hypervisor -- TODO confirm). */
8 static int no_replacement = 1;
/* NOTE(review): the #else/#endif of this conditional are elided in this
 * extract; native builds default to replacement enabled. */
10 static int no_replacement = 0;
/* When set, SMP alternatives are patched once at boot only
 * ("smp-alt-boot" option registered below). */
12 static int smp_alt_once = 0;
/* When set, DPRINTK() emits diagnostics ("debug-alternative" option). */
13 static int debug_alternative = 0;
/* Boot-option parsers.  NOTE(review): the handler bodies are elided in
 * this extract; each presumably sets one of the flags above (only the
 * debug_alternative assignment is visible here). */
15 static int __init noreplacement_setup(char *s)
20 static int __init bootonly(char *str)
25 static int __init debug_alt(char *str)
27 debug_alternative = 1;
/* Register the corresponding kernel command-line options. */
31 __setup("noreplacement", noreplacement_setup);
32 __setup("smp-alt-boot", bootonly);
33 __setup("debug-alternative", debug_alt);
/*
 * DPRINTK(fmt, args...) -- debug output gated on the boot-time
 * "debug-alternative" flag.
 *
 * Wrapped in do { ... } while (0) so the macro always expands to a
 * single statement.  The previous bare "if (debug_alternative)
 * printk(...)" form is a classic dangling-else hazard: used un-braced
 * inside an if/else, the caller's "else" would silently bind to this
 * hidden "if".  Call sites keep supplying the trailing semicolon, so
 * they are unaffected.
 */
#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)
39 /* Use inline assembly to define this because the nops are defined
40 as inline assembly strings in the include files and we cannot
41 get them easily into strings. */
/* Emit one run of each Intel-recommended nop, lengths 1..8, back to
 * back into .data under the label "intelnops". */
42 asm("\t.data\nintelnops: "
43 GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
44 GENERIC_NOP7 GENERIC_NOP8);
45 extern unsigned char intelnops[];
/* intel_nops[n] points at the n-byte nop inside intelnops[] (offsets
 * are cumulative sums of the preceding nop lengths).
 * NOTE(review): the first few initializers and the closing brace are
 * elided in this extract. */
46 static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
51 intelnops + 1 + 2 + 3,
52 intelnops + 1 + 2 + 3 + 4,
53 intelnops + 1 + 2 + 3 + 4 + 5,
54 intelnops + 1 + 2 + 3 + 4 + 5 + 6,
55 intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/* Same layout as intelnops above, using the K8-recommended nops.
 * NOTE(review): the K8_NOP7/K8_NOP8 line, the asm terminator, the first
 * array initializers and the closing brace are elided in this extract. */
60 asm("\t.data\nk8nops: "
61 K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
63 extern unsigned char k8nops[];
/* k8_nops[n] -> the n-byte K8 nop within k8nops[]. */
64 static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
70 k8nops + 1 + 2 + 3 + 4,
71 k8nops + 1 + 2 + 3 + 4 + 5,
72 k8nops + 1 + 2 + 3 + 4 + 5 + 6,
73 k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/* Same layout again, using the K7-recommended nops.
 * NOTE(review): trailing asm line and leading array initializers are
 * elided in this extract, as above. */
78 asm("\t.data\nk7nops: "
79 K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
81 extern unsigned char k7nops[];
/* k7_nops[n] -> the n-byte K7 nop within k7nops[]. */
82 static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
88 k7nops + 1 + 2 + 3 + 4,
89 k7nops + 1 + 2 + 3 + 4 + 5,
90 k7nops + 1 + 2 + 3 + 4 + 5 + 6,
91 k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
/* x86-64 branch: start of the vsyscall page, used by the manual
 * vsyscall address fixup in apply_alternatives(). */
97 extern char __vsyscall_0;
/* NOTE(review): the 64-bit find_nop_table() body is elided in this
 * extract. */
98 static inline unsigned char** find_nop_table(void)
103 #else /* CONFIG_X86_64 */
/* 32-bit: select a nop table by boot-CPU feature flag.
 * NOTE(review): the noptypes[] table declaration and parts of its
 * contents are elided; only the K8/K7 rows are visible. */
107 unsigned char **noptable;
109 { X86_FEATURE_K8, k8_nops },
110 { X86_FEATURE_K7, k7_nops },
114 static unsigned char** find_nop_table(void)
/* Default to the Intel nops unless a feature match overrides it. */
116 unsigned char **noptable = intel_nops;
/* A negative cpuid terminates the noptypes[] table. */
119 for (i = 0; noptypes[i].cpuid >= 0; i++) {
120 if (boot_cpu_has(noptypes[i].cpuid)) {
121 noptable = noptypes[i].noptable;
128 #endif /* CONFIG_X86_64 */
/* Section boundary symbols (presumably provided by the linker script):
 * the boot-time alternatives table, the SMP alternatives table, the
 * recorded lock-prefix locations, and the SMP-alternatives data region
 * that can be freed once patching is final. */
130 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
131 extern struct alt_instr __smp_alt_instructions[], __smp_alt_instructions_end[];
132 extern u8 *__smp_locks[], *__smp_locks_end[];
134 extern u8 __smp_alt_begin[], __smp_alt_end[];
136 /* Replace instructions with better alternatives for this CPU type.
137 This runs before SMP is initialized to avoid SMP problems with
138 self modifying code. This implies that asymmetric systems where
139 APs have less capabilities than the boot processor are not handled.
140 Tough. Make sure you disable such features by hand. */
142 void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
/* NOTE(review): several lines of this function are elided in this
 * extract (local declarations, the continue path after the feature
 * check, the Xen/native #if-#else around the vsyscall fixup, and the
 * loop/function closing braces). */
144 unsigned char **noptable = find_nop_table();
149 DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
150 for (a = start; a < end; a++) {
/* A replacement must fit in the slot left by the original insn. */
151 BUG_ON(a->replacementlen > a->instrlen);
/* Skip entries whose required CPU feature the boot CPU lacks. */
152 if (!boot_cpu_has(a->cpuid))
156 /* vsyscall code is not mapped yet. resolve it manually. */
157 if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
/* Xen flavor of the fixup: adds a phys_to_machine() translation
 * (the surrounding #if CONFIG_X86_64_XEN arms are elided here). */
159 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)phys_to_machine(__pa_symbol(&__vsyscall_0)));
161 instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
163 DPRINTK("%s: vsyscall fixup: %p => %p\n",
164 __FUNCTION__, a->instr, instr);
/* Copy in the replacement, then nop-pad the leftover bytes. */
167 memcpy(instr, a->replacement, a->replacementlen);
168 diff = a->instrlen - a->replacementlen;
169 /* Pad the rest with nops */
170 for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
/* k-byte nop from the CPU-appropriate table (k is clamped in code
 * elided from this extract). */
174 memcpy(a->instr + i, noptable[k], k);
/* Stash the current (SMP) instruction bytes in the spare space after
 * each entry's replacement, so alternatives_smp_apply() can restore
 * them later (it reads from the same a->replacement + a->replacementlen
 * offset).  NOTE(review): locals and the tail of the memcpy() call are
 * elided in this extract. */
181 static void alternatives_smp_save(struct alt_instr *start, struct alt_instr *end)
185 DPRINTK("%s: alt table %p-%p\n", __FUNCTION__, start, end);
186 for (a = start; a < end; a++) {
187 memcpy(a->replacement + a->replacementlen,
/* Re-apply the SMP bytes previously stashed by alternatives_smp_save()
 * (stored just past each entry's replacement).  NOTE(review): most of
 * this function's body is elided in this extract. */
193 static void alternatives_smp_apply(struct alt_instr *start, struct alt_instr *end)
197 for (a = start; a < end; a++) {
199 a->replacement + a->replacementlen,
/* Restore a 0xf0 lock prefix at each recorded location; [text,
 * text_end) presumably bounds which recorded pointers are patched.
 * NOTE(review): the range check on *ptr is elided in this extract. */
204 static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
208 for (ptr = start; ptr < end; ptr++) {
213 **ptr = 0xf0; /* lock prefix */
/* Inverse of alternatives_smp_lock(): overwrite each recorded lock
 * prefix with the CPU's 1-byte nop (noptable[1][0]) for UP operation.
 * NOTE(review): the range check on *ptr is elided in this extract. */
217 static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
219 unsigned char **noptable = find_nop_table();
222 for (ptr = start; ptr < end; ptr++) {
227 **ptr = noptable[1][0];
/* Per-module record of lock-prefix locations, kept so prefixes can be
 * toggled later by alternatives_smp_switch().
 * NOTE(review): the member declarations themselves are largely elided
 * in this extract; only the comments and the list linkage survive. */
231 struct smp_alt_module {
232 /* module owning these lock prefixes; NULL means the core kernel
   (see the alternatives_smp_module_add(NULL, "core kernel", ...)
   caller in alternative_instructions()) */
236 /* ptrs to lock prefixes */
240 /* .text segment, needed to avoid patching init code ;) */
244 struct list_head next;
/* Global registry of the records above, guarded by the smp_alt lock. */
246 static LIST_HEAD(smp_alt_modules);
247 static DEFINE_SPINLOCK(smp_alt);
/* Register a module's lock-prefix list so it can be patched for
 * SMP/UP transitions.  mod may be NULL for the core kernel.
 * NOTE(review): several lines are elided in this extract -- the early
 * returns (presumably for no_replacement / smp_alt_once), the
 * allocation-failure check before the "return" at old line 268, and
 * the smp->mod / smp->name / smp->locks / smp->text assignments. */
249 void alternatives_smp_module_add(struct module *mod, char *name,
250 void *locks, void *locks_end,
251 void *text, void *text_end)
253 struct smp_alt_module *smp;
/* In the patch-once path: if the kernel already runs UP code, strip
 * the lock prefixes immediately and skip the registry. */
260 if (boot_cpu_has(X86_FEATURE_UP))
261 alternatives_smp_unlock(locks, locks_end,
266 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
268 return; /* we'll run the (safe but slow) SMP code then ... */
273 smp->locks_end = locks_end;
275 smp->text_end = text_end;
276 DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
277 __FUNCTION__, smp->locks, smp->locks_end,
278 smp->text, smp->text_end, smp->name);
/* Publish the record and, if currently in UP mode, unlock right away
 * under the registry lock. */
280 spin_lock_irqsave(&smp_alt, flags);
281 list_add_tail(&smp->next, &smp_alt_modules);
282 if (boot_cpu_has(X86_FEATURE_UP))
283 alternatives_smp_unlock(smp->locks, smp->locks_end,
284 smp->text, smp->text_end);
285 spin_unlock_irqrestore(&smp_alt, flags);
/* Remove a module's entry from the SMP-alternatives registry on module
 * unload.  NOTE(review): lines are elided in this extract -- the early
 * "return", the "continue" after the mod mismatch, and presumably a
 * kfree(item) + return after the DPRINTK (confirm: without them the
 * record would leak and the loop would run on with the lock dropped). */
288 void alternatives_smp_module_del(struct module *mod)
290 struct smp_alt_module *item;
/* Nothing was registered in these modes (see module_add). */
293 if (no_replacement || smp_alt_once)
296 spin_lock_irqsave(&smp_alt, flags);
297 list_for_each_entry(item, &smp_alt_modules, next) {
298 if (mod != item->mod)
300 list_del(&item->next);
/* Drop the lock before the debug print once the entry is unlinked. */
301 spin_unlock_irqrestore(&smp_alt, flags);
302 DPRINTK("%s: %s\n", __FUNCTION__, item->name);
/* Not found: just release the lock. */
306 spin_unlock_irqrestore(&smp_alt, flags);
/* Switch all registered text between SMP and UP variants: smp != 0
 * restores lock prefixes and the SMP alternative instructions; smp == 0
 * applies the UP alternatives and nops out the lock prefixes.
 * NOTE(review): elided lines include the CONFIG_LOCKDEP early return,
 * the if (smp) / else structure around the two branches, and the
 * closing braces. */
309 void alternatives_smp_switch(int smp)
311 struct smp_alt_module *mod;
314 #ifdef CONFIG_LOCKDEP
316 * A not yet fixed binutils section handling bug prevents
317 * alternatives-replacement from working reliably, so turn
320 printk("lockdep: not fixing up alternatives.\n");
324 if (no_replacement || smp_alt_once)
/* Switching to UP while more than one CPU is online would be a bug. */
326 BUG_ON(!smp && (num_online_cpus() > 1));
328 spin_lock_irqsave(&smp_alt, flags);
/* --- SMP direction: drop the UP feature bit, restore SMP code and
 * re-lock every registered module's prefixes. --- */
330 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
331 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
332 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
333 alternatives_smp_apply(__smp_alt_instructions,
334 __smp_alt_instructions_end);
335 list_for_each_entry(mod, &smp_alt_modules, next)
336 alternatives_smp_lock(mod->locks, mod->locks_end,
337 mod->text, mod->text_end);
/* --- UP direction: set the UP feature bit, apply UP alternatives and
 * nop out every registered module's lock prefixes. --- */
339 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
340 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
341 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
342 apply_alternatives(__smp_alt_instructions,
343 __smp_alt_instructions_end);
344 list_for_each_entry(mod, &smp_alt_modules, next)
345 alternatives_smp_unlock(mod->locks, mod->locks_end,
346 mod->text, mod->text_end);
348 spin_unlock_irqrestore(&smp_alt, flags);
/* Boot-time entry point: apply the base alternatives, then either
 * finalize to UP code (and free the SMP tables) or register the core
 * kernel for later SMP/UP switching.
 * NOTE(review): elided lines include local declarations, the return
 * after the no_replacement branch, the CONFIG_HOTPLUG_CPU #else arm
 * (which presumably forces smp_alt_once when CPU hotplug is off), the
 * if (smp_alt_once) / else structure, and several closing braces. */
353 void __init alternative_instructions(void)
/* "noreplacement": skip all patching and release the now-unused
 * SMP-alternatives region. */
356 if (no_replacement) {
357 printk(KERN_INFO "(SMP-)alternatives turned off\n");
358 free_init_pages("SMP alternatives",
359 (unsigned long)__smp_alt_begin,
360 (unsigned long)__smp_alt_end);
/* Patch with interrupts off -- this is self-modifying code. */
364 local_irq_save(flags);
365 apply_alternatives(__alt_instructions, __alt_instructions_end);
367 /* switch to patch-once-at-boottime-only mode and free the
368 * tables in case we know the number of CPUs will never ever
370 #ifdef CONFIG_HOTPLUG_CPU
371 if (num_possible_cpus() < 2)
/* Patch-once path on a single-CPU system: go straight to UP code and
 * free the SMP tables for good. */
379 if (1 == num_possible_cpus()) {
380 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
381 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
382 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
383 apply_alternatives(__smp_alt_instructions,
384 __smp_alt_instructions_end);
385 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
388 free_init_pages("SMP alternatives",
389 (unsigned long)__smp_alt_begin,
390 (unsigned long)__smp_alt_end);
/* Switchable path: save SMP bytes, register the core kernel's lock
 * prefixes, and start out in UP mode. */
392 alternatives_smp_save(__smp_alt_instructions,
393 __smp_alt_instructions_end);
394 alternatives_smp_module_add(NULL, "core kernel",
395 __smp_locks, __smp_locks_end,
397 alternatives_smp_switch(0);
400 local_irq_restore(flags);