/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
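/* Each message type above occupies one bit in ipi_data[cpu].bits:
   senders mark it with set_bit() in send_ipi_message() and the target
   drains the whole word with xchg() in handle_ipi() below.  */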
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern void calibrate_delay(void);
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_disable();

	/* Wait for the boot CPU to stop (it still has irqs enabled)
	   before running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);
	/* Allow the master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
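/*
 * Boot handshake summary: the boot CPU sets smp_secondary_alive to -1
 * ("wait"), starts the secondary via the console, then sets it to 0
 * ("go").  The secondary spins in wait_boot_cpu_to_stop() until it sees
 * a non-negative value, calibrates, and finally stores 1 ("alive"),
 * which smp_boot_one_cpu() below polls for.
 */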
/*
 * Rough estimation for SMP scheduling: this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (int cpuid)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;	/* kB */
	unsigned long freq;		/* Hz */
	unsigned long bandwidth = 350;	/* MB/s */

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
				      + cpuid * hwrpb->processor_size);

	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
	case EV67_CPU:
	default:
		on_chip_cache = 64 + 64;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;

	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
	       cacheflush_time/(freq/1000000),
	       (cacheflush_time*100/(freq/1000000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
	       (cache_decay_ticks + 1) * 1000 / HZ);
}
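/*
 * Worked example with assumed numbers (not from the original source):
 * a 500 MHz EV56 has 8+8+96 = 112 kB on-chip, so cacheflush_time =
 * 500 cycles/usec * (112 << 10) bytes / 350 bytes/usec = 163840 cycles,
 * i.e. roughly 328 usecs at 500 MHz.
 */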
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
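/*
 * The ipc_buffer layout used above: the message length goes in the low
 * int of ipc_buffer[0] and the text starts at ipc_buffer[1].  Setting
 * this cpu's bit in hwrpb->rxrdy tells its console a message is
 * waiting; recv_secondary_console_msg() below handles traffic in the
 * other direction via hwrpb->txrdy.
 */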
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
		      ((char*)hwrpb
		       + hwrpb->processor_offset
		       + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &idle->thread_info->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}

	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
static struct task_struct * __init
fork_by_hand(void)
{
	/* Don't care about the contents of regs since we'll never
	   reschedule the forked task.  */
	struct pt_regs regs;
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	wake_up_forked_process(idle);

	init_idle(idle, cpuid);
	unhash_process(idle);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				hwrpb_cpu_present_mask |= (1UL << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		hwrpb_cpu_present_mask = (1UL << boot_cpuid);
	}
	cpu_present_mask = cpumask_of_cpu(boot_cpuid);

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, hwrpb_cpu_present_mask);
}
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu_count, i;

	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_tune_scheduling(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
		if (i == boot_cpuid)
			continue;

		if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
			continue;

		cpu_set(i, cpu_possible_map);
		cpu_count++;
	}

	smp_num_cpus = cpu_count;
}
void __devinit
smp_prepare_boot_cpu(void)
{
	/*
	 * Mark the boot cpu (current cpu) as both present and online.
	 */
	cpu_set(smp_processor_id(), cpu_present_mask);
	cpu_set(smp_processor_id(), cpu_online_map);
}
int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
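/*
 * The arithmetic above converts the summed loops_per_jiffy to BogoMIPS,
 * where BogoMIPS = lpj * HZ / 500000: dividing by (500000/HZ) gives the
 * integer part, and dividing by (5000/HZ) modulo 100 gives the
 * hundredths; the +2500 is a rounding fudge.
 */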
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	if (!user)
		alpha_do_profile(regs->pc);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
static void
send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
{
	int i;

	mb();
	for_each_cpu_mask(i, to_whom)
		set_bit(operation, &ipi_data[i].bits);

	mb();
	for_each_cpu_mask(i, to_whom)
		wripir(i);
}
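/*
 * Ordering note on the above: the first mb() makes the sender's payload
 * (e.g. the smp_call_function_data pointer) visible before the message
 * bit is set; the second makes the bit visible before wripir() raises
 * the interprocessor interrupt on each target.
 */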
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;
/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially NULL.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (!retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
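/*
 * The sequence above is the classic Alpha load-locked/store-conditional
 * compare-and-swap: ldq_l reads the pointer, bne bails out if it is
 * already nonzero, and stq_c publishes data only if no other CPU wrote
 * the location in between (retrying at 1: if the conditional store
 * fails).
 */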
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
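/*
 * Dispatch trick used above: ops & -ops isolates the lowest set bit
 * (e.g. ops = 0x6 yields 0x2), ops &= ~which clears it, and __ffs()
 * turns it back into the enum ipi_message_type bit number (here 1,
 * i.e. IPI_CALL_FUNC).
 */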
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}
void
smp_send_stop(void)
{
	cpumask_t to_whom = cpu_possible_map;
	cpu_clear(smp_processor_id(), to_whom);
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, cpumask_t to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	cpu_clear(smp_processor_id(), to_whom);
	num_cpus_to_call = cpus_weight(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = NULL;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_online_map);
}
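/*
 * Illustrative use (the names here are hypothetical, not part of this
 * file):
 *
 *	static void ipi_ping(void *info)
 *	{
 *		atomic_inc((atomic_t *)info);
 *	}
 *
 *	atomic_t acks = ATOMIC_INIT(0);
 *	if (smp_call_function(ipi_ping, &acks, 1, 1) == 0) {
 *		// With wait=1, every other online CPU has run
 *		// ipi_ping before we get here.
 *	}
 */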
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache
	   before continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)
static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}

void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
#ifdef CONFIG_DEBUG_SPINLOCK
void
_raw_spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}
void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck) : "memory");

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}
int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	}
	lock->base_file = base_file;
	lock->line_no = line_no;
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */
#ifdef CONFIG_DEBUG_RWLOCK
void _raw_write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	blt	%1,8f\n"
	"	br	1b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}
void _raw_read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */