/*
 *	linux/arch/alpha/kernel/smp.c
 *
 *      2001-07-09 Phil Ezolt (Phillip.Ezolt@compaq.com)
 *            Renamed modified smp_call_function to smp_call_function_on_cpu()
 *            Created a function that conforms to the old calling convention
 *            of smp_call_function().
 *
 *            This is helpful for DCPI.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/cache.h>

#include <asm/hwrpb.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>

#include "proto.h"
#include "irq_impl.h"

#define DEBUG_SMP 0
#if DEBUG_SMP
#define DBGS(args)	printk args
#else
#define DBGS(args)
#endif
/* A collection of per-processor data.  */
struct cpuinfo_alpha cpu_data[NR_CPUS];

/* A collection of single bit ipi messages.  */
static struct {
	unsigned long bits ____cacheline_aligned;
} ipi_data[NR_CPUS] __cacheline_aligned;
enum ipi_message_type {
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CPU_STOP,
};
/* Set to a secondary's cpuid when it comes online.  */
static int smp_secondary_alive __initdata = 0;

/* Which cpu ids came online.  */
unsigned long cpu_present_mask;
cpumask_t cpu_online_map;

EXPORT_SYMBOL(cpu_online_map);

/* cpus reported in the hwrpb */
static unsigned long hwrpb_cpu_present_mask __initdata = 0;

int smp_num_probed;		/* Internal processor count */
int smp_num_cpus = 1;		/* Number that came online.  */
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

extern void calibrate_delay(void);
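
/*
 * Boot handshake in brief: the boot CPU sets smp_secondary_alive to -1
 * before starting a secondary; the secondary spins in
 * wait_boot_cpu_to_stop() until the boot CPU drops the flag to 0, then
 * calibrates its delay loop and stores 1 to announce it is alive.
 */
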
/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
static inline void __init
smp_store_cpu_info(int cpuid)
{
	cpu_data[cpuid].loops_per_jiffy = loops_per_jiffy;
	cpu_data[cpuid].last_asn = ASN_FIRST_VERSION;
	cpu_data[cpuid].need_new_asn = 0;
	cpu_data[cpuid].asn_lock = 0;
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer(int cpuid)
{
	cpu_data[cpuid].prof_counter = 1;
	cpu_data[cpuid].prof_multiplier = 1;
}
static void __init
wait_boot_cpu_to_stop(int cpuid)
{
	unsigned long stop = jiffies + 10*HZ;

	while (time_before(jiffies, stop)) {
		if (!smp_secondary_alive)
			return;
		barrier();
	}

	printk("wait_boot_cpu_to_stop: FAILED on CPU %d, hanging now\n", cpuid);
	for (;;)
		barrier();
}
/*
 * Where secondaries begin a life of C.
 */
void __init
smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpu_test_and_set(cpuid, cpu_online_map)) {
		printk("??, cpu 0x%x already present??\n", cpuid);
		BUG();
	}

	/* Turn on machine checks.  */
	wrmces(7);

	/* Set trap vectors.  */
	trap_init();

	/* Set interrupt vector.  */
	wrent(entInt, 0);

	/* Get our local ticker going.  */
	smp_setup_percpu_timer(cpuid);

	/* Call platform-specific callin, if specified.  */
	if (alpha_mv.smp_callin) alpha_mv.smp_callin();

	/* All kernel threads share the same mm context.  */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Must have completely accurate bogos.  */
	local_irq_enable();

	/* Wait for the boot CPU to stop, with irqs enabled, before
	   running calibrate_delay.  */
	wait_boot_cpu_to_stop(cpuid);
	mb();
	calibrate_delay();

	smp_store_cpu_info(cpuid);

	/* Allow master to continue only after we've written
	   loops_per_jiffy.  */
	wmb();
	smp_secondary_alive = 1;

	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
	      cpuid, current, current->active_mm));

	/* Do nothing.  */
	cpu_idle();
}
/*
 * Rough estimation for SMP scheduling, this is the number of cycles it
 * takes for a fully memory-limited process to flush the SMP-local cache.
 *
 * We are not told how much cache there is, so we have to guess.
 */
static void __init
smp_tune_scheduling (int cpuid)
{
	struct percpu_struct *cpu;
	unsigned long on_chip_cache;	/* kB */
	unsigned long freq;		/* Hz */
	unsigned long bandwidth = 350;	/* MB/s */

	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
				      + cpuid * hwrpb->processor_size);
	switch (cpu->type)
	{
	case EV45_CPU:
		on_chip_cache = 16 + 16;
		break;

	case EV5_CPU:
	case EV56_CPU:
		on_chip_cache = 8 + 8 + 96;
		break;

	case PCA56_CPU:
		on_chip_cache = 16 + 8;
		break;

	case EV6_CPU:
	case EV67_CPU:
	default:
		on_chip_cache = 64 + 64;
		break;
	}

	freq = hwrpb->cycle_freq ? : est_cycle_freq;

	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
	       cacheflush_time/(freq/1000000),
	       (cacheflush_time*100/(freq/1000000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
	       (cache_decay_ticks + 1) * 1000 / HZ);
}
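
/*
 * Messages to and from a secondary's console travel through the HWRPB:
 * the sender copies a length-prefixed string into the target's
 * ipc_buffer and sets the target's bit in rxrdy; txrdy bits flag
 * messages waiting to be read back.
 */
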
/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
static int __init
wait_for_txrdy (unsigned long cpumask)
{
	unsigned long timeout;

	if (!(hwrpb->txrdy & cpumask))
		return 0;

	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (!(hwrpb->txrdy & cpumask))
			return 0;
		udelay(10);
		barrier();
	}

	return -1;
}
/*
 * Send a message to a secondary's console.  "START" is one such
 * interesting message.  ;-)
 */
static void __init
send_secondary_console_msg(char *str, int cpuid)
{
	struct percpu_struct *cpu;
	register char *cp1, *cp2;
	unsigned long cpumask;
	size_t len;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);

	cpumask = (1UL << cpuid);
	if (wait_for_txrdy(cpumask))
		goto timeout;

	cp2 = str;
	len = strlen(cp2);
	*(unsigned int *)&cpu->ipc_buffer[0] = len;
	cp1 = (char *) &cpu->ipc_buffer[1];
	memcpy(cp1, cp2, len);

	/* atomic test and set */
	wmb();
	set_bit(cpuid, &hwrpb->rxrdy);

	if (wait_for_txrdy(cpumask))
		goto timeout;
	return;

 timeout:
	printk("Processor %x not ready\n", cpuid);
}
/*
 * A secondary console wants to send a message.  Receive it.
 */
static void
recv_secondary_console_msg(void)
{
	int mycpu, i, cnt;
	unsigned long txrdy = hwrpb->txrdy;
	char *cp1, *cp2, buf[80];
	struct percpu_struct *cpu;

	DBGS(("recv_secondary_console_msg: TXRDY 0x%lx.\n", txrdy));

	mycpu = hard_smp_processor_id();

	for (i = 0; i < NR_CPUS; i++) {
		if (!(txrdy & (1UL << i)))
			continue;

		DBGS(("recv_secondary_console_msg: "
		      "TXRDY contains CPU %d.\n", i));

		cpu = (struct percpu_struct *)
			((char*)hwrpb
			 + hwrpb->processor_offset
			 + i * hwrpb->processor_size);

		DBGS(("recv_secondary_console_msg: on %d from %d"
		      " HALT_REASON 0x%lx FLAGS 0x%lx\n",
		      mycpu, i, cpu->halt_reason, cpu->flags));

		cnt = cpu->ipc_buffer[0] >> 32;
		if (cnt <= 0 || cnt >= 80)
			strcpy(buf, "<<< BOGUS MSG >>>");
		else {
			cp1 = (char *) &cpu->ipc_buffer[11];
			cp2 = buf;
			strcpy(cp2, cp1);

			while ((cp2 = strchr(cp2, '\r')) != 0) {
				*cp2 = ' ';
				if (cp2[1] == '\n')
					cp2[1] = ' ';
			}
		}

		DBGS((KERN_INFO "recv_secondary_console_msg: on %d "
		      "message is '%s'\n", mycpu, buf));
	}

	hwrpb->txrdy = 0;
}
/*
 * Convince the console to have a secondary cpu begin execution.
 */
static int __init
secondary_cpu_start(int cpuid, struct task_struct *idle)
{
	struct percpu_struct *cpu;
	struct pcb_struct *hwpcb, *ipcb;
	unsigned long timeout;

	cpu = (struct percpu_struct *)
		((char*)hwrpb
		 + hwrpb->processor_offset
		 + cpuid * hwrpb->processor_size);
	hwpcb = (struct pcb_struct *) cpu->hwpcb;
	ipcb = &idle->thread_info->pcb;

	/* Initialize the CPU's HWPCB to something just good enough for
	   us to get started.  Immediately after starting, we'll swpctx
	   to the target idle task's pcb.  Reuse the stack in the mean
	   time.  Precalculate the target PCBB.  */
	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
	hwpcb->usp = 0;
	hwpcb->ptbr = ipcb->ptbr;
	hwpcb->pcc = 0;
	hwpcb->asn = 0;
	hwpcb->unique = virt_to_phys(ipcb);
	hwpcb->flags = ipcb->flags;
	hwpcb->res1 = hwpcb->res2 = 0;

#if 0
	DBGS(("KSP 0x%lx PTBR 0x%lx VPTBR 0x%lx UNIQUE 0x%lx\n",
	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
#endif
	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
	      cpuid, idle->state, ipcb->flags));

	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
	hwrpb->CPU_restart = __smp_callin;
	hwrpb->CPU_restart_data = (unsigned long) __smp_callin;

	/* Recalculate and update the HWRPB checksum */
	hwrpb_update_checksum(hwrpb);

	/*
	 * Send a "start" command to the specified processor.
	 */

	/* SRM III 3.4.1.3 */
	cpu->flags |= 0x22;	/* turn on Context Valid and Restart Capable */
	cpu->flags &= ~1;	/* turn off Bootstrap In Progress */
	wmb();

	send_secondary_console_msg("START\r\n", cpuid);

	/* Wait 10 seconds for an ACK from the console.  */
	timeout = jiffies + 10*HZ;
	while (time_before(jiffies, timeout)) {
		if (cpu->flags & 1)
			goto started;
		udelay(10);
		barrier();
	}
	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
	return -1;

 started:
	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
	return 0;
}
static struct task_struct * __init
fork_by_hand(void)
{
	/* Don't care about the contents of regs since we'll never
	   reschedule the forked task.  */
	struct pt_regs regs;
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
/*
 * Bring one cpu online.
 */
static int __init
smp_boot_one_cpu(int cpuid)
{
	struct task_struct *idle;
	unsigned long timeout;

	/* Cook up an idler for this guy.  Note that the address we
	   give to kernel_thread is irrelevant -- it's going to start
	   where HWRPB.CPU_restart says to start.  But this gets all
	   the other task-y sort of data structures set up like we
	   wish.  We can't use kernel_thread since we must avoid
	   rescheduling the child.  */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpuid);

	wake_up_forked_process(idle);

	init_idle(idle, cpuid);
	unhash_process(idle);

	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
	      cpuid, idle->state, idle->flags));

	/* Signal the secondary to wait a moment.  */
	smp_secondary_alive = -1;

	/* Whirrr, whirrr, whirrrrrrrrr... */
	if (secondary_cpu_start(cpuid, idle))
		return -1;

	/* Notify the secondary CPU it can run calibrate_delay.  */
	mb();
	smp_secondary_alive = 0;

	/* We've been acked by the console; wait one second for
	   the task to start up for real.  */
	timeout = jiffies + 1*HZ;
	while (time_before(jiffies, timeout)) {
		if (smp_secondary_alive == 1)
			goto alive;
		udelay(10);
		barrier();
	}

	/* We failed to boot the CPU.  */

	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
	return -1;

 alive:
	/* Another "Red Snapper". */
	return 0;
}
/*
 * Called from setup_arch.  Detect an SMP system and which processors
 * are present.
 */
void __init
setup_smp(void)
{
	struct percpu_struct *cpubase, *cpu;
	int i;

	if (boot_cpuid != 0) {
		printk(KERN_WARNING "SMP: Booting off cpu %d instead of 0?\n",
		       boot_cpuid);
	}

	if (hwrpb->nr_processors > 1) {
		int boot_cpu_palrev;

		DBGS(("setup_smp: nr_processors %ld\n",
		      hwrpb->nr_processors));

		cpubase = (struct percpu_struct *)
			((char*)hwrpb + hwrpb->processor_offset);
		boot_cpu_palrev = cpubase->pal_revision;

		for (i = 0; i < hwrpb->nr_processors; i++) {
			cpu = (struct percpu_struct *)
				((char *)cpubase + i*hwrpb->processor_size);
			if ((cpu->flags & 0x1cc) == 0x1cc) {
				smp_num_probed++;
				/* Assume here that "whami" == index */
				hwrpb_cpu_present_mask |= (1UL << i);
				cpu->pal_revision = boot_cpu_palrev;
			}

			DBGS(("setup_smp: CPU %d: flags 0x%lx type 0x%lx\n",
			      i, cpu->flags, cpu->type));
			DBGS(("setup_smp: CPU %d: PAL rev 0x%lx\n",
			      i, cpu->pal_revision));
		}
	} else {
		smp_num_probed = 1;
		hwrpb_cpu_present_mask = (1UL << boot_cpuid);
	}
	cpu_present_mask = 1UL << boot_cpuid;

	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
	       smp_num_probed, hwrpb_cpu_present_mask);
}
/*
 * Called by smp_init to prepare the secondaries.
 */
void __init
smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu_count, i;

	/* Take care of some initial bookkeeping.  */
	memset(ipi_data, 0, sizeof(ipi_data));

	current_thread_info()->cpu = boot_cpuid;

	smp_store_cpu_info(boot_cpuid);
	smp_tune_scheduling(boot_cpuid);
	smp_setup_percpu_timer(boot_cpuid);

	/* Nothing to do on a UP box, or when told not to.  */
	if (smp_num_probed == 1 || max_cpus == 0) {
		cpu_present_mask = 1UL << boot_cpuid;
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	printk(KERN_INFO "SMP starting up secondaries.\n");

	cpu_count = 1;
	for (i = 0; (i < NR_CPUS) && (cpu_count < max_cpus); i++) {
		if (i == boot_cpuid)
			continue;

		if (((hwrpb_cpu_present_mask >> i) & 1) == 0)
			continue;

		cpu_present_mask |= 1UL << i;
		cpu_count++;
	}

	smp_num_cpus = cpu_count;
}
void __devinit
smp_prepare_boot_cpu(void)
{
	/*
	 * Mark the boot cpu (current cpu) as both present and online.
	 */
	cpu_set(smp_processor_id(), cpu_present_mask);
	cpu_set(smp_processor_id(), cpu_online_map);
}
int __devinit
__cpu_up(unsigned int cpu)
{
	smp_boot_one_cpu(cpu);

	return cpu_online(cpu) ? 0 : -ENOSYS;
}
void __init
smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data[cpu].loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long)num_online_cpus(),
	       (bogosum + 2500) / (500000/HZ),
	       ((bogosum + 2500) / (5000/HZ)) % 100);
}
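
/*
 * The per-CPU timer interrupt: profile the interrupted PC and, once
 * every prof_counter ticks, do the normal per-CPU timer bookkeeping.
 */
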
void
smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long user = user_mode(regs);
	struct cpuinfo_alpha *data = &cpu_data[cpu];

	/* Record kernel PC.  */
	if (!user)
		alpha_do_profile(regs->pc);

	if (!--data->prof_counter) {
		/* We need to make like a normal interrupt -- otherwise
		   timer interrupts ignore the global interrupt lock,
		   which would be a Bad Thing.  */
		irq_enter();

		update_process_times(user);

		data->prof_counter = data->prof_multiplier;

		irq_exit();
	}
}

int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
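
/*
 * send_ipi_message() below makes two passes over the target mask: the
 * first posts the operation bit for every target, the second raises
 * the actual interprocessor interrupts, with a memory barrier ahead
 * of each pass to keep the stores ordered.
 */
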
static void
send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
{
	unsigned long i, set, n;

	mb();
	for (i = to_whom; i ; i &= ~set) {
		set = i & -i;
		n = __ffs(set);
		set_bit(operation, &ipi_data[n].bits);
	}

	mb();
	for (i = to_whom; i ; i &= ~set) {
		set = i & -i;
		n = __ffs(set);
		wripir(n);
	}
}
/* Structure and data for smp_call_function.  This is designed to
   minimize static memory requirements.  Plus it looks cleaner.  */

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};

static struct smp_call_struct *smp_call_function_data;
/* Atomically drop data into a shared pointer.  The pointer is free if
   it is initially zero.  If retry, spin until free.  */

static int
pointer_lock (void *lock, void *data, int retry)
{
	void *old, *tmp;

	mb();
 again:
	/* Compare and swap with zero.  */
	asm volatile (
	"1:	ldq_l	%0,%1\n"
	"	mov	%3,%2\n"
	"	bne	%0,2f\n"
	"	stq_c	%2,%1\n"
	"	beq	%2,1b\n"
	"2:"
	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
	: "r"(data)
	: "memory");

	if (old == 0)
		return 0;
	if (! retry)
		return -EBUSY;

	while (*(void **)lock)
		barrier();
	goto again;
}
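
/* Dispatch all single-bit IPI messages pending for this CPU.  */
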
void
handle_ipi(struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	unsigned long *pending_ipis = &ipi_data[this_cpu].bits;
	unsigned long ops;

#if 0
	DBGS(("handle_ipi: on CPU %d ops 0x%lx PC 0x%lx\n",
	      this_cpu, *pending_ipis, regs->pc));
#endif

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
	  mb();	/* Order bit clearing and data access. */
	  do {
		unsigned long which;

		which = ops & -ops;
		ops &= ~which;
		which = __ffs(which);

		switch (which) {
		case IPI_RESCHEDULE:
			/* Reschedule callback.  Everything to be done
			   is done by the interrupt return path.  */
			break;

		case IPI_CALL_FUNC:
		    {
			struct smp_call_struct *data;
			void (*func)(void *info);
			void *info;
			int wait;

			data = smp_call_function_data;
			func = data->func;
			info = data->info;
			wait = data->wait;

			/* Notify the sending CPU that the data has been
			   received, and execution is about to begin.  */
			mb();
			atomic_dec (&data->unstarted_count);

			/* At this point the structure may be gone unless
			   wait is true.  */
			(*func)(info);

			/* Notify the sending CPU that the task is done.  */
			mb();
			if (wait) atomic_dec (&data->unfinished_count);
			break;
		    }

		case IPI_CPU_STOP:
			halt();

		default:
			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
			       this_cpu, which);
			break;
		}
	  } while (ops);

	  mb();	/* Order data access and bit testing. */
	}

	cpu_data[this_cpu].ipi_count++;

	if (hwrpb->txrdy)
		recv_secondary_console_msg();
}
void
smp_send_reschedule(int cpu)
{
#ifdef DEBUG_IPI_MSG
	if (cpu == hard_smp_processor_id())
		printk(KERN_WARNING
		       "smp_send_reschedule: Sending IPI to self.\n");
#endif
	send_ipi_message(1UL << cpu, IPI_RESCHEDULE);
}

void
smp_send_stop(void)
{
	unsigned long to_whom = cpu_present_mask & ~(1UL << smp_processor_id());
#ifdef DEBUG_IPI_MSG
	if (hard_smp_processor_id() != boot_cpu_id)
		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
#endif
	send_ipi_message(to_whom, IPI_CPU_STOP);
}
/*
 * Run a function on all other CPUs.
 *  <func>	The function to run.  This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until the function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have already executed it.
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, or from a bottom half handler.
 */
int
smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
			  int wait, unsigned long to_whom)
{
	struct smp_call_struct data;
	unsigned long timeout;
	int num_cpus_to_call;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	data.wait = wait;

	to_whom &= ~(1L << smp_processor_id());
	num_cpus_to_call = hweight64(to_whom);

	atomic_set(&data.unstarted_count, num_cpus_to_call);
	atomic_set(&data.unfinished_count, num_cpus_to_call);

	/* Acquire the smp_call_function_data mutex.  */
	if (pointer_lock(&smp_call_function_data, &data, retry))
		return -EBUSY;

	/* Send a message to the requested CPUs.  */
	send_ipi_message(to_whom, IPI_CALL_FUNC);

	/* Wait for a minimal response.  */
	timeout = jiffies + HZ;
	while (atomic_read (&data.unstarted_count) > 0
	       && time_before (jiffies, timeout))
		barrier();

	/* If there's no response yet, log a message but allow a longer
	 * timeout period -- if we get a response this time, log
	 * a message saying when we got it.
	 */
	if (atomic_read(&data.unstarted_count) > 0) {
		long start_time = jiffies;
		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
		       __FUNCTION__);
		timeout = jiffies + 30 * HZ;
		while (atomic_read(&data.unstarted_count) > 0
		       && time_before(jiffies, timeout))
			barrier();
		if (atomic_read(&data.unstarted_count) <= 0) {
			long delta = jiffies - start_time;
			printk(KERN_ERR
			       "%s: response %ld.%ld seconds into long wait\n",
			       __FUNCTION__, delta / HZ,
			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
		}
	}

	/* We either got one or timed out -- clear the lock.  */
	mb();
	smp_call_function_data = 0;

	/*
	 * If after both the initial and long timeout periods we still don't
	 * have a response, something is very wrong...
	 */
	BUG_ON(atomic_read (&data.unstarted_count) > 0);

	/* Wait for a complete response, if needed.  */
	if (wait) {
		while (atomic_read (&data.unfinished_count) > 0)
			barrier();
	}

	return 0;
}
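
/* The old smp_call_function() calling convention, kept for callers
   such as DCPI: same as smp_call_function_on_cpu() with all of
   cpu_present_mask as the target set.  */
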
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	return smp_call_function_on_cpu (func, info, retry, wait,
					 cpu_present_mask);
}
static void
ipi_imb(void *ignored)
{
	imb();
}

void
smp_imb(void)
{
	/* Must wait for other processors to flush their icache before
	   continuing.  */
	if (on_each_cpu(ipi_imb, NULL, 1, 1))
		printk(KERN_CRIT "smp_imb: timed out\n");
}
static void
ipi_flush_tlb_all(void *ignored)
{
	tbia();
}

void
flush_tlb_all(void)
{
	/* Although we don't have any data to pass, we do want to
	   synchronize with the other processors.  */
	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_all: timed out\n");
	}
}
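
/* The asn_lock flag is set while this CPU's ASN bookkeeping is in
   flux; the IPI handlers below then avoid reloading the current
   context and defer to flush_tlb_other() instead.  */
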
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

static void
ipi_flush_tlb_mm(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);
}
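
/* Fast path used below: if we are the only user of this mm, no other
   CPU can have it active; zapping the stale per-CPU context values
   forces a new ASN on the next activation, and the cross-CPU call
   can be skipped entirely.  */
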
void
flush_tlb_mm(struct mm_struct *mm)
{
	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
	}

	preempt_enable();
}
struct flush_tlb_page_struct {
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
};

static void
ipi_flush_tlb_page(void *x)
{
	struct flush_tlb_page_struct *data = (struct flush_tlb_page_struct *)x;
	struct mm_struct * mm = data->mm;

	if (mm == current->active_mm && !asn_locked())
		flush_tlb_current_page(mm, data->vma, data->addr);
	else
		flush_tlb_other(mm);
}
void
flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_tlb_page_struct data;
	struct mm_struct *mm = vma->vm_mm;

	preempt_disable();

	if (mm == current->active_mm) {
		flush_tlb_current_page(mm, vma, addr);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	data.vma = vma;
	data.mm = mm;
	data.addr = addr;

	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
		printk(KERN_CRIT "flush_tlb_page: timed out\n");
	}

	preempt_enable();
}
void
flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	/* On the Alpha we always flush the whole user tlb.  */
	flush_tlb_mm(vma->vm_mm);
}
static void
ipi_flush_icache_page(void *x)
{
	struct mm_struct *mm = (struct mm_struct *) x;
	if (mm == current->active_mm && !asn_locked())
		__load_new_mm_context(mm);
	else
		flush_tlb_other(mm);
}
void
flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, int len)
{
	struct mm_struct *mm = vma->vm_mm;

	if ((vma->vm_flags & VM_EXEC) == 0)
		return;

	preempt_disable();

	if (mm == current->active_mm) {
		__load_new_mm_context(mm);
		if (atomic_read(&mm->mm_users) <= 1) {
			int cpu, this_cpu = smp_processor_id();
			for (cpu = 0; cpu < NR_CPUS; cpu++) {
				if (!cpu_online(cpu) || cpu == this_cpu)
					continue;
				if (mm->context[cpu])
					mm->context[cpu] = 0;
			}
			preempt_enable();
			return;
		}
	}

	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
		printk(KERN_CRIT "flush_icache_page: timed out\n");
	}

	preempt_enable();
}
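
/*
 * Debugging spinlock implementation.  Instead of spinning forever, a
 * contended lock counts down and eventually prints who is stuck where,
 * along with the current owner and the file:line that took the lock.
 */
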
#ifdef CONFIG_DEBUG_SPINLOCK
void
_raw_spin_unlock(spinlock_t * lock)
{
	mb();
	lock->lock = 0;

	lock->on_cpu = -1;
	lock->previous = NULL;
	lock->task = NULL;
	lock->base_file = "none";
	lock->line_no = 0;
}
void
debug_spin_lock(spinlock_t * lock, const char *base_file, int line_no)
{
	long tmp;
	long stuck;
	void *inline_pc = __builtin_return_address(0);
	unsigned long started = jiffies;
	int printed = 0;
	int cpu = smp_processor_id();

	stuck = 1L << 30;
 try_again:

	/* Use sub-sections to put the actual loop at the end
	   of this object file's text section so as to perfect
	   branch prediction.  */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	subq	%2,1,%2\n"
	"	blbs	%0,2f\n"
	"	or	%0,1,%0\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,3f\n"
	"4:	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	subq	%2,1,%2\n"
	"3:	blt	%2,4b\n"
	"	blbs	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=r" (tmp), "=m" (lock->lock), "=r" (stuck)
	: "1" (lock->lock), "2" (stuck) : "memory");

	if (stuck < 0) {
		printk(KERN_WARNING
		       "%s:%d spinlock stuck in %s at %p(%d)"
		       " owner %s at %p(%d) %s:%d\n",
		       base_file, line_no,
		       current->comm, inline_pc, cpu,
		       lock->task->comm, lock->previous,
		       lock->on_cpu, lock->base_file, lock->line_no);
		stuck = 1L << 36;
		printed = 1;
		goto try_again;
	}

	/* Exiting.  Got the lock.  */
	lock->on_cpu = cpu;
	lock->previous = inline_pc;
	lock->task = current;
	lock->base_file = base_file;
	lock->line_no = line_no;

	if (printed) {
		printk(KERN_WARNING
		       "%s:%d spinlock grabbed in %s at %p(%d) %ld ticks\n",
		       base_file, line_no, current->comm, inline_pc,
		       cpu, jiffies - started);
	}
}
int
debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
{
	int ret;
	if ((ret = !test_and_set_bit(0, lock))) {
		lock->on_cpu = smp_processor_id();
		lock->previous = __builtin_return_address(0);
		lock->task = current;
	} else {
		lock->base_file = base_file;
		lock->line_no = line_no;
	}
	return ret;
}
#endif /* CONFIG_DEBUG_SPINLOCK */
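
/*
 * Debugging rwlock implementation.  The lock word is a single int:
 * the low bit is the writer lock and each reader subtracts 2, so a
 * negative value means readers currently hold the lock.
 */
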
#ifdef CONFIG_DEBUG_RWLOCK
void _raw_write_lock(rwlock_t * lock)
{
	long regx, regy;
	int stuck_lock, stuck_reader;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;
	stuck_reader = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	blt	%1,8f\n"
	"	mov	1,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"4:	mb\n"
	".subsection 2\n"
	"6:	blt	%3,4b	# debug\n"
	"	subl	%3,1,%3	# debug\n"
	"	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"8:	blt	%4,4b	# debug\n"
	"	subl	%4,1,%4	# debug\n"
	"	ldl	%1,%0\n"
	"	blt	%1,8b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (regy),
	  "=&r" (stuck_lock), "=&r" (stuck_reader)
	: "0" (*(volatile int *)lock), "3" (stuck_lock), "4" (stuck_reader) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "write_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
	if (stuck_reader < 0) {
		printk(KERN_WARNING "write_lock stuck on readers at %p\n",
		       inline_pc);
		goto try_again;
	}
}
void _raw_read_lock(rwlock_t * lock)
{
	long regx;
	int stuck_lock;
	void *inline_pc = __builtin_return_address(0);

 try_again:

	stuck_lock = 1<<30;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0;"
	"	blbs	%1,6f;"
	"	subl	%1,2,%1;"
	"	stl_c	%1,%0;"
	"	beq	%1,6f;"
	"4:	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0;"
	"	blt	%2,4b	# debug\n"
	"	subl	%2,1,%2	# debug\n"
	"	blbs	%1,6b;"
	"	br	1b\n"
	".previous"
	: "=m" (*(volatile int *)lock), "=&r" (regx), "=&r" (stuck_lock)
	: "0" (*(volatile int *)lock), "2" (stuck_lock) : "memory");

	if (stuck_lock < 0) {
		printk(KERN_WARNING "read_lock stuck at %p\n", inline_pc);
		goto try_again;
	}
}
#endif /* CONFIG_DEBUG_RWLOCK */