VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 64b8732..abc65dc 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -35,6 +35,7 @@
 #include <asm/uaccess.h>
 #include <asm/timer.h>
 #include <asm/starfire.h>
+#include <asm/tlb.h>
 
 extern int linux_num_cpus;
 extern void calibrate_delay(void);
@@ -406,23 +407,14 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
        int i;
 
        __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-       for (i = 0; i < NR_CPUS; i++) {
-               if (cpu_isset(i, mask)) {
-                       spitfire_xcall_helper(data0, data1, data2, pstate, i);
-                       cpu_clear(i, mask);
-                       if (cpus_empty(mask))
-                               break;
-               }
-       }
+       for_each_cpu_mask(i, mask)
+               spitfire_xcall_helper(data0, data1, data2, pstate, i);
 }
 
 /* Cheetah now allows to send the whole 64-bytes of data in the interrupt
  * packet, but we have no use for that.  However we do take advantage of
  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  */
-#if NR_CPUS > 32
-#error Fixup cheetah_xcall_deliver Dave...
-#endif
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
        u64 pstate, ver;
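This hunk and the two that follow replace the open-coded NR_CPUS scans with the generic for_each_cpu_mask() iterator, which visits each set bit of the mask in ascending order, so the delivery loops cover the same CPUs with less bookkeeping; the NR_CPUS > 32 build-time guard is dropped in the same pass. As a rough, self-contained userspace analogue of the iterator's semantics (not the kernel's cpumask implementation; every name below is invented for the example):

/* Illustrative userspace analogue of for_each_cpu_mask(); the real
 * iterator works on cpumask_t, not on a plain integer mask. */
#include <stdio.h>

typedef unsigned long long mask_t;               /* stand-in for cpumask_t */
#define MAX_CPUS 64

static int next_cpu_in(mask_t m, int start)      /* first set bit >= start */
{
        for (int i = start; i < MAX_CPUS; i++)
                if (m & (1ULL << i))
                        return i;
        return MAX_CPUS;
}

#define for_each_cpu_in(i, m) \
        for ((i) = next_cpu_in((m), 0); (i) < MAX_CPUS; (i) = next_cpu_in((m), (i) + 1))

int main(void)
{
        mask_t mask = (1ULL << 1) | (1ULL << 5) | (1ULL << 12);
        int i;

        for_each_cpu_in(i, mask)                 /* visits 1, 5, 12 in order */
                printf("deliver to cpu %d\n", i);
        return 0;
}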
@@ -456,25 +448,19 @@ retry:
 
        nack_busy_id = 0;
        {
-               cpumask_t work_mask = mask;
                int i;
 
-               for (i = 0; i < NR_CPUS; i++) {
-                       if (cpu_isset(i, work_mask)) {
-                               u64 target = (i << 14) | 0x70;
-
-                               if (!is_jalapeno)
-                                       target |= (nack_busy_id << 24);
-                               __asm__ __volatile__(
-                                       "stxa   %%g0, [%0] %1\n\t"
-                                       "membar #Sync\n\t"
-                                       : /* no outputs */
-                                       : "r" (target), "i" (ASI_INTR_W));
-                               nack_busy_id++;
-                               cpu_clear(i, work_mask);
-                               if (cpus_empty(work_mask))
-                                       break;
-                       }
+               for_each_cpu_mask(i, mask) {
+                       u64 target = (i << 14) | 0x70;
+
+                       if (!is_jalapeno)
+                               target |= (nack_busy_id << 24);
+                       __asm__ __volatile__(
+                               "stxa   %%g0, [%0] %1\n\t"
+                               "membar #Sync\n\t"
+                               : /* no outputs */
+                               : "r" (target), "i" (ASI_INTR_W));
+                       nack_busy_id++;
                }
        }
 
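For concreteness, the dispatch address written through ASI_INTR_W above is built from the target CPU id shifted left by 14, the 0x70 dispatch offset, and, on non-Jalapeno Cheetah, the busy/nack slot index shifted left by 24. The same arithmetic as a standalone check, with arbitrarily chosen example values:

/* Standalone check of the dispatch-address arithmetic above.  The
 * values cpu = 3 and nack_busy_id = 2 are arbitrary examples
 * (non-Jalapeno path, so the busy/nack slot is folded in too). */
#include <stdio.h>

int main(void)
{
        unsigned long long cpu = 3, nack_busy_id = 2;
        unsigned long long target = (cpu << 14) | 0x70;    /* 0xc070 */

        target |= (nack_busy_id << 24);                    /* 0x200c070 */
        printf("ASI_INTR_W target = %#llx\n", target);
        return 0;
}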
@@ -507,7 +493,6 @@ retry:
                        printk("CPU[%d]: mondo stuckage result[%016lx]\n",
                               smp_processor_id(), dispatch_stat);
                } else {
-                       cpumask_t work_mask = mask;
                        int i, this_busy_nack = 0;
 
                        /* Delay some random time with interrupts enabled
@@ -518,22 +503,17 @@ retry:
                        /* Clear out the mask bits for cpus which did not
                         * NACK us.
                         */
-                       for (i = 0; i < NR_CPUS; i++) {
-                               if (cpu_isset(i, work_mask)) {
-                                       u64 check_mask;
-
-                                       if (is_jalapeno)
-                                               check_mask = (0x2UL << (2*i));
-                                       else
-                                               check_mask = (0x2UL <<
-                                                             this_busy_nack);
-                                       if ((dispatch_stat & check_mask) == 0)
-                                               cpu_clear(i, mask);
-                                       this_busy_nack += 2;
-                                       cpu_clear(i, work_mask);
-                                       if (cpus_empty(work_mask))
-                                               break;
-                               }
+                       for_each_cpu_mask(i, mask) {
+                               u64 check_mask;
+
+                               if (is_jalapeno)
+                                       check_mask = (0x2UL << (2*i));
+                               else
+                                       check_mask = (0x2UL <<
+                                                     this_busy_nack);
+                               if ((dispatch_stat & check_mask) == 0)
+                                       cpu_clear(i, mask);
+                               this_busy_nack += 2;
                        }
 
                        goto retry;
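The retry path reads the dispatch status two bits per slot; 0x2UL shifted by 2*i (Jalapeno, indexed by CPU id) or by this_busy_nack (other Cheetah, indexed by dispatch order) selects bit n+1, the bit the code tests as the NACK indication for that slot. A standalone check of that bit selection, with arbitrary example indices:

/* Standalone check of the NACK-bit selection above: each dispatch slot
 * occupies two bits of dispatch_stat, and 0x2UL << n selects the bit
 * the code treats as that slot's NACK indication.  The example indices
 * are arbitrary. */
#include <stdio.h>

int main(void)
{
        int i = 3;                 /* CPU id, Jalapeno indexing          */
        int this_busy_nack = 4;    /* third CPU dispatched, non-Jalapeno */

        printf("jalapeno check_mask = %#lx\n", 0x2UL << (2 * i));        /* 0x80 */
        printf("cheetah  check_mask = %#lx\n", 0x2UL << this_busy_nack); /* 0x20 */
        return 0;
}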
@@ -656,9 +636,8 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
        }
 }
 
-extern unsigned long xcall_flush_tlb_page;
 extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_range;
+extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_flush_tlb_all_spitfire;
 extern unsigned long xcall_flush_tlb_all_cheetah;
@@ -675,13 +654,13 @@ extern atomic_t dcpage_flushes_xcall;
 static __inline__ void __local_flush_dcache_page(struct page *page)
 {
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-       __flush_dcache_page(page->virtual,
+       __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
 #else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
-               __flush_icache_page(__pa(page->virtual));
+               __flush_icache_page(__pa(page_address(page)));
 #endif
 }
 
@@ -696,6 +675,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
        if (cpu == this_cpu) {
                __local_flush_dcache_page(page);
        } else if (cpu_online(cpu)) {
+               void *pg_addr = page_address(page);
                u64 data0;
 
                if (tlb_type == spitfire) {
@@ -704,14 +684,14 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
                        if (page_mapping(page) != NULL)
                                data0 |= ((u64)1 << 32);
                        spitfire_xcall_deliver(data0,
-                                              __pa(page->virtual),
-                                              (u64) page->virtual,
+                                              __pa(pg_addr),
+                                              (u64) pg_addr,
                                               mask);
                } else {
                        data0 =
                                ((u64)&xcall_flush_dcache_page_cheetah);
                        cheetah_xcall_deliver(data0,
-                                             __pa(page->virtual),
+                                             __pa(pg_addr),
                                              0, mask);
                }
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -724,6 +704,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 
 void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 {
+       void *pg_addr = page_address(page);
        cpumask_t mask = cpu_online_map;
        u64 data0;
        int this_cpu = get_cpu();
@@ -740,13 +721,13 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
                if (page_mapping(page) != NULL)
                        data0 |= ((u64)1 << 32);
                spitfire_xcall_deliver(data0,
-                                      __pa(page->virtual),
-                                      (u64) page->virtual,
+                                      __pa(pg_addr),
+                                      (u64) pg_addr,
                                       mask);
        } else {
                data0 = ((u64)&xcall_flush_dcache_page_cheetah);
                cheetah_xcall_deliver(data0,
-                                     __pa(page->virtual),
+                                     __pa(pg_addr),
                                      0, mask);
        }
 #ifdef CONFIG_DEBUG_DCFLUSH
@@ -854,7 +835,6 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
                int cpu = get_cpu();
 
                if (atomic_read(&mm->mm_users) == 1) {
-                       /* See smp_flush_tlb_page for info about this. */
                        mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                        goto local_flush_and_out;
                }
@@ -870,27 +850,40 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
        }
 }
 
-void smp_flush_tlb_range(struct mm_struct *mm, unsigned long start,
-                        unsigned long end)
+void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
 {
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();
 
-       start &= PAGE_MASK;
-       end    = PAGE_ALIGN(end);
-
        if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
                goto local_flush_and_out;
+       } else {
+               /* This optimization is not valid.  Normally
+                * we will be holding the page_table_lock, but
+                * there is an exception which is copy_page_range()
+                * when forking.  The lock is held during the individual
+                * page table updates in the parent, but not at the
+                * top level, which is where we are invoked.
+                */
+               if (0) {
+                       cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
+
+                       /* By virtue of running under the mm->page_table_lock,
+                        * and mmu_context.h:switch_mm doing the same, the
+                        * following operation is safe.
+                        */
+                       if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
+                               goto local_flush_and_out;
+               }
        }
 
-       smp_cross_call_masked(&xcall_flush_tlb_range,
-                             ctx, start, end,
+       smp_cross_call_masked(&xcall_flush_tlb_pending,
+                             ctx, nr, (unsigned long) vaddrs,
                              mm->cpu_vm_mask);
 
- local_flush_and_out:
-       __flush_tlb_range(ctx, start, SECONDARY_CONTEXT,
-                         end, PAGE_SIZE, (end-start));
+local_flush_and_out:
+       __flush_tlb_pending(ctx, nr, vaddrs);
 
        put_cpu();
 }
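smp_flush_tlb_range(mm, start, end) becomes smp_flush_tlb_pending(mm, nr, vaddrs): instead of a page-aligned range, the caller hands over an explicit batch of pending virtual addresses, and one xcall_flush_tlb_pending cross call flushes exactly those pages. A hedged userspace sketch of the batching pattern such an interface serves; every name below is invented for illustration and is not the kernel's actual TLB-gather code:

/* Illustrative userspace sketch of the batching pattern behind the new
 * interface: callers collect pending vaddrs and hand the whole array to
 * one smp_flush_tlb_pending()-style call. */
#include <stdio.h>

#define BATCH_NR 16

struct flush_batch {
        unsigned long nr;
        unsigned long vaddrs[BATCH_NR];
};

static void flush_pending(unsigned long nr, unsigned long *vaddrs)
{
        /* stand-in for smp_flush_tlb_pending(mm, nr, vaddrs) */
        for (unsigned long i = 0; i < nr; i++)
                printf("flush vaddr %#lx\n", vaddrs[i]);
}

static void batch_add(struct flush_batch *b, unsigned long vaddr)
{
        b->vaddrs[b->nr++] = vaddr;
        if (b->nr == BATCH_NR) {                 /* flush when the batch fills */
                flush_pending(b->nr, b->vaddrs);
                b->nr = 0;
        }
}

int main(void)
{
        struct flush_batch b = { 0 };

        batch_add(&b, 0x2000);
        batch_add(&b, 0x4000);
        flush_pending(b.nr, b.vaddrs);           /* final partial flush */
        return 0;
}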
@@ -907,55 +900,6 @@ void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
        }
 }
 
-void smp_flush_tlb_page(struct mm_struct *mm, unsigned long page)
-{
-       {
-               u32 ctx = CTX_HWBITS(mm->context);
-               int cpu = get_cpu();
-
-               page &= PAGE_MASK;
-               if (mm == current->active_mm &&
-                   atomic_read(&mm->mm_users) == 1) {
-                       /* By virtue of being the current address space, and
-                        * having the only reference to it, the following
-                        * operation is safe.
-                        *
-                        * It would not be a win to perform the xcall tlb
-                        * flush in this case, because even if we switch back
-                        * to one of the other processors in cpu_vm_mask it
-                        * is almost certain that all TLB entries for this
-                        * context will be replaced by the time that happens.
-                        */
-                       mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-                       goto local_flush_and_out;
-               } else {
-                       cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
-                       /* By virtue of running under the mm->page_table_lock,
-                        * and mmu_context.h:switch_mm doing the same, the
-                        * following operation is safe.
-                        */
-                       if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
-                               goto local_flush_and_out;
-               }
-
-               /* OK, we have to actually perform the cross call.  Most
-                * likely this is a cloned mm or kswapd is kicking out pages
-                * for a task which has run recently on another cpu.
-                */
-               smp_cross_call_masked(&xcall_flush_tlb_page,
-                                     ctx, page, 0,
-                                     mm->cpu_vm_mask);
-               if (!cpu_isset(cpu, mm->cpu_vm_mask))
-                       return;
-
-       local_flush_and_out:
-               __flush_tlb_page(ctx, page, SECONDARY_CONTEXT);
-
-               put_cpu();
-       }
-}
-
 /* CPU capture. */
 /* #define CAPTURE_DEBUG */
 extern unsigned long xcall_capture;