Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
index 56fbbff..c0c1b21 100644
--- a/arch/sh64/mm/cache.c
+++ b/arch/sh64/mm/cache.c
@@ -114,6 +114,16 @@ int __init sh64_cache_init(void)
        return 0;
 }
 
+#ifdef CONFIG_DCACHE_DISABLED
+#define sh64_dcache_purge_all()                                        do { } while (0)
+#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)      do { } while (0)
+#define sh64_dcache_purge_user_range(mm, start, end)           do { } while (0)
+#define sh64_dcache_purge_phy_page(paddr)                      do { } while (0)
+#define sh64_dcache_purge_virt_page(mm, eaddr)                 do { } while (0)
+#define sh64_dcache_purge_kernel_range(start, end)             do { } while (0)
+#define sh64_dcache_wback_current_user_range(start, end)       do { } while (0)
+#endif
+
 /*##########################################################################*/
 
 /* From here onwards, a rewrite of the implementation,
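The block added above compiles every D-cache purge operation down to nothing when CONFIG_DCACHE_DISABLED is set. The do { } while (0) body is the standard idiom for a statement-like no-op macro: it swallows the trailing semicolon and remains a single statement, so call sites inside an unbraced if/else stay well-formed. A minimal user-space sketch of the same idiom, with a hypothetical MY_DCACHE_DISABLED switch standing in for the kernel config option:

#include <stdio.h>

/* Hypothetical switch standing in for CONFIG_DCACHE_DISABLED. */
#define MY_DCACHE_DISABLED 1

#if MY_DCACHE_DISABLED
/* do { } while (0) swallows the trailing semicolon and stays one
   statement, so the stub is safe even in an unbraced if/else. */
#define dcache_purge_range(start, end)  do { } while (0)
#else
#define dcache_purge_range(start, end)  real_dcache_purge_range((start), (end))
#endif

int main(void)
{
        if (1)
                dcache_purge_range(0x1000UL, 0x2000UL); /* expands to a no-op */
        else
                printf("never reached\n");
        return 0;
}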
@@ -436,6 +446,7 @@ static void __inline__ sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets
                eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
                for (eaddr=eaddr0; eaddr<eaddr1; eaddr+=cpu_data->dcache.way_ofs) {
                        asm __volatile__ ("alloco %0, 0" : : "r" (eaddr));
+                       asm __volatile__ ("synco"); /* TAKum03020 */
                }
 
                eaddr1 = eaddr0 + cpu_data->dcache.way_ofs * cpu_data->dcache.ways;
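The one-line addition above issues a synco after each alloco in the set-purge loop; the TAKum03020 tag marks a silicon-erratum workaround, and synco is the SH-5 instruction that forces the preceding cache operation to complete before the next one is issued. A sketch of the resulting loop shape, pulled out into a standalone helper (illustrative name and parameters, and SH-5 inline assembly, so it only compiles for that target):

/* Illustrative helper, not from the patch: the alloco+synco pairing
   from the set-purge loop, one iteration per cache way. */
static inline void purge_ways(unsigned long eaddr0, unsigned long way_ofs,
                              unsigned int ways)
{
        unsigned long eaddr;
        unsigned long eaddr1 = eaddr0 + way_ofs * ways;

        for (eaddr = eaddr0; eaddr < eaddr1; eaddr += way_ofs) {
                __asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
                /* Force the alloco to complete before the next cache op
                   (the erratum workaround tagged TAKum03020 above). */
                __asm__ __volatile__ ("synco");
        }
}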
@@ -573,57 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
        }
 }
 
-static void sh64_dcache_purge_virt_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+                               unsigned long addr, unsigned long end)
 {
-       unsigned long phys;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
+       spinlock_t *ptl;
+       unsigned long paddr;
 
-       pgd = pgd_offset(mm, eaddr);
-       pmd = pmd_offset(pgd, eaddr);
+       if (!mm)
+               return; /* No way to find physical address of page */
 
-       if (pmd_none(*pmd) || pmd_bad(*pmd))
+       pgd = pgd_offset(mm, addr);
+       if (pgd_bad(*pgd))
                return;
 
-       pte = pte_offset_kernel(pmd, eaddr);
-       entry = *pte;
-
-       if (pte_none(entry) || !pte_present(entry))
+       pmd = pmd_offset(pgd, addr);
+       if (pmd_none(*pmd) || pmd_bad(*pmd))
                return;
 
-       phys = pte_val(entry) & PAGE_MASK;
-
-       sh64_dcache_purge_phy_page(phys);
-}
-
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
-{
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t entry;
-       unsigned long paddr;
-
-       /* NOTE : all the callers of this have mm->page_table_lock held, so the
-          following page table traversal is safe even on SMP/pre-emptible. */
-
-       if (!mm) return; /* No way to find physical address of page */
-       pgd = pgd_offset(mm, eaddr);
-       if (pgd_bad(*pgd)) return;
-
-       pmd = pmd_offset(pgd, eaddr);
-       if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-       pte = pte_offset_kernel(pmd, eaddr);
-       entry = *pte;
-       if (pte_none(entry) || !pte_present(entry)) return;
-
-       paddr = pte_val(entry) & PAGE_MASK;
-
-       sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+       pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+       do {
+               entry = *pte;
+               if (pte_none(entry) || !pte_present(entry))
+                       continue;
+               paddr = pte_val(entry) & PAGE_MASK;
+               sh64_dcache_purge_coloured_phy_page(paddr, addr);
+       } while (pte++, addr += PAGE_SIZE, addr != end);
+       pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
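The rewritten routine above replaces the old one-page-at-a-time sh64_dcache_purge_user_page() with a batched walk: the page-table descent (pgd, pmd) happens once, then every pte in the range is visited under a single pte_offset_map_lock()/pte_unmap_unlock() pair, which takes the per-page-table pte lock instead of relying on the caller holding mm->page_table_lock. The loop uses the comma-expression do { ... } while (pte++, addr += PAGE_SIZE, addr != end) idiom so the increments and the termination test share one line. A minimal user-space sketch of that walk idiom, assuming page-aligned bounds and 4 KB pages:

#include <stdio.h>

#define PAGE_SIZE 4096UL

static void purge_one_page(unsigned long addr)
{
        printf("purge page at 0x%lx\n", addr);
}

/* Same control shape as the pte loop above: the comma expression
   advances the cursor and tests termination in one place. */
static void purge_range(unsigned long addr, unsigned long end)
{
        do {
                purge_one_page(addr);
        } while (addr += PAGE_SIZE, addr != end);
}

int main(void)
{
        purge_range(0x10000UL, 0x13000UL); /* exactly three pages */
        return 0;
}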
 
@@ -682,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
        int n_pages;
 
        n_pages = ((end - start) >> PAGE_SHIFT);
-       if (n_pages >= 64) {
+       if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
                sh64_dcache_purge_all();
 #else
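The widened test above falls back to a full purge not only for large ranges but also whenever start and end - 1 differ in any bit covered by PMD_MASK, i.e. whenever the range spans more than one page-table page. That matters because the new sh64_dcache_purge_user_pages() maps and locks exactly one pte page. A small sketch of the XOR trick, assuming a hypothetical 2 MB pmd span (the real PMD_MASK depends on the configuration):

#include <stdio.h>

/* Hypothetical 2 MB pmd span: PMD_MASK = ~(2 MB - 1). */
#define PMD_MASK (~((1UL << 21) - 1))

static int crosses_pmd(unsigned long start, unsigned long end)
{
        /* Nonzero iff start and the last byte (end - 1) differ in a bit
           above the pmd offset, i.e. live in different page-table pages. */
        return ((start ^ (end - 1)) & PMD_MASK) != 0;
}

int main(void)
{
        printf("%d\n", crosses_pmd(0x1ff000UL, 0x201000UL)); /* 1: crosses */
        printf("%d\n", crosses_pmd(0x200000UL, 0x202000UL)); /* 0: same pmd */
        return 0;
}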
@@ -721,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
                }
 #endif
        } else {
-               /* 'Small' range */
-               unsigned long aligned_start;
-               unsigned long eaddr;
-               unsigned long last_page_start;
-
-               aligned_start = start & PAGE_MASK;
-               /* 'end' is 1 byte beyond the end of the range */
-               last_page_start = (end - 1) & PAGE_MASK;
-
-               eaddr = aligned_start;
-               while (eaddr <= last_page_start) {
-                       sh64_dcache_purge_user_page(mm, eaddr);
-                       eaddr += PAGE_SIZE;
-               }
+               /* Small range, covered by a single page table page */
+               start &= PAGE_MASK;     /* should already be so */
+               end = PAGE_ALIGN(end);  /* should already be so */
+               sh64_dcache_purge_user_pages(mm, start, end);
        }
        return;
 }
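The small-range branch now just normalizes the bounds and hands the whole range to sh64_dcache_purge_user_pages(): start is rounded down with PAGE_MASK and end rounded up with PAGE_ALIGN (both are expected to be aligned already, as the comments note). A quick sketch of what that normalization does, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long start = 0x12345UL;
        unsigned long end   = 0x13001UL; /* 1 byte past the range */

        start &= PAGE_MASK;     /* 0x12000: round down to the page start */
        end = PAGE_ALIGN(end);  /* 0x14000: round up past the last byte */
        printf("0x%lx .. 0x%lx\n", start, end);
        return 0;
}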
@@ -766,8 +746,6 @@ static void sh64_dcache_wback_current_user_range(unsigned long start, unsigned l
        }
 }
 
-#endif /* !CONFIG_DCACHE_DISABLED */
-
 /****************************************************************************/
 
 /* These *MUST* lie in an area of virtual address space that's otherwise unused. */
@@ -830,6 +808,8 @@ static void sh64_clear_user_page_coloured(void *to, unsigned long address)
        sh64_teardown_dtlb_cache_slot();
 }
 
+#endif /* !CONFIG_DCACHE_DISABLED */
+
 /****************************************************************************/
 
 /*##########################################################################
@@ -894,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
           addresses from the user address space specified by mm, after writing
           back any dirty data.
 
-          Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-          Note(2), this is called with mm->page_table_lock held.*/
+          Note, 'end' is 1 byte beyond the end of the range to flush. */
 
        sh64_dcache_purge_user_range(mm, start, end);
        sh64_icache_inv_user_page_range(mm, start, end);
@@ -904,7 +882,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 
 /****************************************************************************/
 
-void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
+void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned long pfn)
 {
        /* Invalidate any entries in either cache for the vma within the user
           address space vma->vm_mm for the page starting at virtual address
@@ -912,10 +890,10 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr)
           the I-cache must be searched too in case the page in question is
           both writable and being executed from (e.g. stack trampolines.)
 
-          Note(1), this is called with mm->page_table_lock held.
+          Note, this is called with pte lock held.
           */
 
-       sh64_dcache_purge_virt_page(vma->vm_mm, eaddr);
+       sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
 
        if (vma->vm_flags & VM_EXEC) {
                sh64_icache_inv_user_page(vma, eaddr);
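With the pfn now passed in by the caller, flush_cache_page() no longer needs the removed sh64_dcache_purge_virt_page() page-table walk, and so no longer depends on mm->page_table_lock: the physical address is recovered directly as pfn << PAGE_SHIFT. A trivial sketch of that conversion, assuming 4 KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assuming 4 KB pages */

int main(void)
{
        unsigned long pfn = 0x1234UL;
        unsigned long paddr = pfn << PAGE_SHIFT; /* 0x1234000 */

        printf("pfn 0x%lx -> paddr 0x%lx\n", pfn, paddr);
        return 0;
}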