VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 043861f..60308ce 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -37,8 +37,6 @@
 #include <asm/spitfire.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 extern void device_scan(void);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
@@ -137,13 +135,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-       __flush_dcache_page(page->virtual,
+       __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
 #else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
-               __flush_icache_page(__pa(page->virtual));
+               __flush_icache_page(__pa(page_address(page)));
 #endif
 }
 
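
A note on the hunk above: page->virtual exists in struct page only on
configurations that define WANT_PAGE_VIRTUAL; the portable accessor is
page_address(). On a no-highmem architecture like sparc64 that accessor
reduces to recomputing the kernel virtual address from the page frame
number. A minimal sketch of that reduction (the name sketch_page_address
is ours, not kernel API):

    /* Sketch: what page_address() boils down to without highmem and
     * without WANT_PAGE_VIRTUAL (cf. lowmem_page_address() in
     * include/linux/mm.h): derive the address from the pfn rather
     * than reading a stored ->virtual field.
     */
    #define sketch_page_address(page) \
            __va(page_to_pfn(page) << PAGE_SHIFT)
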
@@ -252,87 +250,6 @@ out:
        put_cpu();
 }
 
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA.  It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
-       unsigned long offset;
-       pte_t *ptep;
-
-       if (pmd_none(*pmd))
-               return;
-       ptep = pte_offset_map(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-
-               if (pte_none(pte))
-                       continue;
-
-               if (pte_present(pte) && pte_dirty(pte)) {
-                       struct page *page;
-                       unsigned long pgaddr, uaddr;
-                       unsigned long pfn = pte_pfn(pte);
-
-                       if (!pfn_valid(pfn))
-                               continue;
-                       page = pfn_to_page(pfn);
-                       if (PageReserved(page) || !page_mapping(page))
-                               continue;
-                       pgaddr = (unsigned long) page_address(page);
-                       uaddr = address + offset;
-                       if ((pgaddr ^ uaddr) & (1 << 13))
-                               flush_dcache_page_all(mm, page);
-               }
-       }
-       pte_unmap(ptep - 1);
-}
-
-static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
-{
-       pmd_t *pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       do {
-               flush_cache_pte_range(mm, pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       pgd_t *dir = pgd_offset(mm, start);
-
-       if (mm == current->mm)
-               flushw_user();
-
-       if (vma->vm_file == NULL ||
-           ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
-               return;
-
-       do {
-               flush_cache_pmd_range(mm, dir, start, end - start);
-               start = (start + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (start && (start < end));
-}
-
 void flush_icache_range(unsigned long start, unsigned long end)
 {
        /* Cheetah has coherent I-cache. */
@@ -344,11 +261,21 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+       return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+       return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6dkB\n",
+       printk("Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
@@ -1163,7 +1090,7 @@ struct pgtable_cache_struct pgt_quicklists;
 #else
 #define DC_ALIAS_SHIFT 0
 #endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        struct page *page;
        unsigned long color;
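
The last hunk renames pte_alloc_one_kernel() to __pte_alloc_one_kernel().
A double-underscore rename of this kind usually means a caller-facing
wrapper with the old name is introduced elsewhere in the patch (it is not
visible in this hunk); purely as an assumption, the pattern looks like:

    /* Sketch only: a conventional wrapper behind a foo -> __foo
     * rename. The pass-through body is an assumption; the real
     * caller-facing pte_alloc_one_kernel() is not in this hunk.
     */
    static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                              unsigned long address)
    {
            return __pte_alloc_one_kernel(mm, address);
    }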