vserver 1.9.3
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 043861f..a824926 100644
@@ -37,8 +37,6 @@
 #include <asm/spitfire.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 extern void device_scan(void);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
@@ -70,6 +68,7 @@ unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
 extern char  _start[], _end[];
 
 /* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
@@ -137,13 +136,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-       __flush_dcache_page(page->virtual,
+       __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
 #else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
-               __flush_icache_page(__pa(page->virtual));
+               __flush_icache_page(__pa(page_address(page)));
 #endif
 }
 
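The hunk above replaces the raw page->virtual dereference with the page_address() accessor, which also covers configurations where struct page carries no ->virtual field. On a no-highmem architecture without WANT_PAGE_VIRTUAL, the accessor reduces to pfn arithmetic; a minimal sketch of that case (sketch_page_address is an illustrative name, not a kernel symbol):

static inline void *sketch_page_address(struct page *page)
{
        /* No highmem here: every managed page sits in the kernel's
         * linear mapping, so derive the address from the pfn instead
         * of reading a cached field out of struct page.
         */
        return __va(page_to_pfn(page) << PAGE_SHIFT);
}
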
@@ -252,87 +251,6 @@ out:
        put_cpu();
 }
 
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA.  It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
-       unsigned long offset;
-       pte_t *ptep;
-
-       if (pmd_none(*pmd))
-               return;
-       ptep = pte_offset_map(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-
-               if (pte_none(pte))
-                       continue;
-
-               if (pte_present(pte) && pte_dirty(pte)) {
-                       struct page *page;
-                       unsigned long pgaddr, uaddr;
-                       unsigned long pfn = pte_pfn(pte);
-
-                       if (!pfn_valid(pfn))
-                               continue;
-                       page = pfn_to_page(pfn);
-                       if (PageReserved(page) || !page_mapping(page))
-                               continue;
-                       pgaddr = (unsigned long) page_address(page);
-                       uaddr = address + offset;
-                       if ((pgaddr ^ uaddr) & (1 << 13))
-                               flush_dcache_page_all(mm, page);
-               }
-       }
-       pte_unmap(ptep - 1);
-}
-
-static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
-{
-       pmd_t *pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       do {
-               flush_cache_pte_range(mm, pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       pgd_t *dir = pgd_offset(mm, start);
-
-       if (mm == current->mm)
-               flushw_user();
-
-       if (vma->vm_file == NULL ||
-           ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
-               return;
-
-       do {
-               flush_cache_pmd_range(mm, dir, start, end - start);
-               start = (start + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (start && (start < end));
-}
-
 void flush_icache_range(unsigned long start, unsigned long end)
 {
        /* Cheetah has coherent I-cache. */
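The block removed above was sparc64's flush_cache_range(): for shared, writable file mappings it walked the VMA's PGD/PMD/PTE hierarchy and flushed every present, dirty page whose user-space address aliased its kernel address in the virtually-indexed L1 D-cache. With a 16K L1 D-cache and 8K pages, two virtual mappings of one physical page alias exactly when they differ in address bit 13, which is what the (pgaddr ^ uaddr) & (1 << 13) test detected. A sketch of that predicate, assuming the 16K/8K geometry (dcache_alias is an illustrative name):

/* Two mappings of the same physical page collide in the
 * virtually-indexed D-cache iff they disagree in bit 13
 * (the cache is two PAGE_SIZE pages deep).
 */
static inline int dcache_alias(unsigned long kaddr, unsigned long uaddr)
{
        return ((kaddr ^ uaddr) & (1UL << 13)) != 0;
}
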
@@ -344,11 +262,21 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+       return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+       return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6dkB\n",
+       printk("Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
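The new out-of-line page_to_pfn()/pfn_to_page() pair assumes a single contiguous mem_map whose first entry corresponds to pfn_base, so each helper is the other's exact inverse. A usage sketch under that assumption:

/* Round trip over the flat mem_map (assumes pfn >= pfn_base): */
struct page *p = pfn_to_page(pfn_base);  /* first managed page */
unsigned long pfn = page_to_pfn(p);      /* back to pfn_base   */
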
@@ -1163,7 +1091,7 @@ struct pgtable_cache_struct pgt_quicklists;
 #else
 #define DC_ALIAS_SHIFT 0
 #endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        struct page *page;
        unsigned long color;
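Renaming the allocator to __pte_alloc_one_kernel() implies that a pte_alloc_one_kernel() front end now lives elsewhere, most likely as an inline in a pgalloc header this diff does not touch. A hypothetical shape for such a wrapper (its name and placement are assumptions, not shown by the patch):

/* Hypothetical wrapper; the real one is outside this diff. */
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        return __pte_alloc_one_kernel(mm, address);
}
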
@@ -1352,10 +1280,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* Now check the initial ramdisk, so that the bootmem map does not overwrite it */
-       if (sparc_ramdisk_image) {
-               if (sparc_ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
-                       sparc_ramdisk_image -= KERNBASE;
-               initrd_start = sparc_ramdisk_image + phys_base;
+       if (sparc_ramdisk_image || sparc_ramdisk_image64) {
+               unsigned long ramdisk_image = sparc_ramdisk_image ?
+                       sparc_ramdisk_image : sparc_ramdisk_image64;
+               if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
+                       ramdisk_image -= KERNBASE;
+               initrd_start = ramdisk_image + phys_base;
                initrd_end = initrd_start + sparc_ramdisk_size;
                if (initrd_end > end_of_phys_memory) {
                        printk(KERN_CRIT "initrd extends beyond end of memory "
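The hunk above merges the old and new ramdisk handles into one local: the legacy 32-bit sparc_ramdisk_image cell wins when non-zero, otherwise the 64-bit sparc_ramdisk_image64 value (presumably supplied by newer boot firmware) is used. The comparison against _end then decides whether the value is a KERNBASE-relative virtual address that must be rebased to physical. The selection logic in isolation, with the intent spelled out:

unsigned long ramdisk_image = sparc_ramdisk_image ?
        sparc_ramdisk_image : sparc_ramdisk_image64;  /* prefer legacy cell */
if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
        ramdisk_image -= KERNBASE;  /* looks virtual: strip the offset */
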
@@ -1398,6 +1328,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                size = initrd_end - initrd_start;
 
                /* Reserve the initrd image area. */
+#ifdef CONFIG_DEBUG_BOOTMEM
+               prom_printf("reserve_bootmem(initrd): base[%lx] size[%lx]\n",
+                       initrd_start, size);
+#endif
                reserve_bootmem(initrd_start, size);
                *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -1450,7 +1384,7 @@ void __init paging_init(void)
        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
                bigkernel = 1;
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (sparc_ramdisk_image)
+       if (sparc_ramdisk_image || sparc_ramdisk_image64)
                real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
 #endif
 
@@ -1575,7 +1509,7 @@ void __init paging_init(void)
                zones_size[ZONE_DMA] = npages;
                zholes_size[ZONE_DMA] = npages - pages_avail;
 
-               free_area_init_node(0, &contig_page_data, NULL, zones_size,
+               free_area_init_node(0, &contig_page_data, zones_size,
                                    phys_base >> PAGE_SHIFT, zholes_size);
                mem_map = contig_page_data.node_mem_map;
        }
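The final hunk tracks a core-mm interface change: free_area_init_node() no longer takes the struct page * argument that this caller passed as NULL, since the allocator is now assumed to set up the node's mem_map itself, which is why the caller reads it back from contig_page_data.node_mem_map afterwards. The prototype assumed by the new call site:

/* Assumed core-mm prototype; not part of this diff: */
void free_area_init_node(int nid, struct pglist_data *pgdat,
                         unsigned long *zones_size,
                         unsigned long node_start_pfn,
                         unsigned long *zholes_size);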