vserver 1.9.3

diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index a5518c2..a824926 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -37,8 +37,6 @@
 #include <asm/spitfire.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 extern void device_scan(void);
 
 struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
@@ -70,6 +68,7 @@ unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
 extern char  _start[], _end[];
 
 /* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
@@ -137,13 +136,13 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #endif
 
 #if (L1DCACHE_SIZE > PAGE_SIZE)
-       __flush_dcache_page(page->virtual,
+       __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
 #else
        if (page_mapping(page) != NULL &&
            tlb_type == spitfire)
-               __flush_icache_page(__pa(page->virtual));
+               __flush_icache_page(__pa(page_address(page)));
 #endif
 }
 
@@ -152,9 +151,9 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 #define dcache_dirty_cpu(page) \
        (((page)->flags >> 24) & (NR_CPUS - 1UL))
 
-static __inline__ void set_dcache_dirty(struct page *page)
+static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
 {
-       unsigned long mask = smp_processor_id();
+       unsigned long mask = this_cpu;
        unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
        mask = (mask << 24) | (1UL << PG_dcache_dirty);
        __asm__ __volatile__("1:\n\t"
@@ -206,16 +205,19 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
            (page = pfn_to_page(pfn), page_mapping(page)) &&
            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
                int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+               int this_cpu = get_cpu();
 
                /* This is just to optimize away some function calls
                 * in the SMP case.
                 */
-               if (cpu == smp_processor_id())
+               if (cpu == this_cpu)
                        flush_dcache_page_impl(page);
                else
                        smp_flush_dcache_page_impl(page, cpu);
 
                clear_dcache_dirty_cpu(page, cpu);
+
+               put_cpu();
        }
        if (get_thread_fault_code())
                __update_mmu_cache(vma->vm_mm->context & TAG_CONTEXT_BITS,
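
For context: the hunk above (and the flush_dcache_page() hunks below) replace bare smp_processor_id() reads with the get_cpu()/put_cpu() pair from <linux/smp.h>, so the CPU number stays valid while the per-CPU dcache-dirty state is examined. A minimal, hypothetical sketch of that pattern (the function name is illustrative, not from this patch):

/* Hypothetical illustration of the get_cpu()/put_cpu() pattern used above. */
static void example_flush_if_local(struct page *page, int dirty_cpu)
{
	int this_cpu = get_cpu();	/* disables preemption, returns this CPU's id */

	if (dirty_cpu == this_cpu)
		flush_dcache_page_impl(page);			/* flush locally */
	else
		smp_flush_dcache_page_impl(page, dirty_cpu);	/* ask the owning CPU */

	put_cpu();			/* re-enables preemption */
}
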
@@ -227,14 +229,15 @@ void flush_dcache_page(struct page *page)
        struct address_space *mapping = page_mapping(page);
        int dirty = test_bit(PG_dcache_dirty, &page->flags);
        int dirty_cpu = dcache_dirty_cpu(page);
+       int this_cpu = get_cpu();
 
        if (mapping && !mapping_mapped(mapping)) {
                if (dirty) {
-                       if (dirty_cpu == smp_processor_id())
-                               return;
+                       if (dirty_cpu == this_cpu)
+                               goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
                }
-               set_dcache_dirty(page);
+               set_dcache_dirty(page, this_cpu);
        } else {
                /* We could delay the flush for the !page_mapping
                 * case too.  But that case is for exec env/arg
@@ -243,87 +246,9 @@ void flush_dcache_page(struct page *page)
                 */
                flush_dcache_page_impl(page);
        }
-}
-
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA.  It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
-       unsigned long offset;
-       pte_t *ptep;
 
-       if (pmd_none(*pmd))
-               return;
-       ptep = pte_offset_map(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-
-               if (pte_none(pte))
-                       continue;
-
-               if (pte_present(pte) && pte_dirty(pte)) {
-                       struct page *page;
-                       unsigned long pgaddr, uaddr;
-                       unsigned long pfn = pte_pfn(pte);
-
-                       if (!pfn_valid(pfn))
-                               continue;
-                       page = pfn_to_page(pfn);
-                       if (PageReserved(page) || !page_mapping(page))
-                               continue;
-                       pgaddr = (unsigned long) page_address(page);
-                       uaddr = address + offset;
-                       if ((pgaddr ^ uaddr) & (1 << 13))
-                               flush_dcache_page_all(mm, page);
-               }
-       }
-       pte_unmap(ptep - 1);
-}
-
-static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
-{
-       pmd_t *pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       do {
-               flush_cache_pte_range(mm, pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       pgd_t *dir = pgd_offset(mm, start);
-
-       if (mm == current->mm)
-               flushw_user();
-
-       if (vma->vm_file == NULL ||
-           ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
-               return;
-
-       do {
-               flush_cache_pmd_range(mm, dir, start, end - start);
-               start = (start + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (start && (start < end));
+out:
+       put_cpu();
 }
 
 void flush_icache_range(unsigned long start, unsigned long end)
@@ -337,11 +262,21 @@ void flush_icache_range(unsigned long start, unsigned long end)
        }
 }
 
+unsigned long page_to_pfn(struct page *page)
+{
+       return (unsigned long) ((page - mem_map) + pfn_base);
+}
+
+struct page *pfn_to_page(unsigned long pfn)
+{
+       return (mem_map + (pfn - pfn_base));
+}
+
 void show_mem(void)
 {
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6dkB\n",
+       printk("Free swap:       %6ldkB\n",
               nr_swap_pages << (PAGE_SHIFT-10));
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
@@ -1156,7 +1091,7 @@ struct pgtable_cache_struct pgt_quicklists;
 #else
 #define DC_ALIAS_SHIFT 0
 #endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+pte_t *__pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        struct page *page;
        unsigned long color;
@@ -1345,10 +1280,12 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
-       if (sparc_ramdisk_image) {
-               if (sparc_ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
-                       sparc_ramdisk_image -= KERNBASE;
-               initrd_start = sparc_ramdisk_image + phys_base;
+       if (sparc_ramdisk_image || sparc_ramdisk_image64) {
+               unsigned long ramdisk_image = sparc_ramdisk_image ?
+                       sparc_ramdisk_image : sparc_ramdisk_image64;
+               if (ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
+                       ramdisk_image -= KERNBASE;
+               initrd_start = ramdisk_image + phys_base;
                initrd_end = initrd_start + sparc_ramdisk_size;
                if (initrd_end > end_of_phys_memory) {
                        printk(KERN_CRIT "initrd extends beyond end of memory "
@@ -1391,6 +1328,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                size = initrd_end - initrd_start;
 
                /* Reserve the initrd image area. */
+#ifdef CONFIG_DEBUG_BOOTMEM
+               prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
+                       initrd_start, initrd_end);
+#endif
                reserve_bootmem(initrd_start, size);
                *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -1443,7 +1384,7 @@ void __init paging_init(void)
        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
                bigkernel = 1;
 #ifdef CONFIG_BLK_DEV_INITRD
-       if (sparc_ramdisk_image)
+       if (sparc_ramdisk_image || sparc_ramdisk_image64)
                real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
 #endif
 
@@ -1568,7 +1509,7 @@ void __init paging_init(void)
                zones_size[ZONE_DMA] = npages;
                zholes_size[ZONE_DMA] = npages - pages_avail;
 
-               free_area_init_node(0, &contig_page_data, NULL, zones_size,
+               free_area_init_node(0, &contig_page_data, zones_size,
                                    phys_base >> PAGE_SHIFT, zholes_size);
                mem_map = contig_page_data.node_mem_map;
        }
@@ -1582,7 +1523,7 @@ void __init paging_init(void)
  * prom_set_traptable() call, and OBP is allocating a scratchpad
  * for saving client program register state etc.
  */
-void __init sort_memlist(struct linux_mlist_p1275 *thislist)
+static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
 {
        int swapi = 0;
        int i, mitr;