fedora core 6 1.2949 + vserver 2.2.0

diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c
index 4628e26..c374e53 100644
--- a/arch/ppc/mm/init.c
+++ b/arch/ppc/mm/init.c
@@ -18,7 +18,6 @@
  *
  */
 
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -31,6 +30,7 @@
 #include <linux/bootmem.h>
 #include <linux/highmem.h>
 #include <linux/initrd.h>
+#include <linux/pagemap.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -66,17 +66,10 @@ unsigned long ppc_memoffset = PAGE_OFFSET;
 int mem_init_done;
 int init_bootmem_done;
 int boot_mapsize;
-#ifdef CONFIG_PPC_PMAC
-unsigned long agp_special_page;
-#endif
 
 extern char _end[];
 extern char etext[], _stext[];
 extern char __init_begin, __init_end;
-extern char __prep_begin, __prep_end;
-extern char __chrp_begin, __chrp_end;
-extern char __pmac_begin, __pmac_end;
-extern char __openfirmware_begin, __openfirmware_end;
 
 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
@@ -95,15 +88,13 @@ extern struct task_struct *current_set[NR_CPUS];
 char *klimit = _end;
 struct mem_pieces phys_avail;
 
-extern char *sysmap;
-extern unsigned long sysmap_size;
-
 /*
  * this tells the system to map all of ram with the segregs
  * (i.e. page tables) instead of the bats.
  * -- Cort
  */
 int __map_without_bats;
+int __map_without_ltlbs;
 
 /* max amount of RAM to use */
 unsigned long __max_memory;
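
The new __map_without_ltlbs flag parallels __map_without_bats above: on 4xx/8xx-class cores the kernel normally covers lowmem with a few pinned large-page TLB entries rather than ordinary page tables, and this flag (set by the "noltlbs" option parsed in MMU_setup() below) disables that for debugging. A sketch of how such a flag typically gates the pinned-mapping path; the function shape is modeled on the 4xx mmu_mapin_ram() and is illustrative, not part of this patch:

    unsigned long __init mmu_mapin_ram(void)
    {
            unsigned long mapped = 0;

            if (__map_without_ltlbs)
                    return 0;       /* map nothing; fall back to page tables */
            /* ... otherwise pin large TLB entries over lowmem,
             * accumulating the size covered into 'mapped' ... */
            return mapped;
    }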
@@ -118,7 +109,7 @@ void show_mem(void)
 
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6dkB\n",nr_swap_pages<<(PAGE_SHIFT-10));
+       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
@@ -148,7 +139,7 @@ static void free_sec(unsigned long start, unsigned long end, const char *name)
 
        while (start < end) {
                ClearPageReserved(virt_to_page(start));
-               set_page_count(virt_to_page(start), 1);
+               init_page_count(virt_to_page(start));
                free_page(start);
                cnt++;
                start += PAGE_SIZE;
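
The set_page_count(page, 1) to init_page_count(page) conversion here recurs below in free_initrd_mem() and in the highmem loop of mem_init(): open-coded writes to the page refcount were pushed out of arch code, and init_page_count() now expresses "make this look like a freshly allocated page". The full release idiom the patch keeps converting, as a self-contained sketch (the helper name is hypothetical):

    /* Hand a range of reserved init pages back to the page allocator. */
    static void free_reserved_range(unsigned long start, unsigned long end)
    {
            for (; start < end; start += PAGE_SIZE) {
                    struct page *page = virt_to_page(start);

                    ClearPageReserved(page);  /* allow the page to be freed */
                    init_page_count(page);    /* refcount 1, as if just allocated */
                    free_page(start);         /* back to the buddy allocator */
                    totalram_pages++;
            }
    }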
@@ -168,15 +159,8 @@ void free_initmem(void)
 
        printk ("Freeing unused kernel memory:");
        FREESEC(init);
-       if (_machine != _MACH_Pmac)
-               FREESEC(pmac);
-       if (_machine != _MACH_chrp)
-               FREESEC(chrp);
-       if (_machine != _MACH_prep)
-               FREESEC(prep);
-       if (!have_of)
-               FREESEC(openfirmware);
        printk("\n");
+       ppc_md.progress = NULL;
 #undef FREESEC
 }
 
@@ -187,7 +171,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
-               set_page_count(virt_to_page(start), 1);
+               init_page_count(virt_to_page(start));
                free_page(start);
                totalram_pages++;
        }
@@ -204,6 +188,10 @@ void MMU_setup(void)
                __map_without_bats = 1;
        }
 
+       if (strstr(cmd_line, "noltlbs")) {
+               __map_without_ltlbs = 1;
+       }
+
        /* Look for mem= option on command line */
        if (strstr(cmd_line, "mem=")) {
                char *p, *q;
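
The diff context cuts off inside the mem= branch; its elided body follows the classic hand-rolled scan for a boot option with a size suffix. A sketch of that idiom (illustrative, not the verbatim elided code):

    unsigned long maxmem = 0;
    char *p, *q;

    for (q = cmd_line; (p = strstr(q, "mem=")) != NULL; ) {
            q = p + 4;                        /* skip past "mem=" */
            if (p > cmd_line && p[-1] != ' ')
                    continue;                 /* require a word boundary */
            maxmem = simple_strtoul(q, &q, 0);
            if (*q == 'k' || *q == 'K')
                    maxmem <<= 10;            /* kilobytes */
            else if (*q == 'm' || *q == 'M')
                    maxmem <<= 20;            /* megabytes */
    }
    __max_memory = maxmem;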
@@ -253,6 +241,12 @@ void __init MMU_init(void)
        if (__max_memory && total_memory > __max_memory)
                total_memory = __max_memory;
        total_lowmem = total_memory;
+#ifdef CONFIG_FSL_BOOKE
+       /* Freescale Book-E parts expect lowmem to be mapped by fixed TLB
+        * entries, so we need to adjust lowmem to match the amount we can map
+        * in the fixed entries */
+       adjust_total_lowmem();
+#endif /* CONFIG_FSL_BOOKE */
        if (total_lowmem > __max_low_memory) {
                total_lowmem = __max_low_memory;
 #ifndef CONFIG_HIGHMEM
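
The adjust_total_lowmem() call reflects a Freescale Book-E constraint: e500-style cores map lowmem through a handful of pinned TLB1/CAM entries of bounded size, so only as much lowmem may be claimed as those entries can cover. The idea, as an illustrative sketch (the entry count and size here are placeholders, not the real e500 values):

    #define NUM_PINNED_CAMS 3                   /* fixed TLB1 entries reserved */
    #define MAX_CAM_BYTES   (256UL << 20)       /* largest single entry */

    static void adjust_total_lowmem(void)
    {
            unsigned long mappable = NUM_PINNED_CAMS * MAX_CAM_BYTES;

            if (total_lowmem > mappable)
                    total_lowmem = mappable;    /* the rest becomes highmem */
    }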
@@ -364,8 +358,8 @@ void __init do_init_bootmem(void)
  */
 void __init paging_init(void)
 {
-       unsigned long zones_size[MAX_NR_ZONES], i;
-
+       unsigned long start_pfn, end_pfn;
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
 #ifdef CONFIG_HIGHMEM
        map_page(PKMAP_BASE, 0, 0);     /* XXX gross */
        pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k
@@ -375,19 +369,19 @@ void __init paging_init(void)
                        (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN);
        kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
+       /* All pages are DMA-able so we put them all in the DMA zone. */
+       start_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT;
+       end_pfn = start_pfn + (total_memory >> PAGE_SHIFT);
+       add_active_range(0, start_pfn, end_pfn);
 
-       /*
-        * All pages are DMA-able so we put them all in the DMA zone.
-        */
-       zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
-       for (i = 1; i < MAX_NR_ZONES; i++)
-               zones_size[i] = 0;
-
+       memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_HIGHMEM] = total_memory >> PAGE_SHIFT;
+#else
+       max_zone_pfns[ZONE_DMA] = total_memory >> PAGE_SHIFT;
 #endif /* CONFIG_HIGHMEM */
-
-       free_area_init(zones_size);
+       free_area_init_nodes(max_zone_pfns);
 }
 
 void __init mem_init(void)
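
The paging_init() rewrite adopts the interface that came in with free_area_init_nodes(): instead of handing the core a per-zone size array, the arch registers which PFN ranges actually contain RAM and reports only the limiting PFN of each zone, letting the core derive zone sizes and holes itself. The general calling pattern (placeholder bounds; node 0 only, as on this arch):

    unsigned long max_zone_pfns[MAX_NR_ZONES];

    add_active_range(0, start_pfn, end_pfn);   /* RAM actually present */
    memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
    max_zone_pfns[ZONE_DMA] = end_pfn;         /* top PFN of each populated zone */
    free_area_init_nodes(max_zone_pfns);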
@@ -400,7 +394,6 @@ void __init mem_init(void)
        unsigned long highmem_mapnr;
 
        highmem_mapnr = total_lowmem >> PAGE_SHIFT;
-       highmem_start_page = mem_map + highmem_mapnr;
 #endif /* CONFIG_HIGHMEM */
        max_mapnr = total_memory >> PAGE_SHIFT;
 
@@ -418,24 +411,6 @@ void __init mem_init(void)
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-#ifdef CONFIG_PPC_OF
-       /* mark the RTAS pages as reserved */
-       if ( rtas_data )
-               for (addr = (ulong)__va(rtas_data);
-                    addr < PAGE_ALIGN((ulong)__va(rtas_data)+rtas_size) ;
-                    addr += PAGE_SIZE)
-                       SetPageReserved(virt_to_page(addr));
-#endif
-#ifdef CONFIG_PPC_PMAC
-       if (agp_special_page)
-               SetPageReserved(virt_to_page(agp_special_page));
-#endif
-       if ( sysmap )
-               for (addr = (unsigned long)sysmap;
-                    addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
-                    addr += PAGE_SIZE)
-                       SetPageReserved(virt_to_page(addr));
-
        for (addr = PAGE_OFFSET; addr < (unsigned long)high_memory;
             addr += PAGE_SIZE) {
                if (!PageReserved(virt_to_page(addr)))
@@ -457,8 +432,7 @@ void __init mem_init(void)
                        struct page *page = mem_map + pfn;
 
                        ClearPageReserved(page);
-                       set_bit(PG_highmem, &page->flags);
-                       set_page_count(page, 1);
+                       init_page_count(page);
                        __free_page(page);
                        totalhigh_pages++;
                }
@@ -471,25 +445,6 @@ void __init mem_init(void)
               codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
               initpages<< (PAGE_SHIFT-10),
               (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
-       if (sysmap)
-               printk("System.map loaded at 0x%08x for debugger, size: %ld bytes\n",
-                       (unsigned int)sysmap, sysmap_size);
-#ifdef CONFIG_PPC_PMAC
-       if (agp_special_page)
-               printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
-#endif
-
-       /* Make sure all our pagetable pages have page->mapping
-          and page->index set correctly. */
-       for (addr = KERNELBASE; addr != 0; addr += PGDIR_SIZE) {
-               struct page *pg;
-               pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);
-               if (pmd_present(*pmd)) {
-                       pg = pmd_page(*pmd);
-                       pg->mapping = (void *) &init_mm;
-                       pg->index = addr;
-               }
-       }
 
        mem_init_done = 1;
 }
@@ -530,30 +485,6 @@ set_phys_avail(unsigned long total_memory)
                                  initrd_end - initrd_start, 1);
        }
 #endif /* CONFIG_BLK_DEV_INITRD */
-#ifdef CONFIG_PPC_OF
-       /* remove the RTAS pages from the available memory */
-       if (rtas_data)
-               mem_pieces_remove(&phys_avail, rtas_data, rtas_size, 1);
-#endif
-       /* remove the sysmap pages from the available memory */
-       if (sysmap)
-               mem_pieces_remove(&phys_avail, __pa(sysmap), sysmap_size, 1);
-#ifdef CONFIG_PPC_PMAC
-       /* Because of some uninorth weirdness, we need a page of
-        * memory as high as possible (it must be outside of the
-        * bus address seen as the AGP aperture). It will be used
-        * by the r128 DRM driver
-        *
-        * FIXME: We need to make sure that page doesn't overlap any of the\
-        * above. This could be done by improving mem_pieces_find to be able
-        * to do a backward search from the end of the list.
-        */
-       if (_machine == _MACH_Pmac && find_devices("uni-north-agp")) {
-               agp_special_page = (total_memory - PAGE_SIZE);
-               mem_pieces_remove(&phys_avail, agp_special_page, PAGE_SIZE, 0);
-               agp_special_page = (unsigned long)__va(agp_special_page);
-       }
-#endif /* CONFIG_PPC_PMAC */
 }
 
 /* Mark some memory as reserved by removing it from phys_avail. */
@@ -575,8 +506,12 @@ void flush_dcache_page(struct page *page)
 void flush_dcache_icache_page(struct page *page)
 {
 #ifdef CONFIG_BOOKE
-       __flush_dcache_icache(kmap(page));
-       kunmap(page);
+       void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+       __flush_dcache_icache(start);
+       kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+#elif defined(CONFIG_8xx)
+       /* On 8xx there is no need to kmap since highmem is not supported */
+       __flush_dcache_icache(page_address(page));
 #else
        __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 #endif
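
The kmap() to kmap_atomic() switch matters because this flush can be reached from contexts that must not sleep: kmap() may block waiting for a free kmap slot, while kmap_atomic() maps through a per-CPU fixmap entry and never sleeps, at the price of forbidding scheduling between map and unmap. The era-appropriate pairing, as a minimal sketch (KM_USER0 stands in for whichever slot the caller owns):

    void *p = kmap_atomic(page, KM_USER0);  /* never sleeps; per-CPU slot */
    /* ... touch the page contents; no sleeping allowed in between ... */
    kunmap_atomic(p, KM_USER0);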
@@ -619,6 +554,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 
        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);
+#ifdef CONFIG_8xx
+               /* On 8xx, the TLB handlers work in two stages: first,
+                * a zeroed entry is loaded by the TLBMiss handler, which
+                * then causes the TLBError handler to be triggered.
+                * The zeroed TLB entry therefore has to be invalidated
+                * whenever a page miss occurs.
+                */
+               _tlbie(address);
+#endif
                if (!PageReserved(page)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        if (vma->vm_mm == current->active_mm)
@@ -638,7 +582,29 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                mm = (address < TASK_SIZE)? vma->vm_mm: &init_mm;
                pmd = pmd_offset(pgd_offset(mm, address), address);
                if (!pmd_none(*pmd))
-                       add_hash_page(mm->context, address, pmd_val(*pmd));
+                       add_hash_page(mm->context.id, address, pmd_val(*pmd));
        }
 #endif
 }
+
+/*
+ * This is called by /dev/mem to decide whether a given address
+ * has to be mapped non-cacheable or not.
+ */
+int page_is_ram(unsigned long pfn)
+{
+       return pfn < max_pfn;
+}
+
+pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+                             unsigned long size, pgprot_t vma_prot)
+{
+       if (ppc_md.phys_mem_access_prot)
+               return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);
+
+       if (!page_is_ram(pfn))
+               vma_prot = __pgprot(pgprot_val(vma_prot)
+                                   | _PAGE_GUARDED | _PAGE_NO_CACHE);
+       return vma_prot;
+}
+EXPORT_SYMBOL(phys_mem_access_prot);
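
The exported phys_mem_access_prot() is consumed from the /dev/mem mmap path: the driver asks the arch what protection to use for a physical range, and non-RAM addresses come back guarded and cache-inhibited. A minimal, illustrative mmap handler showing the call (not the actual drivers/char/mem.c code):

    static int mmap_phys_sketch(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long pfn = vma->vm_pgoff;
            unsigned long size = vma->vm_end - vma->vm_start;

            vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
                                                     vma->vm_page_prot);
            if (remap_pfn_range(vma, vma->vm_start, pfn, size,
                                vma->vm_page_prot))
                    return -EAGAIN;
            return 0;
    }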