linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 994174a..7d8d762 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -19,6 +19,9 @@
 #include <linux/pagemap.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/kprobes.h>
+#include <linux/cache.h>
+#include <linux/sort.h>
 
 #include <asm/head.h>
 #include <asm/system.h>
 #include <asm/spitfire.h>
 #include <asm/sections.h>
 
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
 extern void device_scan(void);
 
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS      32
+
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+       const struct linux_prom64_registers *x = a, *y = b;
+
+       if (x->phys_addr > y->phys_addr)
+               return 1;
+       if (x->phys_addr < y->phys_addr)
+               return -1;
+       return 0;
+}
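+
+/* Fed to lib/sort() below; sort() is an in-place heapsort, so the
+ * firmware tables can be sorted this early in boot with no memory
+ * allocator available.
+ */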
+
+static void __init read_obp_memory(const char *property,
+                                  struct linux_prom64_registers *regs,
+                                  int *num_ents)
+{
+       int node = prom_finddevice("/memory");
+       int prop_size = prom_getproplen(node, property);
+       int ents, ret, i;
+
+       ents = prop_size / sizeof(struct linux_prom64_registers);
+       if (ents > MAX_BANKS) {
+               prom_printf("The machine has more %s property entries than "
+                           "this kernel can support (%d).\n",
+                           property, MAX_BANKS);
+               prom_halt();
+       }
+
+       ret = prom_getproperty(node, property, (char *) regs, prop_size);
+       if (ret == -1) {
+               prom_printf("Couldn't get %s property from /memory.\n");
+               prom_halt();
+       }
+
+       *num_ents = ents;
+
+       /* Sanitize what we got from the firmware, by page aligning
+        * everything.
+        */
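+       /* e.g. (made-up numbers, 8K pages): a bank reported at base
+        * 0x2001000 with size 0x402000 comes out as base 0x2002000,
+        * size 0x401000.
+        */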
+       for (i = 0; i < ents; i++) {
+               unsigned long base, size;
 
-unsigned long *sparc64_valid_addr_bitmap;
+               base = regs[i].phys_addr;
+               size = regs[i].reg_size;
+
+               size &= PAGE_MASK;
+               if (base & ~PAGE_MASK) {
+                       unsigned long new_base = PAGE_ALIGN(base);
+
+                       size -= new_base - base;
+                       if ((long) size < 0L)
+                               size = 0UL;
+                       base = new_base;
+               }
+               regs[i].phys_addr = base;
+               regs[i].reg_size = size;
+       }
+       sort(regs, ents, sizeof(struct linux_prom64_registers),
+            cmp_p64, NULL);
+}
+
+unsigned long *sparc64_valid_addr_bitmap __read_mostly;
 
 /* Ugly, but necessary... -DaveM */
-unsigned long phys_base;
-unsigned long kern_base;
-unsigned long kern_size;
-unsigned long pfn_base;
-
-/* This is even uglier. We have a problem where the kernel may not be
- * located at phys_base. However, initial __alloc_bootmem() calls need to
- * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
- * those page mappings wont work. Things are ok after inherit_prom_mappings
- * is called though. Dave says he'll clean this up some other time.
- * -- BenC
- */
-static unsigned long bootmap_base;
+unsigned long phys_base __read_mostly;
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+unsigned long pfn_base __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1".  */
-spinlock_t ctx_alloc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(ctx_alloc_lock);
 unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
-#define CTX_BMAP_SLOTS (1UL << (CTX_VERSION_SHIFT - 6))
+#define CTX_BMAP_SLOTS (1UL << (CTX_NR_BITS - 6))
 unsigned long mmu_context_bmap[CTX_BMAP_SLOTS];
 
 /* References to special section boundaries */
 extern char  _start[], _end[];
 
 /* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
 extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
-struct page *mem_map_zero;
+struct page *mem_map_zero __read_mostly;
+
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
 
 int bigkernel = 0;
 
@@ -86,40 +150,14 @@ void check_pgt_cache(void)
        preempt_disable();
        if (pgtable_cache_size > PGT_CACHE_HIGH) {
                do {
-#ifdef CONFIG_SMP
                        if (pgd_quicklist)
                                free_pgd_slow(get_pgd_fast());
-#endif
                        if (pte_quicklist[0])
                                free_pte_slow(pte_alloc_one_fast(NULL, 0));
                        if (pte_quicklist[1])
                                free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
                } while (pgtable_cache_size > PGT_CACHE_LOW);
        }
-#ifndef CONFIG_SMP
-        if (pgd_cache_size > PGT_CACHE_HIGH / 4) {
-               struct page *page, *page2;
-                for (page2 = NULL, page = (struct page *)pgd_quicklist; page;) {
-                        if ((unsigned long)page->lru.prev == 3) {
-                                if (page2)
-                                        page2->lru.next = page->lru.next;
-                                else
-                                        pgd_quicklist = (void *) page->lru.next;
-                                pgd_cache_size -= 2;
-                                __free_page(page);
-                                if (page2)
-                                        page = (struct page *)page2->lru.next;
-                                else
-                                        page = (struct page *)pgd_quicklist;
-                                if (pgd_cache_size <= PGT_CACHE_LOW / 4)
-                                        break;
-                                continue;
-                        }
-                        page2 = page;
-                        page = (struct page *)page->lru.next;
-                }
-        }
-#endif
        preempt_enable();
 }
 
@@ -136,7 +174,7 @@ __inline__ void flush_dcache_page_impl(struct page *page)
        atomic_inc(&dcpage_flushes);
 #endif
 
-#if (L1DCACHE_SIZE > PAGE_SIZE)
+#ifdef DCACHE_ALIASING_POSSIBLE
        __flush_dcache_page(page_address(page),
                            ((tlb_type == spitfire) &&
                             page_mapping(page) != NULL));
@@ -148,26 +186,36 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 }
 
 #define PG_dcache_dirty                PG_arch_1
+#define PG_dcache_cpu_shift    24
+#define PG_dcache_cpu_mask     (256 - 1)
+
+#if NR_CPUS > 256
+#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
+#endif
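+
+/* The dirtying cpu thus lives in an 8-bit field at bits 24-31 of
+ * page->flags, alongside the PG_dcache_dirty bit itself.
+ */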
 
 #define dcache_dirty_cpu(page) \
-       (((page)->flags >> 24) & (NR_CPUS - 1UL))
+       (((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 
 static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
 {
        unsigned long mask = this_cpu;
-       unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
-       mask = (mask << 24) | (1UL << PG_dcache_dirty);
+       unsigned long non_cpu_bits;
+
+       non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
+       mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
+
        __asm__ __volatile__("1:\n\t"
                             "ldx       [%2], %%g7\n\t"
-                            "and       %%g7, %1, %%g5\n\t"
-                            "or        %%g5, %0, %%g5\n\t"
-                            "casx      [%2], %%g7, %%g5\n\t"
-                            "cmp       %%g7, %%g5\n\t"
+                            "and       %%g7, %1, %%g1\n\t"
+                            "or        %%g1, %0, %%g1\n\t"
+                            "casx      [%2], %%g7, %%g1\n\t"
+                            "cmp       %%g7, %%g1\n\t"
+                            "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
-                            " membar   #StoreLoad | #StoreStore"
+                            " nop"
                             : /* no outputs */
                             : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
-                            : "g5", "g7");
+                            : "g1", "g7");
 }
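+
+/* The cas loop above is roughly:
+ *
+ *     do {
+ *             old = page->flags;
+ *             new = (old & non_cpu_bits) | mask;
+ *     } while (cmpxchg(&page->flags, old, new) != old);
+ */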
 
 static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
@@ -177,24 +225,24 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
        __asm__ __volatile__("! test_and_clear_dcache_dirty\n"
                             "1:\n\t"
                             "ldx       [%2], %%g7\n\t"
-                            "srlx      %%g7, 24, %%g5\n\t"
-                            "and       %%g5, %3, %%g5\n\t"
-                            "cmp       %%g5, %0\n\t"
+                            "srlx      %%g7, %4, %%g1\n\t"
+                            "and       %%g1, %3, %%g1\n\t"
+                            "cmp       %%g1, %0\n\t"
                             "bne,pn    %%icc, 2f\n\t"
-                            " andn     %%g7, %1, %%g5\n\t"
-                            "casx      [%2], %%g7, %%g5\n\t"
-                            "cmp       %%g7, %%g5\n\t"
+                            " andn     %%g7, %1, %%g1\n\t"
+                            "casx      [%2], %%g7, %%g1\n\t"
+                            "cmp       %%g7, %%g1\n\t"
+                            "membar    #StoreLoad | #StoreStore\n\t"
                             "bne,pn    %%xcc, 1b\n\t"
-                            " membar   #StoreLoad | #StoreStore\n"
+                            " nop\n"
                             "2:"
                             : /* no outputs */
                             : "r" (cpu), "r" (mask), "r" (&page->flags),
-                              "i" (NR_CPUS - 1UL)
-                            : "g5", "g7");
+                              "i" (PG_dcache_cpu_mask),
+                              "i" (PG_dcache_cpu_shift)
+                            : "g1", "g7");
 }
 
-extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
        struct page *page;
@@ -205,7 +253,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        if (pfn_valid(pfn) &&
            (page = pfn_to_page(pfn), page_mapping(page)) &&
            ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-               int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+               int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+                          PG_dcache_cpu_mask);
                int this_cpu = get_cpu();
 
                /* This is just to optimize away some function calls
@@ -220,20 +269,28 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 
                put_cpu();
        }
-       if (get_thread_fault_code())
-               __update_mmu_cache(vma->vm_mm->context & TAG_CONTEXT_BITS,
-                                  address, pte, get_thread_fault_code());
 }
 
 void flush_dcache_page(struct page *page)
 {
-       struct address_space *mapping = page_mapping(page);
-       int dirty = test_bit(PG_dcache_dirty, &page->flags);
-       int dirty_cpu = dcache_dirty_cpu(page);
-       int this_cpu = get_cpu();
+       struct address_space *mapping;
+       int this_cpu;
+
+       /* Do not bother with the expensive D-cache flush if it
+        * is merely the zero page.  The 'bigcore' testcase in GDB
+        * causes this case to run millions of times.
+        */
+       if (page == ZERO_PAGE(0))
+               return;
 
+       this_cpu = get_cpu();
+
+       mapping = page_mapping(page);
        if (mapping && !mapping_mapped(mapping)) {
+               int dirty = test_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
+                       int dirty_cpu = dcache_dirty_cpu(page);
+
                        if (dirty_cpu == this_cpu)
                                goto out;
                        smp_flush_dcache_page_impl(page, dirty_cpu);
@@ -252,88 +309,7 @@ out:
        put_cpu();
 }
 
-/* When shared+writable mmaps of files go away, we lose all dirty
- * page state, so we have to deal with D-cache aliasing here.
- *
- * This code relies on the fact that flush_cache_range() is always
- * called for an area composed by a single VMA.  It also assumes that
- * the MM's page_table_lock is held.
- */
-static inline void flush_cache_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long address, unsigned long size)
-{
-       unsigned long offset;
-       pte_t *ptep;
-
-       if (pmd_none(*pmd))
-               return;
-       ptep = pte_offset_map(pmd, address);
-       offset = address & ~PMD_MASK;
-       if (offset + size > PMD_SIZE)
-               size = PMD_SIZE - offset;
-       size &= PAGE_MASK;
-       for (offset = 0; offset < size; ptep++, offset += PAGE_SIZE) {
-               pte_t pte = *ptep;
-
-               if (pte_none(pte))
-                       continue;
-
-               if (pte_present(pte) && pte_dirty(pte)) {
-                       struct page *page;
-                       unsigned long pgaddr, uaddr;
-                       unsigned long pfn = pte_pfn(pte);
-
-                       if (!pfn_valid(pfn))
-                               continue;
-                       page = pfn_to_page(pfn);
-                       if (PageReserved(page) || !page_mapping(page))
-                               continue;
-                       pgaddr = (unsigned long) page_address(page);
-                       uaddr = address + offset;
-                       if ((pgaddr ^ uaddr) & (1 << 13))
-                               flush_dcache_page_all(mm, page);
-               }
-       }
-       pte_unmap(ptep - 1);
-}
-
-static inline void flush_cache_pmd_range(struct mm_struct *mm, pgd_t *dir, unsigned long address, unsigned long size)
-{
-       pmd_t *pmd;
-       unsigned long end;
-
-       if (pgd_none(*dir))
-               return;
-       pmd = pmd_offset(dir, address);
-       end = address + size;
-       if (end > ((address + PGDIR_SIZE) & PGDIR_MASK))
-               end = ((address + PGDIR_SIZE) & PGDIR_MASK);
-       do {
-               flush_cache_pte_range(mm, pmd, address, end - address);
-               address = (address + PMD_SIZE) & PMD_MASK;
-               pmd++;
-       } while (address < end);
-}
-
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-       struct mm_struct *mm = vma->vm_mm;
-       pgd_t *dir = pgd_offset(mm, start);
-
-       if (mm == current->mm)
-               flushw_user();
-
-       if (vma->vm_file == NULL ||
-           ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)))
-               return;
-
-       do {
-               flush_cache_pmd_range(mm, dir, start, end - start);
-               start = (start + PGDIR_SIZE) & PGDIR_MASK;
-               dir++;
-       } while (start && (start < end));
-}
-
-void flush_icache_range(unsigned long start, unsigned long end)
+void __kprobes flush_icache_range(unsigned long start, unsigned long end)
 {
        /* Cheetah has coherent I-cache. */
        if (tlb_type == spitfire) {
@@ -363,9 +339,6 @@ void show_mem(void)
        printk("%ld pages of RAM\n", num_physpages);
        printk("%d free pages\n", nr_free_pages());
        printk("%d pages in page table cache\n",pgtable_cache_size);
-#ifndef CONFIG_SMP
-       printk("%d entries in page dir cache\n",pgd_cache_size);
-#endif 
 }
 
 void mmu_info(struct seq_file *m)
@@ -395,6 +368,11 @@ struct linux_prom_translation {
        unsigned long data;
 };
 
+/* Exported for kernel TLB miss handling in ktlb.S */
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+unsigned int swapper_pgd_zero __read_mostly;
+
 extern unsigned long prom_boot_page;
 extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
 extern int prom_get_mmu_ihandle(void);
@@ -403,282 +381,162 @@ extern void register_prom_callbacks(void);
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-void __init early_pgtable_allocfail(char *type)
-{
-       prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
-       prom_halt();
-}
-
-#define BASE_PAGE_SIZE 8192
-static pmd_t *prompmd;
-
 /*
  * Translate PROM's mapping we capture at boot time into physical address.
  * The second parameter is only set from prom_callback() invocations.
  */
 unsigned long prom_virt_to_phys(unsigned long promva, int *error)
 {
-       pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
-       pte_t *ptep;
-       unsigned long base;
-
-       if (pmd_none(*pmdp)) {
-               if (error)
-                       *error = 1;
-               return(0);
-       }
-       ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
-       if (!pte_present(*ptep)) {
-               if (error)
-                       *error = 1;
-               return(0);
-       }
-       if (error) {
-               *error = 0;
-               return(pte_val(*ptep));
+       int i;
+
+       for (i = 0; i < prom_trans_ents; i++) {
+               struct linux_prom_translation *p = &prom_trans[i];
+
+               if (promva >= p->virt &&
+                   promva < (p->virt + p->size)) {
+                       unsigned long base = p->data & _PAGE_PADDR;
+
+                       if (error)
+                               *error = 0;
+                       return base + (promva & (8192 - 1));
+               }
        }
-       base = pte_val(*ptep) & _PAGE_PADDR;
-       return(base + (promva & (BASE_PAGE_SIZE - 1)));
+       if (error)
+               *error = 1;
+       return 0UL;
 }
 
-static void inherit_prom_mappings(void)
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static inline int in_obp_range(unsigned long vaddr)
 {
-       struct linux_prom_translation *trans;
-       unsigned long phys_page, tte_vaddr, tte_data;
-       void (*remap_func)(unsigned long, unsigned long, int);
-       pmd_t *pmdp;
-       pte_t *ptep;
-       int node, n, i, tsz;
-       extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+       return (vaddr >= LOW_OBP_ADDRESS &&
+               vaddr < HI_OBP_ADDRESS);
+}
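+
+/* LOW_OBP_ADDRESS and HI_OBP_ADDRESS bound the firmware's virtual
+ * window, 0xf0000000 - 0x100000000 on these machines.
+ */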
+
+static int cmp_ptrans(const void *a, const void *b)
+{
+       const struct linux_prom_translation *x = a, *y = b;
+
+       if (x->virt > y->virt)
+               return 1;
+       if (x->virt < y->virt)
+               return -1;
+       return 0;
+}
+
+/* Read OBP translations property into 'prom_trans[]'.  */
+static void __init read_obp_translations(void)
+{
+       int n, node, ents, first, last, i;
 
        node = prom_finddevice("/virtual-memory");
        n = prom_getproplen(node, "translations");
-       if (n == 0 || n == -1) {
-               prom_printf("Couldn't get translation property\n");
+       if (unlikely(n == 0 || n == -1)) {
+               prom_printf("prom_mappings: Couldn't get size.\n");
                prom_halt();
        }
-       n += 5 * sizeof(struct linux_prom_translation);
-       for (tsz = 1; tsz < n; tsz <<= 1)
-               /* empty */;
-       trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
-       if (trans == NULL) {
-               prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
+       if (unlikely(n > sizeof(prom_trans))) {
+               prom_printf("prom_mappings: Size %Zd is too big.\n", n);
                prom_halt();
        }
-       memset(trans, 0, tsz);
 
-       if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-               prom_printf("Couldn't get translation property\n");
+       if ((n = prom_getproperty(node, "translations",
+                                 (char *)&prom_trans[0],
+                                 sizeof(prom_trans))) == -1) {
+               prom_printf("prom_mappings: Couldn't get property.\n");
                prom_halt();
        }
-       n = n / sizeof(*trans);
 
-       /*
-        * The obp translations are saved based on 8k pagesize, since obp can use
-        * a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000, ie obp 
-        * range, are handled in entry.S and do not use the vpte scheme (see rant
-        * in inherit_locked_prom_mappings()).
-        */
-#define OBP_PMD_SIZE 2048
-       prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
-       if (prompmd == NULL)
-               early_pgtable_allocfail("pmd");
-       memset(prompmd, 0, OBP_PMD_SIZE);
-       for (i = 0; i < n; i++) {
-               unsigned long vaddr;
-
-               if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
-                       for (vaddr = trans[i].virt;
-                            ((vaddr < trans[i].virt + trans[i].size) && 
-                            (vaddr < HI_OBP_ADDRESS));
-                            vaddr += BASE_PAGE_SIZE) {
-                               unsigned long val;
-
-                               pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
-                               if (pmd_none(*pmdp)) {
-                                       ptep = __alloc_bootmem(BASE_PAGE_SIZE,
-                                                              BASE_PAGE_SIZE,
-                                                              bootmap_base);
-                                       if (ptep == NULL)
-                                               early_pgtable_allocfail("pte");
-                                       memset(ptep, 0, BASE_PAGE_SIZE);
-                                       pmd_set(pmdp, ptep);
-                               }
-                               ptep = (pte_t *)__pmd_page(*pmdp) +
-                                               ((vaddr >> 13) & 0x3ff);
+       n = n / sizeof(struct linux_prom_translation);
 
-                               val = trans[i].data;
+       ents = n;
 
-                               /* Clear diag TTE bits. */
-                               if (tlb_type == spitfire)
-                                       val &= ~0x0003fe0000000000UL;
+       sort(prom_trans, ents, sizeof(struct linux_prom_translation),
+            cmp_ptrans, NULL);
 
-                               set_pte (ptep, __pte(val | _PAGE_MODIFIED));
-                               trans[i].data += BASE_PAGE_SIZE;
-                       }
-               }
+       /* Now kick out all the non-OBP entries.  */
+       for (i = 0; i < ents; i++) {
+               if (in_obp_range(prom_trans[i].virt))
+                       break;
        }
-       phys_page = __pa(prompmd);
-       obp_iaddr_patch[0] |= (phys_page >> 10);
-       obp_iaddr_patch[1] |= (phys_page & 0x3ff);
-       flushi((long)&obp_iaddr_patch[0]);
-       obp_daddr_patch[0] |= (phys_page >> 10);
-       obp_daddr_patch[1] |= (phys_page & 0x3ff);
-       flushi((long)&obp_daddr_patch[0]);
+       first = i;
+       for (; i < ents; i++) {
+               if (!in_obp_range(prom_trans[i].virt))
+                       break;
+       }
+       last = i;
 
-       /* Now fixup OBP's idea about where we really are mapped. */
-       prom_printf("Remapping the kernel... ");
+       for (i = 0; i < (last - first); i++) {
+               struct linux_prom_translation *src = &prom_trans[i + first];
+               struct linux_prom_translation *dest = &prom_trans[i];
 
-       /* Spitfire Errata #32 workaround */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0),
-                            "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-       switch (tlb_type) {
-       default:
-       case spitfire:
-               phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-               break;
-
-       case cheetah:
-       case cheetah_plus:
-               phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
-               break;
-       };
-
-       phys_page &= _PAGE_PADDR;
-       phys_page += ((unsigned long)&prom_boot_page -
-                     (unsigned long)KERNBASE);
+               *dest = *src;
+       }
+       for (; i < ents; i++) {
+               struct linux_prom_translation *dest = &prom_trans[i];
+               dest->virt = dest->size = dest->data = 0x0UL;
+       }
+
+       prom_trans_ents = last - first;
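+       /* e.g. if sorted entries 3-7 of 12 fell in the OBP range,
+        * they now occupy slots 0-4 and prom_trans_ents is 5.
+        */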
 
        if (tlb_type == spitfire) {
-               /* Lock this into i/d tlb entry 59 */
-               __asm__ __volatile__(
-                       "stxa   %%g0, [%2] %3\n\t"
-                       "stxa   %0, [%1] %4\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6\n\t"
-                       "stxa   %%g0, [%2] %5\n\t"
-                       "stxa   %0, [%1] %6\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6"
-                       : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-                                _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-                       "r" (59 << 3), "r" (TLB_TAG_ACCESS),
-                       "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-                       "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-                       : "memory");
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               /* Lock this into i/d tlb-0 entry 11 */
-               __asm__ __volatile__(
-                       "stxa   %%g0, [%2] %3\n\t"
-                       "stxa   %0, [%1] %4\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6\n\t"
-                       "stxa   %%g0, [%2] %5\n\t"
-                       "stxa   %0, [%1] %6\n\t"
-                       "membar #Sync\n\t"
-                       "flush  %%g6"
-                       : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
-                                _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
-                       "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
-                       "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
-                       "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
-                       : "memory");
-       } else {
-               /* Implement me :-) */
-               BUG();
+               /* Clear diag TTE bits. */
+               for (i = 0; i < prom_trans_ents; i++)
+                       prom_trans[i].data &= ~0x0003fe0000000000UL;
        }
+}
 
-       tte_vaddr = (unsigned long) KERNBASE;
-
-       /* Spitfire Errata #32 workaround */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0),
-                            "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+static void __init remap_kernel(void)
+{
+       unsigned long phys_page, tte_vaddr, tte_data;
+       int tlb_ent = sparc64_highest_locked_tlbent();
 
-       if (tlb_type == spitfire)
-               tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
-       else
-               tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+       tte_vaddr = (unsigned long) KERNBASE;
+       phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+                                _PAGE_CP | _PAGE_CV | _PAGE_P |
+                                _PAGE_L | _PAGE_W));
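+
+       /* phys_page is prom_boot_mapping_phys_low rounded down to a
+        * 4MB boundary (low 22 bits cleared), matching _PAGE_SZ4MB.
+        */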
 
        kern_locked_tte_data = tte_data;
 
-       remap_func = (void *)  ((unsigned long) &prom_remap -
-                               (unsigned long) &prom_boot_page);
-
-
-       /* Spitfire Errata #32 workaround */
-       __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
-                            "flush     %%g6"
-                            : /* No outputs */
-                            : "r" (0),
-                            "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-       remap_func((tlb_type == spitfire ?
-                   (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
-                   (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
-                  (unsigned long) KERNBASE,
-                  prom_get_mmu_ihandle());
-
-       if (bigkernel)
-               remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
-                       (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
-
-       /* Flush out that temporary mapping. */
-       spitfire_flush_dtlb_nucleus_page(0x0);
-       spitfire_flush_itlb_nucleus_page(0x0);
-
-       /* Now lock us back into the TLBs via OBP. */
-       prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
-       prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+       /* Now lock us into the TLBs via OBP. */
+       prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+       prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
        if (bigkernel) {
-               prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 
-                                                               tte_vaddr + 0x400000);
-               prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000, 
-                                                               tte_vaddr + 0x400000);
+               tlb_ent -= 1;
+               prom_dtlb_load(tlb_ent,
+                              tte_data + 0x400000, 
+                              tte_vaddr + 0x400000);
+               prom_itlb_load(tlb_ent,
+                              tte_data + 0x400000, 
+                              tte_vaddr + 0x400000);
        }
-
-       /* Re-read translations property. */
-       if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
-               prom_printf("Couldn't get translation property\n");
-               prom_halt();
+       sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+       if (tlb_type == cheetah_plus) {
+               sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+                                           CTX_CHEETAH_PLUS_NUC);
+               sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+               sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
        }
-       n = n / sizeof(*trans);
-
-       for (i = 0; i < n; i++) {
-               unsigned long vaddr = trans[i].virt;
-               unsigned long size = trans[i].size;
-
-               if (vaddr < 0xf0000000UL) {
-                       unsigned long avoid_start = (unsigned long) KERNBASE;
-                       unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
+}
 
-                       if (bigkernel)
-                               avoid_end += (4 * 1024 * 1024);
-                       if (vaddr < avoid_start) {
-                               unsigned long top = vaddr + size;
 
-                               if (top > avoid_start)
-                                       top = avoid_start;
-                               prom_unmap(top - vaddr, vaddr);
-                       }
-                       if ((vaddr + size) > avoid_end) {
-                               unsigned long bottom = vaddr;
-
-                               if (bottom < avoid_end)
-                                       bottom = avoid_end;
-                               prom_unmap((vaddr + size) - bottom, bottom);
-                       }
-               }
-       }
+static void __init inherit_prom_mappings(void)
+{
+       read_obp_translations();
 
+       /* Now fixup OBP's idea about where we really are mapped. */
+       prom_printf("Remapping the kernel... ");
+       remap_kernel();
        prom_printf("done.\n");
 
+       prom_printf("Registering callbacks... ");
        register_prom_callbacks();
+       prom_printf("done.\n");
 }
 
 /* The OBP specifications for sun4u mark 0xfffffffc00000000 and
@@ -698,6 +556,9 @@ static void __flush_nucleus_vptes(void)
                        unsigned long tag;
 
                        /* Spitfire Errata #32 workaround */
+                       /* NOTE: Always runs on spitfire, so no cheetah+
+                        *       page size encodings.
+                        */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
@@ -859,11 +720,14 @@ void inherit_locked_prom_mappings(int save_p)
                }
        }
        if (tlb_type == spitfire) {
-               int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
-               for (i = 0; i < high; i++) {
+               int high = sparc64_highest_unlocked_tlb_ent;
+               for (i = 0; i <= high; i++) {
                        unsigned long data;
 
                        /* Spitfire Errata #32 workaround */
+                       /* NOTE: Always runs on spitfire, so no cheetah+
+                        *       page size encodings.
+                        */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
@@ -875,6 +739,9 @@ void inherit_locked_prom_mappings(int save_p)
                                unsigned long tag;
 
                                /* Spitfire Errata #32 workaround */
+                               /* NOTE: Always runs on spitfire, so no
+                                *       cheetah+ page size encodings.
+                                */
                                __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                                     "flush     %%g6"
                                                     : /* No outputs */
@@ -902,6 +769,9 @@ void inherit_locked_prom_mappings(int save_p)
                        unsigned long data;
 
                        /* Spitfire Errata #32 workaround */
+                       /* NOTE: Always runs on spitfire, so no
+                        *       cheetah+ page size encodings.
+                        */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
@@ -913,6 +783,9 @@ void inherit_locked_prom_mappings(int save_p)
                                unsigned long tag;
 
                                /* Spitfire Errata #32 workaround */
+                               /* NOTE: Always runs on spitfire, so no
+                                *       cheetah+ page size encodings.
+                                */
                                __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                                     "flush     %%g6"
                                                     : /* No outputs */
@@ -936,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p)
                        }
                }
        } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+               int high = sparc64_highest_unlocked_tlb_ent;
 
-               for (i = 0; i < high; i++) {
+               for (i = 0; i <= high; i++) {
                        unsigned long data;
 
                        data = cheetah_get_ldtlb_data(i);
@@ -1028,6 +901,7 @@ void prom_reload_locked(void)
        }
 }
 
+#ifdef DCACHE_ALIASING_POSSIBLE
 void __flush_dcache_range(unsigned long start, unsigned long end)
 {
        unsigned long va;
@@ -1051,6 +925,7 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
                                               "i" (ASI_DCACHE_INVALIDATE));
        }
 }
+#endif /* DCACHE_ALIASING_POSSIBLE */
 
 /* If not locked, zap it. */
 void __flush_tlb_all(void)
@@ -1066,6 +941,9 @@ void __flush_tlb_all(void)
        if (tlb_type == spitfire) {
                for (i = 0; i < 64; i++) {
                        /* Spitfire Errata #32 workaround */
+                       /* NOTE: Always runs on spitfire, so no
+                        *       cheetah+ page size encodings.
+                        */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
@@ -1081,6 +959,9 @@ void __flush_tlb_all(void)
                        }
 
                        /* Spitfire Errata #32 workaround */
+                       /* NOTE: Always runs on spitfire, so no
+                        *       cheetah+ page size encodings.
+                        */
                        __asm__ __volatile__("stxa      %0, [%1] %2\n\t"
                                             "flush     %%g6"
                                             : /* No outputs */
@@ -1114,11 +995,14 @@ void __flush_tlb_all(void)
 void get_new_mmu_context(struct mm_struct *mm)
 {
        unsigned long ctx, new_ctx;
+       unsigned long orig_pgsz_bits;
        
        spin_lock(&ctx_alloc_lock);
-       ctx = CTX_HWBITS(tlb_context_cache + 1);
-       new_ctx = find_next_zero_bit(mmu_context_bmap, 1UL << CTX_VERSION_SHIFT, ctx);
-       if (new_ctx >= (1UL << CTX_VERSION_SHIFT)) {
+       orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
+       ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
+       new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+       if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
                        int i;
@@ -1147,9 +1031,8 @@ void get_new_mmu_context(struct mm_struct *mm)
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
 out:
        tlb_context_cache = new_ctx;
+       mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
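+       /* The mm keeps its page-size bits; only the context
+        * version/number bits are reallocated here.
+        */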
        spin_unlock(&ctx_alloc_lock);
-
-       mm->context = new_ctx;
 }
 
 #ifndef CONFIG_SMP
@@ -1168,7 +1051,7 @@ struct pgtable_cache_struct pgt_quicklists;
  * using the later address range, accesses with the first address
  * range will see the newly initialized data rather than the garbage.
  */
-#if (L1DCACHE_SIZE > PAGE_SIZE)                        /* is there D$ aliasing problem */
+#ifdef DCACHE_ALIASING_POSSIBLE
 #define DC_ALIAS_SHIFT 1
 #else
 #define DC_ALIAS_SHIFT 0
@@ -1192,7 +1075,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
                unsigned long paddr;
                pte_t *pte;
 
-#if (L1DCACHE_SIZE > PAGE_SIZE)                        /* is there D$ aliasing problem */
+#ifdef DCACHE_ALIASING_POSSIBLE
                set_page_count(page, 1);
                ClearPageCompound(page);
 
@@ -1210,7 +1093,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
                        to_free = (unsigned long *) paddr;
                }
 
-#if (L1DCACHE_SIZE > PAGE_SIZE)                        /* is there D$ aliasing problem */
+#ifdef DCACHE_ALIASING_POSSIBLE
                /* Now free the other one up, adjust cache size. */
                preempt_disable();
                *to_free = (unsigned long) pte_quicklist[color ^ 0x1];
@@ -1321,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        int i;
 
 #ifdef CONFIG_DEBUG_BOOTMEM
-       prom_printf("bootmem_init: Scan sp_banks, ");
+       prom_printf("bootmem_init: Scan pavail, ");
 #endif
 
        bytes_avail = 0UL;
-       for (i = 0; sp_banks[i].num_bytes != 0; i++) {
-               end_of_phys_memory = sp_banks[i].base_addr +
-                       sp_banks[i].num_bytes;
-               bytes_avail += sp_banks[i].num_bytes;
+       for (i = 0; i < pavail_ents; i++) {
+               end_of_phys_memory = pavail[i].phys_addr +
+                       pavail[i].reg_size;
+               bytes_avail += pavail[i].reg_size;
                if (cmdline_memory_size) {
                        if (bytes_avail > cmdline_memory_size) {
                                unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1336,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                                bytes_avail -= slack;
                                end_of_phys_memory -= slack;
 
-                               sp_banks[i].num_bytes -= slack;
-                               if (sp_banks[i].num_bytes == 0) {
-                                       sp_banks[i].base_addr = 0xdeadbeef;
+                               pavail[i].reg_size -= slack;
+                               if ((long)pavail[i].reg_size <= 0L) {
+                                       pavail[i].phys_addr = 0xdeadbeefUL;
+                                       pavail[i].reg_size = 0UL;
+                                       pavail_ents = i;
                                } else {
-                                       sp_banks[i+1].num_bytes = 0;
-                                       sp_banks[i+1].base_addr = 0xdeadbeef;
+                                       pavail[i+1].reg_size = 0UL;
+                                       pavail[i+1].phys_addr = 0xdeadbeefUL;
+                                       pavail_ents = i + 1;
                                }
                                break;
                        }
@@ -1362,10 +1248,11 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 
 #ifdef CONFIG_BLK_DEV_INITRD
        /* Now have to check initial ramdisk, so that bootmap does not overwrite it */
-       if (sparc_ramdisk_image) {
-               if (sparc_ramdisk_image >= (unsigned long)_end - 2 * PAGE_SIZE)
-                       sparc_ramdisk_image -= KERNBASE;
-               initrd_start = sparc_ramdisk_image + phys_base;
+       if (sparc_ramdisk_image || sparc_ramdisk_image64) {
+               unsigned long ramdisk_image = sparc_ramdisk_image ?
+                       sparc_ramdisk_image : sparc_ramdisk_image64;
+               ramdisk_image -= KERNBASE;
+               initrd_start = ramdisk_image + phys_base;
                initrd_end = initrd_start + sparc_ramdisk_size;
                if (initrd_end > end_of_phys_memory) {
                        printk(KERN_CRIT "initrd extends beyond end of memory "
@@ -1390,17 +1277,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
 #endif
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
 
-       bootmap_base = bootmap_pfn << PAGE_SHIFT;
-
        /* Now register the available physical memory with the
         * allocator.
         */
-       for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+       for (i = 0; i < pavail_ents; i++) {
 #ifdef CONFIG_DEBUG_BOOTMEM
-               prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
-                           i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+               prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+                           i, pavail[i].phys_addr, pavail[i].reg_size);
 #endif
-               free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+               free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
        }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -1408,6 +1293,10 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
                size = initrd_end - initrd_start;
 
                /* Reserve the initrd image area. */
+#ifdef CONFIG_DEBUG_BOOTMEM
+               prom_printf("reserve_bootmem(initrd): base[%llx] size[%lx]\n",
+                       initrd_start, initrd_end);
+#endif
                reserve_bootmem(initrd_start, size);
                *pages_avail -= PAGE_ALIGN(size) >> PAGE_SHIFT;
 
@@ -1437,120 +1326,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
        return end_pfn;
 }
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+       unsigned long vstart = PAGE_OFFSET + pstart;
+       unsigned long vend = PAGE_OFFSET + pend;
+       unsigned long alloc_bytes = 0UL;
+
+       if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+               prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
+                           vstart, vend);
+               prom_halt();
+       }
+
+       while (vstart < vend) {
+               unsigned long this_end, paddr = __pa(vstart);
+               pgd_t *pgd = pgd_offset_k(vstart);
+               pud_t *pud;
+               pmd_t *pmd;
+               pte_t *pte;
+
+               pud = pud_offset(pgd, vstart);
+               if (pud_none(*pud)) {
+                       pmd_t *new;
+
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       alloc_bytes += PAGE_SIZE;
+                       pud_populate(&init_mm, pud, new);
+               }
+
+               pmd = pmd_offset(pud, vstart);
+               if (!pmd_present(*pmd)) {
+                       pte_t *new;
+
+                       new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+                       alloc_bytes += PAGE_SIZE;
+                       pmd_populate_kernel(&init_mm, pmd, new);
+               }
+
+               pte = pte_offset_kernel(pmd, vstart);
+               this_end = (vstart + PMD_SIZE) & PMD_MASK;
+               if (this_end > vend)
+                       this_end = vend;
+
+               while (vstart < this_end) {
+                       pte_val(*pte) = (paddr | pgprot_val(prot));
+
+                       vstart += PAGE_SIZE;
+                       paddr += PAGE_SIZE;
+                       pte++;
+               }
+       }
+
+       return alloc_bytes;
+}
+
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+       unsigned long i, mem_alloced = 0UL;
+
+       read_obp_memory("reg", &pall[0], &pall_ents);
+
+       for (i = 0; i < pall_ents; i++) {
+               unsigned long phys_start, phys_end;
+
+               phys_start = pall[i].phys_addr;
+               phys_end = phys_start + pall[i].reg_size;
+               mem_alloced += kernel_map_range(phys_start, phys_end,
+                                               PAGE_KERNEL);
+       }
+
+       printk("Allocated %ld bytes for kernel page tables.\n",
+              mem_alloced);
+
+       kvmap_linear_patch[0] = 0x01000000; /* nop */
+       flushi(&kvmap_linear_patch[0]);
+
+       __flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+       unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+       unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+       kernel_map_range(phys_start, phys_end,
+                        (enable ? PAGE_KERNEL : __pgprot(0)));
+
+       /* We should perform an IPI and flush all tlbs,
+        * but that can deadlock, so we just flush the current cpu.
+        */
+       __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+                                PAGE_OFFSET + phys_end);
+}
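+
+/* With CONFIG_DEBUG_PAGEALLOC, pages handed back to the page
+ * allocator are unmapped here (pgprot 0), so a stale reference
+ * faults immediately instead of silently touching freed memory.
+ */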
+#endif
+
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+       int i;
+
+       for (i = 0; i < pavail_ents; i++) {
+               if (pavail[i].reg_size >= size)
+                       return pavail[i].phys_addr;
+       }
+
+       return ~0UL;
+}
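+
+/* ~0UL is the "no bank big enough" sentinel; callers must check for
+ * it before using the returned address.
+ */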
+
 /* paging_init() sets up the page tables */
 
 extern void cheetah_ecache_flush_init(void);
 
 static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
 
 void __init paging_init(void)
 {
-       extern pmd_t swapper_pmd_dir[1024];
-       extern unsigned int sparc64_vpte_patchme1[1];
-       extern unsigned int sparc64_vpte_patchme2[1];
-       unsigned long alias_base = kern_base + PAGE_OFFSET;
-       unsigned long second_alias_page = 0;
-       unsigned long pt, flags, end_pfn, pages_avail;
-       unsigned long shift = alias_base - ((unsigned long)KERNBASE);
-       unsigned long real_end;
+       unsigned long end_pfn, pages_avail, shift;
+       unsigned long real_end, i;
+
+       /* Find available physical memory... */
+       read_obp_memory("available", &pavail[0], &pavail_ents);
+
+       phys_base = 0xffffffffffffffffUL;
+       for (i = 0; i < pavail_ents; i++)
+               phys_base = min(phys_base, pavail[i].phys_addr);
+
+       pfn_base = phys_base >> PAGE_SHIFT;
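+
+       /* phys_base/pfn_base now describe the lowest physical address
+        * present in any "available" bank.
+        */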
+
+       kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+       kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
        set_bit(0, mmu_context_bmap);
 
+       shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
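+       /* shift is the offset from the kernel's KERNBASE link address
+        * to its alias in the PAGE_OFFSET linear mapping.
+        */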
+
        real_end = (unsigned long)_end;
        if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
                bigkernel = 1;
-#ifdef CONFIG_BLK_DEV_INITRD
-       if (sparc_ramdisk_image)
-               real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
-#endif
-
-       /* We assume physical memory starts at some 4mb multiple,
-        * if this were not true we wouldn't boot up to this point
-        * anyways.
-        */
-       pt  = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
-       pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
-       local_irq_save(flags);
-       if (tlb_type == spitfire) {
-               __asm__ __volatile__(
-       "       stxa    %1, [%0] %3\n"
-       "       stxa    %2, [%5] %4\n"
-       "       membar  #Sync\n"
-       "       flush   %%g6\n"
-       "       nop\n"
-       "       nop\n"
-       "       nop\n"
-               : /* No outputs */
-               : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-                 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
-               : "memory");
-               if (real_end >= KERNBASE + 0x340000) {
-                       second_alias_page = alias_base + 0x400000;
-                       __asm__ __volatile__(
-               "       stxa    %1, [%0] %3\n"
-               "       stxa    %2, [%5] %4\n"
-               "       membar  #Sync\n"
-               "       flush   %%g6\n"
-               "       nop\n"
-               "       nop\n"
-               "       nop\n"
-                       : /* No outputs */
-                       : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-                         "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
-                       : "memory");
-               }
-       } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-               __asm__ __volatile__(
-       "       stxa    %1, [%0] %3\n"
-       "       stxa    %2, [%5] %4\n"
-       "       membar  #Sync\n"
-       "       flush   %%g6\n"
-       "       nop\n"
-       "       nop\n"
-       "       nop\n"
-               : /* No outputs */
-               : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
-                 "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
-               : "memory");
-               if (real_end >= KERNBASE + 0x340000) {
-                       second_alias_page = alias_base + 0x400000;
-                       __asm__ __volatile__(
-               "       stxa    %1, [%0] %3\n"
-               "       stxa    %2, [%5] %4\n"
-               "       membar  #Sync\n"
-               "       flush   %%g6\n"
-               "       nop\n"
-               "       nop\n"
-               "       nop\n"
-                       : /* No outputs */
-                       : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
-                         "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
-                       : "memory");
-               }
+       if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
+               prom_printf("paging_init: Kernel > 8MB, too large.\n");
+               prom_halt();
        }
-       local_irq_restore(flags);
-       
-       /* Now set kernel pgd to upper alias so physical page computations
+
+       /* Set kernel pgd to upper alias so physical page computations
         * work.
         */
        init_mm.pgd += ((shift) / (sizeof(pgd_t)));
        
-       memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+       memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
 
        /* Now can init the kernel/bad page tables. */
-       pgd_set(&swapper_pg_dir[0], swapper_pmd_dir + (shift / sizeof(pgd_t)));
+       pud_set(pud_offset(&swapper_pg_dir[0], 0),
+               swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
        
-       sparc64_vpte_patchme1[0] |=
-               (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
-       sparc64_vpte_patchme2[0] |=
-               (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
-       flushi((long)&sparc64_vpte_patchme1[0]);
+       swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
        
-       /* Setup bootmem... */
-       pages_avail = 0;
-       last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
-
-       /* Inherit non-locked OBP mappings. */
        inherit_prom_mappings();
        
        /* Ok, we can use our TLB miss and window trap handlers safely.
@@ -1565,13 +1501,16 @@ void __init paging_init(void)
 
        inherit_locked_prom_mappings(1);
 
-       /* We only created DTLB mapping of this stuff. */
-       spitfire_flush_dtlb_nucleus_page(alias_base);
-       if (second_alias_page)
-               spitfire_flush_dtlb_nucleus_page(second_alias_page);
-
        __flush_tlb_all();
 
+       /* Setup bootmem... */
+       pages_avail = 0;
+       last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       kernel_physical_mapping_init();
+#endif
+
        {
                unsigned long zones_size[MAX_NR_ZONES];
                unsigned long zholes_size[MAX_NR_ZONES];
@@ -1585,136 +1524,42 @@ void __init paging_init(void)
                zones_size[ZONE_DMA] = npages;
                zholes_size[ZONE_DMA] = npages - pages_avail;
 
-               free_area_init_node(0, &contig_page_data, NULL, zones_size,
+               free_area_init_node(0, &contig_page_data, zones_size,
                                    phys_base >> PAGE_SHIFT, zholes_size);
-               mem_map = contig_page_data.node_mem_map;
        }
 
        device_scan();
 }
 
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence.  My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
-       int swapi = 0;
-       int i, mitr;
-       unsigned long tmpaddr, tmpsize;
-       unsigned long lowest;
-
-       for (i = 0; thislist[i].theres_more != 0; i++) {
-               lowest = thislist[i].start_adr;
-               for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
-                       if (thislist[mitr].start_adr < lowest) {
-                               lowest = thislist[mitr].start_adr;
-                               swapi = mitr;
-                       }
-               if (lowest == thislist[i].start_adr)
-                       continue;
-               tmpaddr = thislist[swapi].start_adr;
-               tmpsize = thislist[swapi].num_bytes;
-               for (mitr = swapi; mitr > i; mitr--) {
-                       thislist[mitr].start_adr = thislist[mitr-1].start_adr;
-                       thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
-               }
-               thislist[i].start_adr = tmpaddr;
-               thislist[i].num_bytes = tmpsize;
-       }
-}
-
-void __init rescan_sp_banks(void)
-{
-       struct linux_prom64_registers memlist[64];
-       struct linux_mlist_p1275 avail[64], *mlist;
-       unsigned long bytes, base_paddr;
-       int num_regs, node = prom_finddevice("/memory");
-       int i;
-
-       num_regs = prom_getproperty(node, "available",
-                                   (char *) memlist, sizeof(memlist));
-       num_regs = (num_regs / sizeof(struct linux_prom64_registers));
-       for (i = 0; i < num_regs; i++) {
-               avail[i].start_adr = memlist[i].phys_addr;
-               avail[i].num_bytes = memlist[i].reg_size;
-               avail[i].theres_more = &avail[i + 1];
-       }
-       avail[i - 1].theres_more = NULL;
-       sort_memlist(avail);
-
-       mlist = &avail[0];
-       i = 0;
-       bytes = mlist->num_bytes;
-       base_paddr = mlist->start_adr;
-  
-       sp_banks[0].base_addr = base_paddr;
-       sp_banks[0].num_bytes = bytes;
-
-       while (mlist->theres_more != NULL){
-               i++;
-               mlist = mlist->theres_more;
-               bytes = mlist->num_bytes;
-               if (i >= SPARC_PHYS_BANKS-1) {
-                       printk ("The machine has more banks than "
-                               "this kernel can support\n"
-                               "Increase the SPARC_PHYS_BANKS "
-                               "setting (currently %d)\n",
-                               SPARC_PHYS_BANKS);
-                       i = SPARC_PHYS_BANKS-1;
-                       break;
-               }
-    
-               sp_banks[i].base_addr = mlist->start_adr;
-               sp_banks[i].num_bytes = mlist->num_bytes;
-       }
-
-       i++;
-       sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
-       sp_banks[i].num_bytes = 0;
-
-       for (i = 0; sp_banks[i].num_bytes != 0; i++)
-               sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
 static void __init taint_real_pages(void)
 {
-       struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
        int i;
 
-       for (i = 0; i < SPARC_PHYS_BANKS; i++) {
-               saved_sp_banks[i].base_addr =
-                       sp_banks[i].base_addr;
-               saved_sp_banks[i].num_bytes =
-                       sp_banks[i].num_bytes;
-       }
-
-       rescan_sp_banks();
+       read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
 
-       /* Find changes discovered in the sp_bank rescan and
+       /* Find changes discovered in the physmem available rescan and
         * reserve the lost portions in the bootmem maps.
         */
-       for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+       for (i = 0; i < pavail_ents; i++) {
                unsigned long old_start, old_end;
 
-               old_start = saved_sp_banks[i].base_addr;
+               old_start = pavail[i].phys_addr;
                old_end = old_start +
-                       saved_sp_banks[i].num_bytes;
+                       pavail[i].reg_size;
                while (old_start < old_end) {
                        int n;
 
-                       for (n = 0; sp_banks[n].num_bytes; n++) {
+                       for (n = 0; n < pavail_rescan_ents; n++) {
                                unsigned long new_start, new_end;
 
-                               new_start = sp_banks[n].base_addr;
-                               new_end = new_start + sp_banks[n].num_bytes;
+                               new_start = pavail_rescan[n].phys_addr;
+                               new_end = new_start +
+                                       pavail_rescan[n].reg_size;
 
                                if (new_start <= old_start &&
                                    new_end >= (old_start + PAGE_SIZE)) {
-                                       set_bit (old_start >> 22,
-                                                sparc64_valid_addr_bitmap);
+                                       set_bit(old_start >> 22,
+                                               sparc64_valid_addr_bitmap);
                                        goto do_next_page;
                                }
                        }
@@ -1734,8 +1579,7 @@ void __init mem_init(void)
 
        i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
        i += 1;
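+       /* One bit per 4MB chunk of memory: pfn >> (22 - PAGE_SHIFT)
+        * counts 4MB chunks, the extra >> 6 converts bits to 64-bit
+        * words, and i << 3 below converts words to bytes.
+        */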
-       sparc64_valid_addr_bitmap = (unsigned long *)
-               __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+       sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
        if (sparc64_valid_addr_bitmap == NULL) {
                prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
                prom_halt();
@@ -1763,13 +1607,12 @@ void __init mem_init(void)
         * Set up the zero page, mark it reserved, so that page count
         * is not manipulated when freeing the page from user ptes.
         */
-       mem_map_zero = alloc_pages(GFP_KERNEL, 0);
+       mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if (mem_map_zero == NULL) {
                prom_printf("paging_init: Cannot alloc zero page.\n");
                prom_halt();
        }
        SetPageReserved(mem_map_zero);
-       clear_page(page_address(mem_map_zero));
 
        codepages = (((unsigned long) _etext) - ((unsigned long) _start));
        codepages = PAGE_ALIGN(codepages) >> PAGE_SHIFT;
@@ -1778,22 +1621,6 @@ void __init mem_init(void)
        initpages = (((unsigned long) __init_end) - ((unsigned long) __init_begin));
        initpages = PAGE_ALIGN(initpages) >> PAGE_SHIFT;
 
-#ifndef CONFIG_SMP
-       {
-               /* Put empty_pg_dir on pgd_quicklist */
-               extern pgd_t empty_pg_dir[1024];
-               unsigned long addr = (unsigned long)empty_pg_dir;
-               unsigned long alias_base = kern_base + PAGE_OFFSET -
-                       (long)(KERNBASE);
-               
-               memset(empty_pg_dir, 0, sizeof(empty_pg_dir));
-               addr += alias_base;
-               free_pgd_fast((pgd_t *)addr);
-               num_physpages++;
-               totalram_pages++;
-       }
-#endif
-
        printk("Memory: %uk available (%ldk kernel code, %ldk data, %ldk init) [%016lx,%016lx]\n",
               nr_free_pages() << (PAGE_SHIFT-10),
               codepages << (PAGE_SHIFT-10),
@@ -1805,7 +1632,7 @@ void __init mem_init(void)
                cheetah_ecache_flush_init();
 }
 
-void free_initmem (void)
+void free_initmem(void)
 {
        unsigned long addr, initend;
 
@@ -1821,6 +1648,7 @@ void free_initmem (void)
                page = (addr +
                        ((unsigned long) __va(kern_base)) -
                        ((unsigned long) KERNBASE));
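+               /* Poison the init area with a recognizable pattern so
+                * any late use of freed init memory shows up as garbage
+                * rather than stale-but-plausible data.
+                */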
+               memset((void *)addr, 0xcc, PAGE_SIZE);
                p = virt_to_page(page);
 
                ClearPageReserved(p);