vserver 1.9.5.x5
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 87a8016..df0f121 100644
@@ -19,9 +19,9 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/proc_fs.h>
+#include <linux/bitops.h>
 
 #include <asm/a.out.h>
-#include <asm/bitops.h>
 #include <asm/dma.h>
 #include <asm/ia32.h>
 #include <asm/io.h>
@@ -98,7 +98,7 @@ update_mmu_cache (struct vm_area_struct *vma, unsigned long vaddr, pte_t pte)
 inline void
 ia64_set_rbs_bot (void)
 {
-       unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
+       unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
 
        if (stack_size > MAX_USER_STACK_SIZE)
                stack_size = MAX_USER_STACK_SIZE;
@@ -131,7 +131,13 @@ ia64_init_addr_space (void)
                vma->vm_end = vma->vm_start + PAGE_SIZE;
                vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
                vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
-               insert_vm_struct(current->mm, vma);
+               down_write(&current->mm->mmap_sem);
+               if (insert_vm_struct(current->mm, vma)) {
+                       up_write(&current->mm->mmap_sem);
+                       kmem_cache_free(vm_area_cachep, vma);
+                       return;
+               }
+               up_write(&current->mm->mmap_sem);
        }
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
@@ -143,7 +149,13 @@ ia64_init_addr_space (void)
                        vma->vm_end = PAGE_SIZE;
                        vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
                        vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
-                       insert_vm_struct(current->mm, vma);
+                       down_write(&current->mm->mmap_sem);
+                       if (insert_vm_struct(current->mm, vma)) {
+                               up_write(&current->mm->mmap_sem);
+                               kmem_cache_free(vm_area_cachep, vma);
+                               return;
+                       }
+                       up_write(&current->mm->mmap_sem);
                }
        }
 }
@@ -225,6 +237,7 @@ struct page *
 put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 {
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
@@ -236,7 +249,11 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
 
        spin_lock(&init_mm.page_table_lock);
        {
-               pmd = pmd_alloc(&init_mm, pgd, address);
+               pud = pud_alloc(&init_mm, pgd, address);
+               if (!pud)
+                       goto out;
+
+               pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        goto out;
                pte = pte_alloc_map(&init_mm, pmd, address);
@@ -279,7 +296,6 @@ ia64_mmu_init (void *my_cpu_data)
 {
        unsigned long psr, pta, impl_va_bits;
        extern void __devinit tlb_init (void);
-       int cpu;
 
 #ifdef CONFIG_DISABLE_VHPT
 #      define VHPT_ENABLE_BIT  0
@@ -344,20 +360,6 @@ ia64_mmu_init (void *my_cpu_data)
        ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
        ia64_srlz_d();
 #endif
-
-       cpu = smp_processor_id();
-
-       /* mca handler uses cr.lid as key to pick the right entry */
-       ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
-
-       /* insert this percpu data information into our list for MCA recovery purposes */
-       ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
-       /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
-       ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
-       ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
-       ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
-       ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
-       ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
 }
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
@@ -369,6 +371,7 @@ create_mem_map_page_table (u64 start, u64 end, void *arg)
        struct page *map_start, *map_end;
        int node;
        pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
@@ -383,7 +386,11 @@ create_mem_map_page_table (u64 start, u64 end, void *arg)
                pgd = pgd_offset_k(address);
                if (pgd_none(*pgd))
                        pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
-               pmd = pmd_offset(pgd, address);
+               pud = pud_offset(pgd, address);
+
+               if (pud_none(*pud))
+                       pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+               pmd = pmd_offset(pud, address);
 
                if (pmd_none(*pmd))
                        pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
@@ -410,7 +417,6 @@ virtual_memmap_init (u64 start, u64 end, void *arg)
        struct page *map_start, *map_end;
 
        args = (struct memmap_init_callback_data *) arg;
-
        map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
        map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);