#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
+#include <linux/bitops.h>
#include <asm/a.out.h>
-#include <asm/bitops.h>
#include <asm/dma.h>
#include <asm/ia32.h>
#include <asm/io.h>
inline void
ia64_set_rbs_bot (void)
{
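+ /* rlimits now live in the shared signal_struct, so every thread in the group sees the same stack limit */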
- unsigned long stack_size = current->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
vma->vm_flags = VM_DATA_DEFAULT_FLAGS | VM_GROWSUP;
- insert_vm_struct(current->mm, vma);
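+ /* the mm's VMA list is protected by mmap_sem; take it for writing, and free the vma if the insert fails */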
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+ up_write(&current->mm->mmap_sem);
+ kmem_cache_free(vm_area_cachep, vma);
+ return;
+ }
+ up_write(&current->mm->mmap_sem);
}
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO | VM_RESERVED;
- insert_vm_struct(current->mm, vma);
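+ /* as above: insert under mmap_sem and back out on failure */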
+ down_write(&current->mm->mmap_sem);
+ if (insert_vm_struct(current->mm, vma)) {
+ up_write(&current->mm->mmap_sem);
+ kmem_cache_free(vm_area_cachep, vma);
+ return;
+ }
+ up_write(&current->mm->mmap_sem);
}
}
}
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
spin_lock(&init_mm.page_table_lock);
{
- pmd = pmd_alloc(&init_mm, pgd, address);
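+ /* 4-level page tables add a pud level between the pgd and the pmd */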
+ pud = pud_alloc(&init_mm, pgd, address);
+ if (!pud)
+ goto out;
+
+ pmd = pmd_alloc(&init_mm, pud, address);
if (!pmd)
goto out;
pte = pte_alloc_map(&init_mm, pmd, address);
{
unsigned long psr, pta, impl_va_bits;
extern void __devinit tlb_init (void);
- int cpu;
#ifdef CONFIG_DISABLE_VHPT
# define VHPT_ENABLE_BIT 0
ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
ia64_srlz_d();
#endif
-
- cpu = smp_processor_id();
-
- /* mca handler uses cr.lid as key to pick the right entry */
- ia64_mca_tlb_list[cpu].cr_lid = ia64_getreg(_IA64_REG_CR_LID);
-
- /* insert this percpu data information into our list for MCA recovery purposes */
- ia64_mca_tlb_list[cpu].percpu_paddr = pte_val(mk_pte_phys(__pa(my_cpu_data), PAGE_KERNEL));
- /* Also save per-cpu tlb flush recipe for use in physical mode mca handler */
- ia64_mca_tlb_list[cpu].ptce_base = local_cpu_data->ptce_base;
- ia64_mca_tlb_list[cpu].ptce_count[0] = local_cpu_data->ptce_count[0];
- ia64_mca_tlb_list[cpu].ptce_count[1] = local_cpu_data->ptce_count[1];
- ia64_mca_tlb_list[cpu].ptce_stride[0] = local_cpu_data->ptce_stride[0];
- ia64_mca_tlb_list[cpu].ptce_stride[1] = local_cpu_data->ptce_stride[1];
}
#ifdef CONFIG_VIRTUAL_MEM_MAP
struct page *map_start, *map_end;
int node;
pgd_t *pgd;
+ pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pgd = pgd_offset_k(address);
if (pgd_none(*pgd))
pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
- pmd = pmd_offset(pgd, address);
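+ /* walk (and if necessary populate) the new pud level before descending to the pmd */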
+ pud = pud_offset(pgd, address);
+
+ if (pud_none(*pud))
+ pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
+ pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
struct page *map_start, *map_end;
args = (struct memmap_init_callback_data *) arg;
-
map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
map_end = vmem_map + (__pa(end) >> PAGE_SHIFT);