#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
+#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
-#include <asm/naca.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
hash = hpt_hash(vpn, 0);
- hpteg = ((hash & htab_data.htab_hash_mask)*HPTES_PER_GROUP);
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 /* Panic if a pte group is full */
if (ppc_md.hpte_insert(hpteg, va, pa >> PAGE_SHIFT, 0,
*/
;
} else {
- area = im_get_area(ea, size, IM_REGION_UNUSED|IM_REGION_SUBSET);
+ area = im_get_area(ea, size,
+ IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
if (area == NULL) {
- printk(KERN_ERR "could not obtain imalloc area for ea 0x%lx\n", ea);
+ /* Expected when PHB-dlpar is in play */
return 1;
}
if (ea != (unsigned long) area->addr) {
}
#endif
-static spinlock_t mmu_context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDR(mmu_context_idr);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
int index;
int err;
+#ifdef CONFIG_HUGETLB_PAGE
+ /* We leave htlb_segs as it was, but for a fork, we need to
+ * clear the huge_pgdir. */
+ mm->context.huge_pgdir = NULL;
+#endif
+
again:
if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
return -ENOMEM;
spin_lock(&mmu_context_lock);
- err = idr_get_new(&mmu_context_idr, NULL, &index);
+ err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
spin_unlock(&mmu_context_lock);
if (err == -EAGAIN)
spin_unlock(&mmu_context_lock);
mm->context.id = NO_CONTEXT;
-}
-
-static int __init mmu_context_init(void)
-{
- int index;
- /* Reserve the first (invalid) context*/
- idr_pre_get(&mmu_context_idr, GFP_KERNEL);
- idr_get_new(&mmu_context_idr, NULL, &index);
- BUG_ON(0 != index);
-
- return 0;
+ hugetlb_mm_free_pgd(mm);
}
-arch_initcall(mmu_context_init);
/*
* Do very early mm setup.
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_DISCONTIGMEM
- for (nid = 0; nid < numnodes; nid++) {
+ for_each_online_node(nid) {
if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
totalram_pages +=
local_irq_restore(flags);
}
-void * reserve_phb_iospace(unsigned long size)
+void __iomem * reserve_phb_iospace(unsigned long size)
{
- void *virt_addr;
+ void __iomem *virt_addr;
if (phbs_io_bot >= IMALLOC_BASE)
panic("reserve_phb_iospace(): phb io space overflow\n");
- virt_addr = (void *) phbs_io_bot;
+ virt_addr = (void __iomem *) phbs_io_bot;
phbs_io_bot += size;
return virt_addr;