#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
+#include <linux/idr.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
-
-struct mmu_context_queue_t mmu_context_queue;
int mem_init_done;
unsigned long ioremap_bot = IMALLOC_BASE;
static unsigned long phbs_io_bot = PHBS_IO_BASE;
/* info on what we think the IO hole is */
unsigned long io_hole_start;
unsigned long io_hole_size;
-unsigned long top_of_ram;
void show_mem(void)
{
#ifdef CONFIG_PPC_ISERIES
-void *ioremap(unsigned long addr, unsigned long size)
+void __iomem *ioremap(unsigned long addr, unsigned long size)
{
- return (void *)addr;
+ return (void __iomem *)addr;
}
-extern void *__ioremap(unsigned long addr, unsigned long size,
+extern void __iomem *__ioremap(unsigned long addr, unsigned long size,
unsigned long flags)
{
- return (void *)addr;
+ return (void __iomem *)addr;
}
-void iounmap(void *addr)
+void iounmap(volatile void __iomem *addr)
{
return;
}
}
-static void * __ioremap_com(unsigned long addr, unsigned long pa,
+static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
unsigned long ea, unsigned long size,
unsigned long flags)
{
map_io_page(ea+i, pa+i, flags);
}
- return (void *) (ea + (addr & ~PAGE_MASK));
+ return (void __iomem *) (ea + (addr & ~PAGE_MASK));
}
-void *
+void __iomem *
ioremap(unsigned long addr, unsigned long size)
{
- void *ret = __ioremap(addr, size, _PAGE_NO_CACHE);
- if(mem_init_done)
- return eeh_ioremap(addr, ret); /* may remap the addr */
- return ret;
+ return __ioremap(addr, size, _PAGE_NO_CACHE);
}
-void *
+void __iomem *
__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
{
unsigned long pa, ea;
*
* XXX what about calls before mem_init_done (i.e. python_countermeasures())
*/
-void iounmap(void *addr)
+void iounmap(volatile void __iomem *token)
{
unsigned long address, start, end, size;
struct mm_struct *mm;
pgd_t *dir;
+ void *addr;
if (!mem_init_done) {
return;
}
- /* addr could be in EEH or IO region, map it to IO region regardless.
- */
- addr = (void *) (IO_TOKEN_TO_ADDR(addr) & PAGE_MASK);
+ addr = (void *) ((unsigned long __force) token & PAGE_MASK);
if ((size = im_free(addr)) == 0) {
return;
return;
}
-int iounmap_explicit(void *addr, unsigned long size)
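+/*
+ * Unmap every mapping that lies wholly inside [addr, addr+size).
+ * im_get_area() with IM_REGION_SUPERSET hands back a region that the
+ * requested range contains, so loop until no such region is left.
+ * Returns 0 if at least one region was unmapped, 1 if none existed.
+ */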
+static int iounmap_subset_regions(unsigned long addr, unsigned long size)
{
struct vm_struct *area;
+
+ /* Check whether subsets of this region exist */
+ area = im_get_area(addr, size, IM_REGION_SUPERSET);
+ if (area == NULL)
+ return 1;
+
+ while (area) {
+ iounmap((void __iomem *) area->addr);
+ area = im_get_area(addr, size,
+ IM_REGION_SUPERSET);
+ }
+
+ return 0;
+}
+
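+/*
+ * Unmap an explicitly requested range: either the exact region, a
+ * subset of an existing region (the parent is split first), or any
+ * regions the range fully contains.  Returns 0 on success, 1 if
+ * nothing at this range was mapped.
+ */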
+int iounmap_explicit(volatile void __iomem *start, unsigned long size)
+{
+ struct vm_struct *area;
+ unsigned long addr;
+ int rc;
- /* addr could be in EEH or IO region, map it to IO region regardless.
- */
- addr = (void *) (IO_TOKEN_TO_ADDR(addr) & PAGE_MASK);
+ addr = (unsigned long __force) start & PAGE_MASK;
/* Verify that the region either exists or is a subset of an existing
* region. In the latter case, split the parent region to create
* the exact region
*/
- area = im_get_area((unsigned long) addr, size,
+ area = im_get_area(addr, size,
IM_REGION_EXISTS | IM_REGION_SUBSET);
if (area == NULL) {
- printk(KERN_ERR "%s() cannot unmap nonexistent range 0x%lx\n",
- __FUNCTION__, (unsigned long) addr);
- return 1;
+ /* Determine whether subset regions exist. If so, unmap */
+ rc = iounmap_subset_regions(addr, size);
+ if (rc) {
+ printk(KERN_ERR
+ "%s() cannot unmap nonexistent range 0x%lx\n",
+ __FUNCTION__, addr);
+ return 1;
+ }
+ } else {
+ iounmap((void __iomem *) area->addr);
}
-
+ /*
+ * FIXME! This can't be right:
iounmap(area->addr);
+ * Maybe it should be "iounmap(area);"
+ */
return 0;
}
}
#endif
+static spinlock_t mmu_context_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_IDR(mmu_context_idr);
+
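+/*
+ * MMU context ids are now handed out by an IDR rather than the old
+ * fixed-size mmu_context_queue.  Index 0 is reserved as the invalid
+ * context (see mmu_context_init() below).
+ */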
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+ int index;
+ int err;
+
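+ /*
+ * idr_pre_get() preallocates outside the lock; idr_get_new() may
+ * still return -EAGAIN if the preallocation was consumed by a
+ * racing allocator, so retry from the top in that case.
+ */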
+again:
+ if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ spin_lock(&mmu_context_lock);
+ err = idr_get_new(&mmu_context_idr, NULL, &index);
+ spin_unlock(&mmu_context_lock);
+
+ if (err == -EAGAIN)
+ goto again;
+ else if (err)
+ return err;
+
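+ /*
+ * The MMU can only represent context ids up to MAX_CONTEXT;
+ * give the id back and fail rather than hand out a bigger one.
+ */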
+ if (index > MAX_CONTEXT) {
+ idr_remove(&mmu_context_idr, index);
+ return -ENOMEM;
+ }
+
+ mm->context.id = index;
+
+ return 0;
+}
+
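+/*
+ * Return the context id to the IDR when the address space dies so it
+ * can be handed out again.
+ */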
+void destroy_context(struct mm_struct *mm)
+{
+ spin_lock(&mmu_context_lock);
+ idr_remove(&mmu_context_idr, mm->context.id);
+ spin_unlock(&mmu_context_lock);
+
+ mm->context.id = NO_CONTEXT;
+}
+
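+/*
+ * Claim index 0 at boot so no mm is ever allocated the invalid
+ * context id.
+ */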
+static int __init mmu_context_init(void)
+{
+ int index;
+
+ /* Reserve the first (invalid) context */
+ idr_pre_get(&mmu_context_idr, GFP_KERNEL);
+ idr_get_new(&mmu_context_idr, NULL, &index);
+ BUG_ON(0 != index);
+
+ return 0;
+}
+arch_initcall(mmu_context_init);
+
/*
* Do very early mm setup.
*/
void __init mm_init_ppc64(void)
{
+#ifndef CONFIG_PPC_ISERIES
unsigned long i;
+#endif
ppc64_boot_msg(0x100, "MM Init");
- /* Reserve all contexts < FIRST_USER_CONTEXT for kernel use.
- * The range of contexts [FIRST_USER_CONTEXT, NUM_USER_CONTEXT)
- * are stored on a stack/queue for easy allocation and deallocation.
- */
- mmu_context_queue.lock = SPIN_LOCK_UNLOCKED;
- mmu_context_queue.head = 0;
- mmu_context_queue.tail = NUM_USER_CONTEXT-1;
- mmu_context_queue.size = NUM_USER_CONTEXT;
- for (i = 0; i < NUM_USER_CONTEXT; i++)
- mmu_context_queue.elements[i] = i + FIRST_USER_CONTEXT;
-
/* This is the story of the IO hole... please, keep seated,
* unfortunately, we are out of oxygen masks at the moment.
* So we need some rough way to tell where your big IO hole
* is. On pmac, it's between 2G and 4G, on POWER3, it's around
* that area as well, on POWER4 we don't have one, etc...
- * We need that to implement something approx. decent for
- * page_is_ram() so that /dev/mem doesn't map cacheable IO space
- * when XFree resquest some IO regions witout using O_SYNC, we
- * also need that as a "hint" when sizing the TCE table on POWER3
+ * We need that as a "hint" when sizing the TCE table on POWER3
* So far, the simplest way that seems to work well enough for us is
* to just assume that the first discontinuity in our physical
* RAM layout is the IO hole. That may not be correct in the future
* (and isn't on iSeries but then we don't care ;)
*/
- top_of_ram = lmb_end_of_DRAM();
#ifndef CONFIG_PPC_ISERIES
for (i = 1; i < lmb.memory.cnt; i++) {
ppc64_boot_msg(0x100, "MM Init Done");
}
-
/*
* This is called by /dev/mem to know if a given address has to
* be mapped non-cacheable or not
*/
-int page_is_ram(unsigned long physaddr)
+int page_is_ram(unsigned long pfn)
{
-#ifdef CONFIG_PPC_ISERIES
- return 1;
+ int i;
+ unsigned long paddr = (pfn << PAGE_SHIFT);
+
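+ /*
+ * The address is RAM only if it falls inside one of the
+ * physical memory regions recorded in the LMB array.
+ */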
+ for (i = 0; i < lmb.memory.cnt; i++) {
+ unsigned long base;
+
+#ifdef CONFIG_MSCHUNKS
+ base = lmb.memory.region[i].physbase;
+#else
+ base = lmb.memory.region[i].base;
#endif
- if (physaddr >= top_of_ram)
- return 0;
- return io_hole_start == 0 || physaddr < io_hole_start ||
- physaddr >= (io_hole_start + io_hole_size);
-}
+ if ((paddr >= base) &&
+ (paddr < (base + lmb.memory.region[i].size))) {
+ return 1;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL(page_is_ram);
/*
* Initialize the bootmem system and give it all the memory we
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
unsigned long total_ram = lmb_phys_mem_size();
+ unsigned long top_of_ram = lmb_end_of_DRAM();
printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
top_of_ram, total_ram);
zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT;
- free_area_init_node(0, &contig_page_data, NULL, zones_size,
+ free_area_init_node(0, &contig_page_data, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
mem_map = contig_page_data.node_mem_map;
}
void __init mem_init(void)
{
-#ifndef CONFIG_DISCONTIGMEM
- unsigned long addr;
+#ifdef CONFIG_DISCONTIGMEM
+ int nid;
#endif
- int codepages = 0;
- int datapages = 0;
- int initpages = 0;
+ pg_data_t *pgdat;
+ unsigned long i;
+ struct page *page;
+ unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
num_physpages = max_low_pfn; /* RAM is assumed contiguous */
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
#ifdef CONFIG_DISCONTIGMEM
-{
- int nid;
-
for (nid = 0; nid < numnodes; nid++) {
- if (node_data[nid].node_spanned_pages != 0) {
+ if (NODE_DATA(nid)->node_spanned_pages != 0) {
printk("freeing bootmem node %x\n", nid);
totalram_pages +=
free_all_bootmem_node(NODE_DATA(nid));
}
}
-
- printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
- (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
- codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
- initpages<< (PAGE_SHIFT-10),
- PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));
-}
#else
max_mapnr = num_physpages;
-
totalram_pages += free_all_bootmem();
+#endif
- for (addr = KERNELBASE; addr < (unsigned long)__va(lmb_end_of_DRAM());
- addr += PAGE_SIZE) {
- if (!PageReserved(virt_to_page(addr)))
- continue;
- if (addr < (unsigned long)_etext)
- codepages++;
-
- else if (addr >= (unsigned long)__init_begin
- && addr < (unsigned long)__init_end)
- initpages++;
- else if (addr < klimit)
- datapages++;
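+ /*
+ * Count the pages still marked reserved by walking every
+ * node's mem_map, rather than kernel virtual addresses.
+ */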
+ for_each_pgdat(pgdat) {
+ for (i = 0; i < pgdat->node_spanned_pages; i++) {
+ page = pgdat->node_mem_map + i;
+ if (PageReserved(page))
+ reservedpages++;
+ }
}
- printk("Memory: %luk available (%dk kernel code, %dk data, %dk init) [%08lx,%08lx]\n",
- (unsigned long)nr_free_pages()<< (PAGE_SHIFT-10),
- codepages<< (PAGE_SHIFT-10), datapages<< (PAGE_SHIFT-10),
- initpages<< (PAGE_SHIFT-10),
- PAGE_OFFSET, (unsigned long)__va(lmb_end_of_DRAM()));
-#endif
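+ /* Section sizes are derived from the linker-script symbols. */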
+ codesize = (unsigned long)&_etext - (unsigned long)&_stext;
+ initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
+ datasize = (unsigned long)&_edata - (unsigned long)&__init_end;
+ bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
+ "%luk reserved, %luk data, %luk bss, %luk init)\n",
+ (unsigned long)nr_free_pages() << (PAGE_SHIFT-10),
+ num_physpages << (PAGE_SHIFT-10),
+ codesize >> 10,
+ reservedpages << (PAGE_SHIFT-10),
+ datasize >> 10,
+ bsssize >> 10,
+ initsize >> 10);
+
mem_init_done = 1;
#ifdef CONFIG_PPC_ISERIES