vserver 1.9.3
[linux-2.6.git] / arch / um / kernel / mem.c
index 84a895c..09add57 100644 (file)
@@ -1,74 +1,66 @@
 /* 
- * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
+ * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
  * Licensed under the GPL
  */
 
-#include "linux/config.h"
-#include "linux/module.h"
-#include "linux/types.h"
+#include "linux/stddef.h"
+#include "linux/kernel.h"
 #include "linux/mm.h"
-#include "linux/fs.h"
-#include "linux/init.h"
 #include "linux/bootmem.h"
 #include "linux/swap.h"
-#include "linux/slab.h"
-#include "linux/vmalloc.h"
 #include "linux/highmem.h"
+#include "linux/gfp.h"
 #include "asm/page.h"
-#include "asm/pgtable.h"
+#include "asm/fixmap.h"
 #include "asm/pgalloc.h"
-#include "asm/bitops.h"
-#include "asm/uaccess.h"
-#include "asm/tlb.h"
 #include "user_util.h"
 #include "kern_util.h"
-#include "mem_user.h"
-#include "mem.h"
 #include "kern.h"
-#include "init.h"
-#include "os.h"
-#include "mode_kern.h"
+#include "mem_user.h"
 #include "uml_uaccess.h"
+#include "os.h"
+
+extern char __binary_start;
 
 /* Changed during early boot */
-pgd_t swapper_pg_dir[1024];
-unsigned long high_physmem;
-unsigned long vm_start;
-unsigned long vm_end;
-unsigned long highmem;
 unsigned long *empty_zero_page = NULL;
 unsigned long *empty_bad_page = NULL;
-
-/* Not modified */
-const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";
-
-extern char __init_begin, __init_end;
-extern long physmem_size;
-
-/* Not changed by UML */
-DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
-
-/* Changed during early boot */
+pgd_t swapper_pg_dir[1024];
+unsigned long highmem;
 int kmalloc_ok = 0;
 
-#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
-struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
-#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)
-
-/* Changed during early boot */
 static unsigned long brk_end;
 
+/* Return the virtual range between the end of the kernel brk (brk_end)
+ * and the top of the UML reserved area (uml_reserved) to the host by
+ * unmapping it from the process address space.
+ */
+void unmap_physmem(void)
+{
+       os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
+}
+
+/* Map the physical memory backing [brk_end, uml_reserved) into the kernel.
+ * The trailing (1, 1, 0) flags presumably select read/write, non-executable
+ * -- confirm against map_memory()'s parameter order.
+ */
 static void map_cb(void *unused)
 {
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
 }
 
-void unmap_physmem(void)
+#ifdef CONFIG_HIGHMEM
+/* Hand the highmem region [highmem_start, highmem_start + highmem_len) to
+ * the page allocator: every page is unreserved, flagged PG_highmem, given
+ * a refcount of one and then released via __free_page(), which puts it
+ * into the buddy allocator's free lists.
+ */
+static void setup_highmem(unsigned long highmem_start,
+                         unsigned long highmem_len)
 {
-       os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
-}
+       struct page *page;
+       unsigned long highmem_pfn;
+       int i;
 
-extern char __binary_start;
+       highmem_start_page = virt_to_page(highmem_start);
+
+       /* pfn of the first highmem page; used to index mem_map below */
+       highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
+       for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
+               page = &mem_map[highmem_pfn + i];
+               ClearPageReserved(page);
+               set_bit(PG_highmem, &page->flags);
+               /* refcount of 1 so that __free_page's drop-to-zero
+                * actually frees the page into the allocator.
+                */
+               set_page_count(page, 1);
+               __free_page(page);
+       }
+}
+#endif
 
 void mem_init(void)
 {
@@ -103,50 +95,15 @@ void mem_init(void)
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
        num_physpages = totalram_pages;
-       max_mapnr = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n", 
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;
-}
-
-/* Changed during early boot */
-static unsigned long kmem_top = 0;
-
-unsigned long get_kmem_end(void)
-{
-       if(kmem_top == 0)
-               kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
-       return(kmem_top);
-}
-
-void set_kmem_end(unsigned long new)
-{
-       kmem_top = new;
-}
 
 #ifdef CONFIG_HIGHMEM
-/* Changed during early boot */
-pte_t *kmap_pte;
-pgprot_t kmap_prot;
-
-EXPORT_SYMBOL(kmap_prot);
-EXPORT_SYMBOL(kmap_pte);
-
-#define kmap_get_fixmap_pte(vaddr)                                     \
-       pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
-
-void __init kmap_init(void)
-{
-       unsigned long kmap_vstart;
-
-       /* cache the first kmap pte */
-       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
-       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
-
-       kmap_prot = PAGE_KERNEL;
+       setup_highmem(end_iomem, highmem);
+#endif
 }
-#endif /* CONFIG_HIGHMEM */
 
 static void __init fixrange_init(unsigned long start, unsigned long end, 
                                 pgd_t *pgd_base)
@@ -178,76 +135,24 @@ static void __init fixrange_init(unsigned long start, unsigned long end,
        }
 }
 
-int init_maps(struct mem_region *region)
-{
-       struct page *p, *map;
-       int i, n, len;
-
-       if(region == &physmem_region){
-               region->mem_map = mem_map;
-               return(0);
-       }
-       else if(region->mem_map != NULL) return(0);
-
-       n = region->len >> PAGE_SHIFT;
-       len = n * sizeof(struct page);
-       if(kmalloc_ok){
-               map = kmalloc(len, GFP_KERNEL);
-               if(map == NULL) map = vmalloc(len);
-       }
-       else map = alloc_bootmem_low_pages(len);
-
-       if(map == NULL)
-               return(-ENOMEM);
-       for(i = 0; i < n; i++){
-               p = &map[i];
-               set_page_count(p, 0);
-               SetPageReserved(p);
-               INIT_LIST_HEAD(&p->list);
-       }
-       region->mem_map = map;
-       return(0);
-}
+#ifdef CONFIG_HIGHMEM
+/* First fixmap kmap pte and the protection used for kmap mappings; both
+ * are cached at boot by kmap_init() below.
+ */
+pte_t *kmap_pte;
+pgprot_t kmap_prot;
 
-DECLARE_MUTEX(regions_sem);
+/* Walk the kernel page tables down to the pte covering vaddr.  Note:
+ * pte_offset_kernel is the 2.6 API (pte_offset no longer exists), and it
+ * is what the version of kmap_init removed by this patch used as well.
+ */
+#define kmap_get_fixmap_pte(vaddr)                                     \
+       pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))
 
-static int setup_one_range(int fd, char *driver, unsigned long start, 
-                          unsigned long pfn, int len, 
-                          struct mem_region *region)
+/* Cache the pte of the first kmap fixmap slot and the kmap protection,
+ * so kmap_atomic() does not have to walk the page tables on every call.
+ */
+void __init kmap_init(void)
 {
-       int i;
-
-       down(&regions_sem);
-       for(i = 0; i < NREGIONS; i++){
-               if(regions[i] == NULL) break;           
-       }
-       if(i == NREGIONS){
-               printk("setup_range : no free regions\n");
-               i = -1;
-               goto out;
-       }
-
-       if(fd == -1)
-               fd = create_mem_file(len);
+       unsigned long kmap_vstart;
 
-       if(region == NULL){
-               region = alloc_bootmem_low_pages(sizeof(*region));
-               if(region == NULL)
-                       panic("Failed to allocating mem_region");
-       }
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
 
-       *region = ((struct mem_region) { .driver        = driver,
-                                        .start_pfn     = pfn,
-                                        .start         = start, 
-                                        .len           = len, 
-                                        .fd            = fd } );
-       regions[i] = region;
- out:
-       up(&regions_sem);
-       return(i);
+       kmap_prot = PAGE_KERNEL;
 }
 
-#ifdef CONFIG_HIGHMEM
 static void init_highmem(void)
 {
        pgd_t *pgd;
@@ -268,63 +173,20 @@ static void init_highmem(void)
 
        kmap_init();
 }
-
-void setup_highmem(unsigned long len)
-{
-       struct mem_region *region;
-       struct page *page, *map;
-       unsigned long phys;
-       int i, cur, index;
-
-       phys = physmem_size;
-       do {
-               cur = min(len, (unsigned long) REGION_SIZE);
-               i = setup_one_range(-1, NULL, -1, phys >> PAGE_SHIFT, cur, 
-                                   NULL);
-               if(i == -1){
-                       printk("setup_highmem - setup_one_range failed\n");
-                       return;
-               }
-               region = regions[i];
-               index = phys / PAGE_SIZE;
-               region->mem_map = &mem_map[index];
-
-               map = region->mem_map;
-               for(i = 0; i < (cur >> PAGE_SHIFT); i++){
-                       page = &map[i];
-                       ClearPageReserved(page);
-                       set_bit(PG_highmem, &page->flags);
-                       set_page_count(page, 1);
-                       __free_page(page);
-               }
-               phys += cur;
-               len -= cur;
-       } while(len > 0);
-}
-#endif
+#endif /* CONFIG_HIGHMEM */
 
 void paging_init(void)
 {
-       struct mem_region *region;
-       unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr;
-       int i, index;
+       unsigned long zones_size[MAX_NR_ZONES], vaddr;
+       int i;
 
        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i=0;i<sizeof(zones_size)/sizeof(zones_size[0]);i++) 
                zones_size[i] = 0;
-       zones_size[0] = (high_physmem >> PAGE_SHIFT) - 
-               (uml_physmem >> PAGE_SHIFT);
+       zones_size[0] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
        zones_size[2] = highmem >> PAGE_SHIFT;
        free_area_init(zones_size);
-       start = phys_region_index(__pa(uml_physmem));
-       end = phys_region_index(__pa(high_physmem - 1));
-       for(i = start; i <= end; i++){
-               region = regions[i];
-               index = (region->start - uml_physmem) / PAGE_SIZE;
-               region->mem_map = &mem_map[index];
-               if(i > start) free_bootmem(__pa(region->start), region->len);
-       }
 
        /*
         * Fixed mappings, only the page table structure has to be
@@ -335,15 +197,33 @@ void paging_init(void)
 
 #ifdef CONFIG_HIGHMEM
        init_highmem();
-       setup_highmem(highmem);
 #endif
 }
 
-pte_t __bad_page(void)
+/* Validate that every page of a freshly allocated 2^order block is really
+ * backed by accessible memory, by writing a zero longword to each page
+ * through the fault-catching copy path.  If a write faults, the block is
+ * retried with a new allocation -- but only when the caller may sleep
+ * (__GFP_WAIT set); otherwise NULL is returned.
+ * NOTE(review): a block that fails validation appears to be dropped
+ * without being freed before the retry -- confirm this is intentional.
+ */
+struct page *arch_validate(struct page *page, int mask, int order)
 {
-       clear_page(empty_bad_page);
-        return pte_mkdirty(mk_pte((struct page *) empty_bad_page, 
-                                 PAGE_SHARED));
+       unsigned long addr, zero = 0;
+       int i;
+
+ again:
+       if(page == NULL) return(page);
+       /* Highmem pages have no permanent kernel mapping to poke here, so
+        * they are presumably accepted as-is -- verify against callers.
+        */
+       if(PageHighMem(page)) return(page);
+
+       addr = (unsigned long) page_address(page);
+       for(i = 0; i < (1 << order); i++){
+               /* Arm the fault catcher so a fault on this address is
+                * caught instead of killing the kernel.
+                */
+               current->thread.fault_addr = (void *) addr;
+               if(__do_copy_to_user((void *) addr, &zero,
+                                    sizeof(zero),
+                                    &current->thread.fault_addr,
+                                    &current->thread.fault_catcher)){
+                       if(!(mask & __GFP_WAIT)) return(NULL);
+                       else break;
+               }
+               addr += PAGE_SIZE;
+       }
+       /* All 2^order pages were written successfully -> block is good */
+       if(i == (1 << order)) return(page);
+       page = alloc_pages(mask, order);
+       goto again;
 }
 
 /* This can't do anything because nothing in the kernel image can be freed
@@ -401,395 +281,6 @@ void show_mem(void)
         printk("%d pages swap cached\n", cached);
 }
 
-static int __init uml_mem_setup(char *line, int *add)
-{
-       char *retptr;
-       physmem_size = memparse(line,&retptr);
-       return 0;
-}
-__uml_setup("mem=", uml_mem_setup,
-"mem=<Amount of desired ram>\n"
-"    This controls how much \"physical\" memory the kernel allocates\n"
-"    for the system. The size is specified as a number followed by\n"
-"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
-"    This is not related to the amount of memory in the physical\n"
-"    machine. It can be more, and the excess, if it's ever used, will\n"
-"    just be swapped out.\n        Example: mem=64M\n\n"
-);
-
-struct page *arch_validate(struct page *page, int mask, int order)
-{
-       unsigned long addr, zero = 0;
-       int i;
-
- again:
-       if(page == NULL) return(page);
-       if(PageHighMem(page)) return(page);
-
-       addr = (unsigned long) page_address(page);
-       for(i = 0; i < (1 << order); i++){
-               current->thread.fault_addr = (void *) addr;
-               if(__do_copy_to_user((void *) addr, &zero, 
-                                    sizeof(zero),
-                                    &current->thread.fault_addr,
-                                    &current->thread.fault_catcher)){
-                       if(!(mask & __GFP_WAIT)) return(NULL);
-                       else break;
-               }
-               addr += PAGE_SIZE;
-       }
-       if(i == (1 << order)) return(page);
-       page = alloc_pages(mask, order);
-       goto again;
-}
-
-DECLARE_MUTEX(vm_reserved_sem);
-static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
-
-/* Static structures, linked in to the list in early boot */
-static struct vm_reserved head = {
-       .list           = LIST_HEAD_INIT(head.list),
-       .start          = 0,
-       .end            = 0xffffffff
-};
-
-static struct vm_reserved tail = {
-       .list           = LIST_HEAD_INIT(tail.list),
-       .start          = 0,
-       .end            = 0xffffffff
-};
-
-void set_usable_vm(unsigned long start, unsigned long end)
-{
-       list_add(&head.list, &vm_reserved);
-       list_add(&tail.list, &head.list);
-       head.end = start;
-       tail.start = end;
-}
-
-int reserve_vm(unsigned long start, unsigned long end, void *e)
-              
-{
-       struct vm_reserved *entry = e, *reserved, *prev;
-       struct list_head *ele;
-       int err;
-
-       down(&vm_reserved_sem);
-       list_for_each(ele, &vm_reserved){
-               reserved = list_entry(ele, struct vm_reserved, list);
-               if(reserved->start >= end) goto found;
-       }
-       panic("Reserved vm out of range");
- found:
-       prev = list_entry(ele->prev, struct vm_reserved, list);
-       if(prev->end > start)
-               panic("Can't reserve vm");
-       if(entry == NULL)
-               entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-       if(entry == NULL){
-               printk("reserve_vm : Failed to allocate entry\n");
-               err = -ENOMEM;
-               goto out;
-       }
-       *entry = ((struct vm_reserved) 
-               { .list         = LIST_HEAD_INIT(entry->list),
-                 .start        = start,
-                 .end          = end });
-       list_add(&entry->list, &prev->list);
-       err = 0;
- out:
-       up(&vm_reserved_sem);
-       return(0);
-}
-
-unsigned long get_vm(unsigned long len)
-{
-       struct vm_reserved *this, *next;
-       struct list_head *ele;
-       unsigned long start;
-       int err;
-       
-       down(&vm_reserved_sem);
-       list_for_each(ele, &vm_reserved){
-               this = list_entry(ele, struct vm_reserved, list);
-               next = list_entry(ele->next, struct vm_reserved, list);
-               if((this->start < next->start) && 
-                  (this->end + len + PAGE_SIZE <= next->start))
-                       goto found;
-       }
-       up(&vm_reserved_sem);
-       return(0);
- found:
-       up(&vm_reserved_sem);
-       start = (unsigned long) UML_ROUND_UP(this->end) + PAGE_SIZE;
-       err = reserve_vm(start, start + len, NULL);
-       if(err) return(0);
-       return(start);
-}
-
-int nregions(void)
-{
-       return(NREGIONS);
-}
-
-void setup_range(int fd, char *driver, unsigned long start, unsigned long pfn,
-                unsigned long len, int need_vm, struct mem_region *region, 
-                void *reserved)
-{
-       int i, cur;
-
-       do {
-               cur = min(len, (unsigned long) REGION_SIZE);
-               i = setup_one_range(fd, driver, start, pfn, cur, region);
-               region = regions[i];
-               if(need_vm && setup_region(region, reserved)){
-                       kfree(region);
-                       regions[i] = NULL;
-                       return;
-               }
-               start += cur;
-               if(pfn != -1) pfn += cur;
-               len -= cur;
-       } while(len > 0);
-}
-
-struct iomem {
-       char *name;
-       int fd;
-       unsigned long size;
-};
-
-/* iomem regions can only be added on the command line at the moment.  
- * Locking will be needed when they can be added via mconsole.
- */
-
-struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
-                                        { .name        = NULL,
-                                          .fd          = -1,
-                                          .size        = 0 } };
-
-int num_iomem_regions = 0;
-
-void add_iomem(char *name, int fd, unsigned long size)
-{
-       if(num_iomem_regions == sizeof(iomem_regions)/sizeof(iomem_regions[0]))
-               return;
-       size = (size + PAGE_SIZE - 1) & PAGE_MASK;
-       iomem_regions[num_iomem_regions++] = 
-               ((struct iomem) { .name         = name,
-                                 .fd           = fd,
-                                 .size         = size } );
-}
-
-int setup_iomem(void)
-{
-       struct iomem *iomem;
-       int i;
-
-       for(i = 0; i < num_iomem_regions; i++){
-               iomem = &iomem_regions[i];
-               setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1, 
-                           NULL, NULL);
-       }
-       return(0);
-}
-
-__initcall(setup_iomem);
-
-#define PFN_UP(x)      (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
-#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
-
-/* Changed during early boot */
-static struct mem_region physmem_region;
-static struct vm_reserved physmem_reserved;
-
-void setup_physmem(unsigned long start, unsigned long reserve_end,
-                  unsigned long len)
-{
-       struct mem_region *region = &physmem_region;
-       struct vm_reserved *reserved = &physmem_reserved;
-       unsigned long cur, pfn = 0;
-       int do_free = 1, bootmap_size;
-
-       do {
-               cur = min(len, (unsigned long) REGION_SIZE);
-               if(region == NULL) 
-                       region = alloc_bootmem_low_pages(sizeof(*region));
-               if(reserved == NULL) 
-                       reserved = alloc_bootmem_low_pages(sizeof(*reserved));
-               if((region == NULL) || (reserved == NULL))
-                       panic("Couldn't allocate physmem region or vm "
-                             "reservation\n");
-               setup_range(-1, NULL, start, pfn, cur, 1, region, reserved);
-
-               if(do_free){
-                       unsigned long reserve = reserve_end - start;
-                       int pfn = PFN_UP(__pa(reserve_end));
-                       int delta = (len - reserve) >> PAGE_SHIFT;
-
-                       bootmap_size = init_bootmem(pfn, pfn + delta);
-                       free_bootmem(__pa(reserve_end) + bootmap_size,
-                                    cur - bootmap_size - reserve);
-                       do_free = 0;
-               }
-               start += cur;
-               pfn += cur >> PAGE_SHIFT;
-               len -= cur;
-               region = NULL;
-               reserved = NULL;
-       } while(len > 0);
-}
-
-struct mem_region *phys_region(unsigned long phys)
-{
-       unsigned int n = phys_region_index(phys);
-
-       if(regions[n] == NULL) 
-               panic("Physical address in uninitialized region");
-       return(regions[n]);
-}
-
-unsigned long phys_offset(unsigned long phys)
-{
-       return(phys_addr(phys));
-}
-
-struct page *phys_mem_map(unsigned long phys)
-{
-       return((struct page *) phys_region(phys)->mem_map);
-}
-
-struct page *pte_mem_map(pte_t pte)
-{
-       return(phys_mem_map(pte_val(pte)));
-}
-
-struct mem_region *page_region(struct page *page, int *index_out)
-{
-       int i;
-       struct mem_region *region;
-       struct page *map;
-
-       for(i = 0; i < NREGIONS; i++){
-               region = regions[i];
-               if(region == NULL) continue;
-               map = region->mem_map;
-               if((page >= map) && (page < &map[region->len >> PAGE_SHIFT])){
-                       if(index_out != NULL) *index_out = i;
-                       return(region);
-               }
-       }
-       panic("No region found for page");
-       return(NULL);
-}
-
-unsigned long page_to_pfn(struct page *page)
-{
-       struct mem_region *region = page_region(page, NULL);
-
-       return(region->start_pfn + (page - (struct page *) region->mem_map));
-}
-
-struct mem_region *pfn_to_region(unsigned long pfn, int *index_out)
-{
-       struct mem_region *region;
-       int i;
-
-       for(i = 0; i < NREGIONS; i++){
-               region = regions[i];
-               if(region == NULL)
-                       continue;
-
-               if((region->start_pfn <= pfn) &&
-                  (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){
-                       if(index_out != NULL) 
-                               *index_out = i;
-                       return(region);
-               }
-       }
-       return(NULL);
-}
-
-struct page *pfn_to_page(unsigned long pfn)
-{
-       struct mem_region *region = pfn_to_region(pfn, NULL);
-       struct page *mem_map = (struct page *) region->mem_map;
-
-       return(&mem_map[pfn - region->start_pfn]);
-}
-
-unsigned long phys_to_pfn(unsigned long p)
-{
-       struct mem_region *region = regions[phys_region_index(p)];
-
-       return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT));
-}
-
-unsigned long pfn_to_phys(unsigned long pfn)
-{
-       int n;
-       struct mem_region *region = pfn_to_region(pfn, &n);
-
-       return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n));
-}
-
-struct page *page_mem_map(struct page *page)
-{
-       return((struct page *) page_region(page, NULL)->mem_map);
-}
-
-extern unsigned long region_pa(void *virt)
-{
-       struct mem_region *region;
-       unsigned long addr = (unsigned long) virt;
-       int i;
-
-       for(i = 0; i < NREGIONS; i++){
-               region = regions[i];
-               if(region == NULL) continue;
-               if((region->start <= addr) && 
-                  (addr <= region->start + region->len))
-                       return(mk_phys(addr - region->start, i));
-       }
-       panic("region_pa : no region for virtual address");
-       return(0);
-}
-
-extern void *region_va(unsigned long phys)
-{
-       return((void *) (phys_region(phys)->start + phys_addr(phys)));
-}
-
-unsigned long page_to_phys(struct page *page)
-{
-       int n;
-       struct mem_region *region = page_region(page, &n);
-       struct page *map = region->mem_map;
-       return(mk_phys((page - map) << PAGE_SHIFT, n));
-}
-
-struct page *phys_to_page(unsigned long phys)
-{
-       struct page *mem_map;
-
-       mem_map = phys_mem_map(phys);
-       return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
-}
-
-static int setup_mem_maps(void)
-{
-       struct mem_region *region;
-       int i;
-
-       for(i = 0; i < NREGIONS; i++){
-               region = regions[i];
-               if((region != NULL) && (region->fd > 0)) init_maps(region);
-       }
-       return(0);
-}
-
-__initcall(setup_mem_maps);
-
 /*
  * Allocate and free page tables.
  */