-static int __init uml_mem_setup(char *line, int *add)
-{
- char *retptr;
- physmem_size = memparse(line,&retptr);
- return 0;
-}
-__uml_setup("mem=", uml_mem_setup,
-"mem=<Amount of desired ram>\n"
-" This controls how much \"physical\" memory the kernel allocates\n"
-" for the system. The size is specified as a number followed by\n"
-" one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
-" This is not related to the amount of memory in the physical\n"
-" machine. It can be more, and the excess, if it's ever used, will\n"
-" just be swapped out.\n Example: mem=64M\n\n"
-);
-
-struct page *arch_validate(struct page *page, int mask, int order)
-{
- unsigned long addr, zero = 0;
- int i;
-
- again:
- if(page == NULL) return(page);
- if(PageHighMem(page)) return(page);
-
- addr = (unsigned long) page_address(page);
- for(i = 0; i < (1 << order); i++){
- current->thread.fault_addr = (void *) addr;
- if(__do_copy_to_user((void *) addr, &zero,
- sizeof(zero),
- ¤t->thread.fault_addr,
- ¤t->thread.fault_catcher)){
- if(!(mask & __GFP_WAIT)) return(NULL);
- else break;
- }
- addr += PAGE_SIZE;
- }
- if(i == (1 << order)) return(page);
- page = alloc_pages(mask, order);
- goto again;
-}
-
/* Serializes all access to the vm_reserved list below. */
DECLARE_MUTEX(vm_reserved_sem);
/* List of reserved virtual address ranges, kept sorted by start address. */
static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);
-
/* Static structures, linked in to the list in early boot */

/* Sentinel below the usable VM window; its .end is set to the real
 * start of usable VM by set_usable_vm().
 */
static struct vm_reserved head = {
	.list 	= LIST_HEAD_INIT(head.list),
	.start 	= 0,
	.end 	= 0xffffffff
};

/* Sentinel above the usable VM window; its .start is set to the real
 * end of usable VM by set_usable_vm().
 */
static struct vm_reserved tail = {
	.list 	= LIST_HEAD_INIT(tail.list),
	.start 	= 0,
	.end 	= 0xffffffff
};
-
-void set_usable_vm(unsigned long start, unsigned long end)
-{
- list_add(&head.list, &vm_reserved);
- list_add(&tail.list, &head.list);
- head.end = start;
- tail.start = end;
-}
-
-int reserve_vm(unsigned long start, unsigned long end, void *e)
-
-{
- struct vm_reserved *entry = e, *reserved, *prev;
- struct list_head *ele;
- int err;
-
- down(&vm_reserved_sem);
- list_for_each(ele, &vm_reserved){
- reserved = list_entry(ele, struct vm_reserved, list);
- if(reserved->start >= end) goto found;
- }
- panic("Reserved vm out of range");
- found:
- prev = list_entry(ele->prev, struct vm_reserved, list);
- if(prev->end > start)
- panic("Can't reserve vm");
- if(entry == NULL)
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if(entry == NULL){
- printk("reserve_vm : Failed to allocate entry\n");
- err = -ENOMEM;
- goto out;
- }
- *entry = ((struct vm_reserved)
- { .list = LIST_HEAD_INIT(entry->list),
- .start = start,
- .end = end });
- list_add(&entry->list, &prev->list);
- err = 0;
- out:
- up(&vm_reserved_sem);
- return(0);
-}
-
-unsigned long get_vm(unsigned long len)
-{
- struct vm_reserved *this, *next;
- struct list_head *ele;
- unsigned long start;
- int err;
-
- down(&vm_reserved_sem);
- list_for_each(ele, &vm_reserved){
- this = list_entry(ele, struct vm_reserved, list);
- next = list_entry(ele->next, struct vm_reserved, list);
- if((this->start < next->start) &&
- (this->end + len + PAGE_SIZE <= next->start))
- goto found;
- }
- up(&vm_reserved_sem);
- return(0);
- found:
- up(&vm_reserved_sem);
- start = (unsigned long) UML_ROUND_UP(this->end) + PAGE_SIZE;
- err = reserve_vm(start, start + len, NULL);
- if(err) return(0);
- return(start);
-}
-
-int nregions(void)
-{
- return(NREGIONS);
-}
-
-void setup_range(int fd, char *driver, unsigned long start, unsigned long pfn,
- unsigned long len, int need_vm, struct mem_region *region,
- void *reserved)
-{
- int i, cur;
-
- do {
- cur = min(len, (unsigned long) REGION_SIZE);
- i = setup_one_range(fd, driver, start, pfn, cur, region);
- region = regions[i];
- if(need_vm && setup_region(region, reserved)){
- kfree(region);
- regions[i] = NULL;
- return;
- }
- start += cur;
- if(pfn != -1) pfn += cur;
- len -= cur;
- } while(len > 0);
-}
-
/* Descriptor for one host-backed iomem region. */
struct iomem {
	char *name;		/* driver name passed to add_iomem() */
	int fd;			/* host file descriptor backing the region */
	unsigned long size;	/* size in bytes, page-aligned by add_iomem() */
};
-
/* iomem regions can only be added on the command line at the moment.
 * Locking will be needed when they can be added via mconsole.
 */

/* Table of registered iomem regions; unused slots have fd == -1. */
struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
					 { .name  	= NULL,
					   .fd  	= -1,
					   .size 	= 0 } };

/* Number of valid entries at the front of iomem_regions. */
int num_iomem_regions = 0;
-
-void add_iomem(char *name, int fd, unsigned long size)
-{
- if(num_iomem_regions == sizeof(iomem_regions)/sizeof(iomem_regions[0]))
- return;
- size = (size + PAGE_SIZE - 1) & PAGE_MASK;
- iomem_regions[num_iomem_regions++] =
- ((struct iomem) { .name = name,
- .fd = fd,
- .size = size } );
-}
-
-int setup_iomem(void)
-{
- struct iomem *iomem;
- int i;
-
- for(i = 0; i < num_iomem_regions; i++){
- iomem = &iomem_regions[i];
- setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1,
- NULL, NULL);
- }
- return(0);
-}
-
-__initcall(setup_iomem);
-
/* Round a physical address up / truncate it down to a page frame number. */
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-
-/* Changed during early boot */
-static struct mem_region physmem_region;
-static struct vm_reserved physmem_reserved;
-
-void setup_physmem(unsigned long start, unsigned long reserve_end,
- unsigned long len)
-{
- struct mem_region *region = &physmem_region;
- struct vm_reserved *reserved = &physmem_reserved;
- unsigned long cur, pfn = 0;
- int do_free = 1, bootmap_size;
-
- do {
- cur = min(len, (unsigned long) REGION_SIZE);
- if(region == NULL)
- region = alloc_bootmem_low_pages(sizeof(*region));
- if(reserved == NULL)
- reserved = alloc_bootmem_low_pages(sizeof(*reserved));
- if((region == NULL) || (reserved == NULL))
- panic("Couldn't allocate physmem region or vm "
- "reservation\n");
- setup_range(-1, NULL, start, pfn, cur, 1, region, reserved);
-
- if(do_free){
- unsigned long reserve = reserve_end - start;
- int pfn = PFN_UP(__pa(reserve_end));
- int delta = (len - reserve) >> PAGE_SHIFT;
-
- bootmap_size = init_bootmem(pfn, pfn + delta);
- free_bootmem(__pa(reserve_end) + bootmap_size,
- cur - bootmap_size - reserve);
- do_free = 0;
- }
- start += cur;
- pfn += cur >> PAGE_SHIFT;
- len -= cur;
- region = NULL;
- reserved = NULL;
- } while(len > 0);
-}
-
-struct mem_region *phys_region(unsigned long phys)
-{
- unsigned int n = phys_region_index(phys);
-
- if(regions[n] == NULL)
- panic("Physical address in uninitialized region");
- return(regions[n]);
-}
-
/* Offset of a physical address within its region. */
unsigned long phys_offset(unsigned long phys)
{
	return phys_addr(phys);
}
-
-struct page *phys_mem_map(unsigned long phys)
-{
- return((struct page *) phys_region(phys)->mem_map);
-}
-
-struct page *pte_mem_map(pte_t pte)
-{
- return(phys_mem_map(pte_val(pte)));
-}
-
-struct mem_region *page_region(struct page *page, int *index_out)
-{
- int i;
- struct mem_region *region;
- struct page *map;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL) continue;
- map = region->mem_map;
- if((page >= map) && (page < &map[region->len >> PAGE_SHIFT])){
- if(index_out != NULL) *index_out = i;
- return(region);
- }
- }
- panic("No region found for page");
- return(NULL);
-}
-
-unsigned long page_to_pfn(struct page *page)
-{
- struct mem_region *region = page_region(page, NULL);
-
- return(region->start_pfn + (page - (struct page *) region->mem_map));
-}
-
-struct mem_region *pfn_to_region(unsigned long pfn, int *index_out)
-{
- struct mem_region *region;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL)
- continue;
-
- if((region->start_pfn <= pfn) &&
- (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){
- if(index_out != NULL)
- *index_out = i;
- return(region);
- }
- }
- return(NULL);
-}
-
-struct page *pfn_to_page(unsigned long pfn)
-{
- struct mem_region *region = pfn_to_region(pfn, NULL);
- struct page *mem_map = (struct page *) region->mem_map;
-
- return(&mem_map[pfn - region->start_pfn]);
-}
-
-unsigned long phys_to_pfn(unsigned long p)
-{
- struct mem_region *region = regions[phys_region_index(p)];
-
- return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT));
-}
-
-unsigned long pfn_to_phys(unsigned long pfn)
-{
- int n;
- struct mem_region *region = pfn_to_region(pfn, &n);
-
- return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n));
-}
-
-struct page *page_mem_map(struct page *page)
-{
- return((struct page *) page_region(page, NULL)->mem_map);
-}
-
-extern unsigned long region_pa(void *virt)
-{
- struct mem_region *region;
- unsigned long addr = (unsigned long) virt;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if(region == NULL) continue;
- if((region->start <= addr) &&
- (addr <= region->start + region->len))
- return(mk_phys(addr - region->start, i));
- }
- panic("region_pa : no region for virtual address");
- return(0);
-}
-
-extern void *region_va(unsigned long phys)
-{
- return((void *) (phys_region(phys)->start + phys_addr(phys)));
-}
-
-unsigned long page_to_phys(struct page *page)
-{
- int n;
- struct mem_region *region = page_region(page, &n);
- struct page *map = region->mem_map;
- return(mk_phys((page - map) << PAGE_SHIFT, n));
-}
-
-struct page *phys_to_page(unsigned long phys)
-{
- struct page *mem_map;
-
- mem_map = phys_mem_map(phys);
- return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
-}
-
-static int setup_mem_maps(void)
-{
- struct mem_region *region;
- int i;
-
- for(i = 0; i < NREGIONS; i++){
- region = regions[i];
- if((region != NULL) && (region->fd > 0)) init_maps(region);
- }
- return(0);
-}
-
-__initcall(setup_mem_maps);
-