2 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3 * Licensed under the GPL
7 #include "linux/ghash.h"
8 #include "linux/slab.h"
9 #include "linux/vmalloc.h"
10 #include "linux/bootmem.h"
11 #include "asm/types.h"
12 #include "asm/pgtable.h"
13 #include "kern_util.h"
14 #include "user_util.h"
15 #include "mode_kern.h"
/* Private page directory used by this file to record which virtual pages
 * have been substituted (see lookup_mapping below); not the kernel's own
 * swapper_pg_dir. */
23 static pgd_t physmem_pgd[PTRS_PER_PGD];
/*
 * Walk the private physmem_pgd table and return the phys_desc recorded for
 * a virtual address; the pte value is reused as a pointer store.
 * NOTE(review): this view is fragmentary — the pgd/pmd/pte declarations and
 * any intermediate present/none checks are not visible here.
 */
25 static struct phys_desc *lookup_mapping(void *addr)
27 pgd = &physmem_pgd[pgd_index(addr)];
/* Descend pgd -> pmd -> pte. */
31 pmd = pmd_offset(pgd, addr);
35 pte = pte_offset_kernel(pmd, addr);
/* The pte's raw value is the descriptor pointer itself. */
36 return((struct phys_desc *) pte_val(pte));
/* Counterpart of lookup_mapping: records `new` as the descriptor for `addr`.
 * NOTE(review): the return type is garbled ("static struct" with no tag) and
 * the body is not visible in this fragment — confirm against the full tree. */
39 static struct add_mapping(void *addr, struct phys_desc *new)
/* Number of buckets in the virtual-address -> phys_desc hash table. */
44 #define PHYS_HASHSIZE (8192)
/* Generate the chain/table types for "virtmem" entries (linux/ghash.h);
 * struct phys_desc embeds the virt_ptrs chain member used below. */
48 DEF_HASH_STRUCTS(virtmem, PHYS_HASHSIZE, struct phys_desc);
51 struct virtmem_ptrs virt_ptrs;
/* NOTE(review): other struct phys_desc members (virt, fd, offset — used by
 * the functions below) are not visible in this fragment. */
56 struct list_head list;
/* Global table mapping page-aligned virtual addresses to their descriptor. */
59 struct virtmem_table virtmem_hash;
/*
 * Hash-table comparison callback for the virtmem table: two keys match only
 * when they are the exact same virtual address.  Returns 0 on a match,
 * non-zero otherwise (the convention DEF_HASH expects).
 *
 * Fix: the extraction had dropped the function braces and fused line-number
 * artifacts into the code; reconstructed as a well-formed definition.
 */
static int virt_cmp(void *virt1, void *virt2)
{
	return(virt1 != virt2);
}
66 static int virt_hash(void *virt)
68 unsigned long addr = ((unsigned long) virt) >> PAGE_SHIFT;
69 return(addr % PHYS_HASHSIZE);
/* Instantiate the hash-table operations (find/insert/remove_virtmem_hash)
 * keyed on the `virt` field, using the callbacks above.
 * NOTE(review): the invocation's trailing arguments/closing paren are on
 * lines not visible in this fragment. */
72 DEF_HASH(static, virtmem, struct phys_desc, virt_ptrs, void *, virt, virt_cmp,
/* All per-descriptor (per-fd) bookkeeping structures, linked via `list`. */
75 LIST_HEAD(descriptor_mappings);
/* struct desc_mapping members: `list` links it into descriptor_mappings,
 * `pages` heads the phys_desc entries mapped from this fd.
 * NOTE(review): the struct header and fd member are not visible here. */
79 struct list_head list;
80 struct list_head pages;
/*
 * Linear search of descriptor_mappings for the entry tracking `fd`.
 * NOTE(review): the per-entry fd comparison and the return statements are on
 * lines not visible in this fragment; presumably returns NULL when absent.
 */
83 static struct desc_mapping *find_mapping(int fd)
85 struct desc_mapping *desc;
86 struct list_head *ele;
88 list_for_each(ele, &descriptor_mappings){
89 desc = list_entry(ele, struct desc_mapping, list);
/*
 * Find-or-create the desc_mapping for `fd`: reuse an existing entry, else
 * allocate one, initialize its list heads, and add it to
 * descriptor_mappings.  NOTE(review): the early-return on a found entry, the
 * kmalloc NULL check, and the .fd initializer are not visible here.
 */
97 static struct desc_mapping *descriptor_mapping(int fd)
99 struct desc_mapping *desc;
101 desc = find_mapping(fd);
/* GFP_ATOMIC: presumably callable from non-sleeping context — confirm. */
105 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
109 *desc = ((struct desc_mapping)
111 .list = LIST_HEAD_INIT(desc->list),
112 .pages = LIST_HEAD_INIT(desc->pages) });
113 list_add(&desc->list, &descriptor_mappings);
/*
 * Substitute the physical page backing page-aligned `virt`: map PAGE_SIZE
 * bytes of `fd` at `offset` over it (writable iff `w`), recording a
 * phys_desc both in virtmem_hash and on the fd's desc_mapping page list.
 * Panics if the address is already substituted with different parameters.
 * NOTE(review): fragmentary view — declarations, NULL checks and the return
 * paths are on lines not visible here.
 */
118 int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
120 struct desc_mapping *fd_maps;
121 struct phys_desc *desc;
/* An identical existing substitution is tolerated; a conflicting one is fatal. */
126 desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
128 if((virt != desc->virt) || (fd != desc->fd) ||
129 (offset != desc->offset))
130 panic("Address 0x%p is already substituted\n", virt);
/* Get (or create) the per-fd bookkeeping entry. */
134 fd_maps = descriptor_mapping(fd);
139 desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
/* Designated-initializer reset of the new descriptor (chain ptrs cleared). */
143 *desc = ((struct phys_desc)
144 { .virt_ptrs = { NULL, NULL },
149 .list = LIST_HEAD_INIT(desc->list) });
150 insert_virtmem_hash(&virtmem_hash, desc);
152 list_add(&desc->list, &fd_maps->pages);
/* Replace the physical page with the file-backed mapping. */
154 virt = (void *) ((unsigned long) virt & PAGE_MASK);
155 err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
/* On mapping failure the hash entry is rolled back. */
159 remove_virtmem_hash(&virtmem_hash, desc);
/* Descriptor of the file backing UML's "physical" memory (set by
 * setup_physmem below); -1 until initialized. */
165 static int physmem_fd = -1;
/*
 * Undo a substitution: drop the descriptor from the hash and its fd list,
 * then remap the original physmem-file page (at __pa(virt)) read-write over
 * the address.  Panics if the remap fails.  NOTE(review): the kfree of desc
 * and the panic's argument lines are not visible in this fragment.
 */
167 static void remove_mapping(struct phys_desc *desc)
169 void *virt = desc->virt;
172 remove_virtmem_hash(&virtmem_hash, desc);
173 list_del(&desc->list);
/* Restore the page from the backing physmem file. */
176 err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
178 panic("Failed to unmap block device page from physical memory, "
/*
 * Public wrapper: page-align `virt`, look up its substitution descriptor,
 * and remove the mapping if one exists.  NOTE(review): the NULL-descriptor
 * early return and the function's return value lines are not visible here.
 */
182 int physmem_remove_mapping(void *virt)
184 struct phys_desc *desc;
186 virt = (void *) ((unsigned long) virt & PAGE_MASK);
187 desc = find_virtmem_hash(&virtmem_hash, virt);
191 remove_mapping(desc);
/*
 * Called when `fd` is going away: tear down every page still substituted
 * from it.  For each page the substitution is removed, then the page's
 * contents are re-read from the file (seek to the recorded offset, read one
 * page back into memory) so data is not lost; any I/O failure is fatal.
 * Finally the desc_mapping itself is unlinked.  NOTE(review): fragmentary —
 * the NULL check after find_mapping, the `offset`/`addr`/`err` declarations,
 * the panic argument lines and the kfree calls are not visible here.
 */
195 void physmem_forget_descriptor(int fd)
197 struct desc_mapping *desc;
198 struct phys_desc *page;
199 struct list_head *ele, *next;
204 desc = find_mapping(fd);
/* Having pages left at this point is unexpected, but still cleaned up. */
208 if(!list_empty(&desc->pages))
209 printk("Still have mapped pages on fd %d\n", fd);
/* _safe variant: remove_mapping() deletes entries while we iterate. */
211 list_for_each_safe(ele, next, &desc->pages){
212 page = list_entry(ele, struct phys_desc, list);
/* Capture offset before the descriptor is freed by remove_mapping. */
213 offset = page->offset;
215 remove_mapping(page);
216 err = os_seek_file(fd, offset);
218 panic("physmem_forget_descriptor - failed to seek "
219 "to %lld in fd %d, error = %d\n",
/* Pull the file's page back into the restored physical page. */
221 err = os_read_file(fd, addr, PAGE_SIZE);
223 panic("physmem_forget_descriptor - failed to read "
224 "from fd %d to 0x%p, error = %d\n",
228 list_del(&desc->list);
/*
 * Arch hook invoked when 2^order pages are freed: make sure none of them is
 * still substituted by a file mapping.  NOTE(review): the `virt`/`i`
 * declarations and any early-out (e.g. when nothing is mapped) are on lines
 * not visible in this fragment.
 */
232 void arch_free_page(struct page *page, int order)
237 for(i = 0; i < (1 << order); i++){
238 virt = __va(page_to_phys(page + i));
239 physmem_remove_mapping(virt);
/*
 * Query whether `virt` currently has a substituted mapping; warns (but still
 * reports "remapped") when the recorded offset differs from the caller's.
 * NOTE(review): the fd-mismatch check and NULL-descriptor early return are
 * not visible here.  The final line repeats the hash lookup already held in
 * `desc` — looks redundant; left as-is since the gap hides the control flow.
 */
243 int is_remapped(const void *virt, int fd, __u64 offset)
245 struct phys_desc *desc;
247 desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
250 if(offset != desc->offset)
251 printk("offset mismatch\n");
252 return(find_virtmem_hash(&virtmem_hash, (void *) virt) != NULL);
255 /* Changed during early boot */
/* Top of UML's low "physical" memory region. */
256 unsigned long high_physmem;
/* Total size of the physical memory area; defined elsewhere (set by the
 * mem= command-line handler below). */
258 extern unsigned long physmem_size;
260 void *to_virt(unsigned long phys)
262 return((void *) uml_physmem + phys);
265 unsigned long to_phys(void *virt)
267 return(((unsigned long) virt) - uml_physmem);
270 int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
272 struct page *p, *map;
273 unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
274 unsigned long iomem_len, iomem_pages, total_len, total_pages;
277 phys_pages = physmem >> PAGE_SHIFT;
278 phys_len = phys_pages * sizeof(struct page);
280 iomem_pages = iomem >> PAGE_SHIFT;
281 iomem_len = iomem_pages * sizeof(struct page);
283 highmem_pages = highmem >> PAGE_SHIFT;
284 highmem_len = highmem_pages * sizeof(struct page);
286 total_pages = phys_pages + iomem_pages + highmem_pages;
287 total_len = phys_len + iomem_pages + highmem_len;
290 map = kmalloc(total_len, GFP_KERNEL);
292 map = vmalloc(total_len);
294 else map = alloc_bootmem_low_pages(total_len);
299 for(i = 0; i < total_pages; i++){
301 set_page_count(p, 0);
303 INIT_LIST_HEAD(&p->lru);
307 max_mapnr = total_pages;
311 struct page *phys_to_page(const unsigned long phys)
313 return(&mem_map[phys >> PAGE_SHIFT]);
316 struct page *__virt_to_page(const unsigned long virt)
318 return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
321 unsigned long page_to_phys(struct page *page)
323 return((page - mem_map) << PAGE_SHIFT);
326 pte_t mk_pte(struct page *page, pgprot_t pgprot)
330 pte_val(pte) = page_to_phys(page) + pgprot_val(pgprot);
331 if(pte_present(pte)) pte_mknewprot(pte_mknewpage(pte));
335 /* Changed during early boot */
336 static unsigned long kmem_top = 0;
338 unsigned long get_kmem_end(void)
341 kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
345 void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
351 fd = phys_mapping(phys, &offset);
352 err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
354 panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
355 "err = %d\n", virt, fd, offset, len, r, w, x, err);
/* Round a byte address/size up to the next whole page frame number. */
358 #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
/*
 * Create the host file backing UML's physical memory (len + highmem bytes),
 * map the non-reserved part of it over [uml_reserved, start+len), and hand
 * the pages above the boot reservation to the bootmem allocator.
 * NOTE(review): fragmentary view — the error check after create_mem_file and
 * any exit path after os_print_error are on lines not visible here.
 */
360 void setup_physmem(unsigned long start, unsigned long reserve_end,
361 unsigned long len, unsigned long highmem)
/* Bytes reserved at the bottom (kernel image + early allocations). */
363 unsigned long reserve = reserve_end - start;
/* First whole pfn above the reservation, and how many pages follow it. */
364 int pfn = PFN_UP(__pa(reserve_end));
365 int delta = (len - reserve) >> PAGE_SHIFT;
366 int err, offset, bootmap_size;
368 physmem_fd = create_mem_file(len + highmem);
/* Map everything above the already-populated reserved region. */
370 offset = uml_reserved - uml_physmem;
371 err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
372 len - offset, 1, 1, 0);
374 os_print_error(err, "Mapping memory");
/* Bootmem manages [pfn, pfn+delta); the bitmap itself stays reserved. */
378 bootmap_size = init_bootmem(pfn, pfn + delta);
379 free_bootmem(__pa(reserve_end) + bootmap_size,
380 len - bootmap_size - reserve);
/*
 * Resolve which file descriptor and offset back the physical address `phys`:
 * a substituted page's recorded fd/offset, the main physmem file, the iomem
 * region containing it, or the highmem range past end_iomem.
 * NOTE(review): fragmentary view — the `fd` declaration, each branch's fd
 * assignment and the final return are on lines not visible here.
 */
383 int phys_mapping(unsigned long phys, __u64 *offset_out)
/* A substituted page overrides every other source. */
385 struct phys_desc *desc = find_virtmem_hash(&virtmem_hash,
386 __va(phys & PAGE_MASK));
391 *offset_out = desc->offset;
/* Low memory: backed by the physmem file at offset == phys. */
393 else if(phys < physmem_size){
/* iomem: find the region whose [phys, phys+size) range contains it. */
397 else if(phys < __pa(end_iomem)){
398 struct iomem_region *region = iomem_regions;
400 while(region != NULL){
401 if((phys >= region->phys) &&
402 (phys < region->phys + region->size)){
404 *offset_out = phys - region->phys;
407 region = region->next;
/* Highmem: also in the physmem file, past the iomem span. */
410 else if(phys < __pa(end_iomem) + highmem){
412 *offset_out = phys - iomem_size;
/*
 * "mem=" command-line handler: parse the size (k/K/m/M suffixes honored by
 * memparse) into physmem_size.  NOTE(review): the `retptr` declaration and
 * the return statement are on lines not visible in this fragment.
 */
418 static int __init uml_mem_setup(char *line, int *add)
421 physmem_size = memparse(line,&retptr);
/* Register the mem= option and its help text with UML's setup machinery.
 * NOTE(review): the closing line of this invocation is not visible here. */
424 __uml_setup("mem=", uml_mem_setup,
425 "mem=<Amount of desired ram>\n"
426 "    This controls how much \"physical\" memory the kernel allocates\n"
427 "    for the system. The size is specified as a number followed by\n"
428 "    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
429 "    This is not related to the amount of memory in the host. It can\n"
430 "    be more, and the excess, if it's ever used, will just be swapped out.\n"
431 "        Example: mem=64M\n\n"
/*
 * Look up the iomem region registered under `driver`: on a name match,
 * store its size through len_out and return its virtual address.
 * NOTE(review): the loop's advance (region = region->next) and the
 * not-found return are on lines not visible in this fragment.
 */
434 unsigned long find_iomem(char *driver, unsigned long *len_out)
436 struct iomem_region *region = iomem_regions;
438 while(region != NULL){
439 if(!strcmp(region->driver, driver)){
440 *len_out = region->size;
441 return(region->virt);
/*
 * Initcall: map every registered iomem region's fd into the address space
 * above high_physmem, leaving a one-page guard gap between regions, and
 * record each region's virtual and physical addresses.  A failed mapping is
 * logged but iteration continues.  NOTE(review): the `err` declaration, the
 * error check around printk and the final return are not visible here.
 */
448 int setup_iomem(void)
450 struct iomem_region *region = iomem_regions;
/* Start one guard page above the top of physical memory. */
451 unsigned long iomem_start = high_physmem + PAGE_SIZE;
454 while(region != NULL){
455 err = os_map_memory((void *) iomem_start, region->fd, 0,
456 region->size, 1, 1, 0);
458 printk("Mapping iomem region for driver '%s' failed, "
459 "errno = %d\n", region->driver, -err);
461 region->virt = iomem_start;
462 region->phys = __pa(region->virt);
/* Advance past this region plus a one-page gap. */
465 iomem_start += region->size + PAGE_SIZE;
466 region = region->next;
472 __initcall(setup_iomem);
475 * Overrides for Emacs so that we follow Linus's tabbing style.
476 * Emacs will notice this stuff at the end of the file and automatically
477 * adjust the settings for this buffer only. This must remain at the end
479 * ---------------------------------------------------------------------------
481 * c-file-style: "linux"