/*
 * arch/um/kernel/physmem.c
 *
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */

#include "linux/mm.h"
#include "linux/ghash.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/bootmem.h"
#include "asm/types.h"
#include "asm/pgtable.h"
#include "kern_util.h"
#include "user_util.h"
#include "mode_kern.h"
#include "mem.h"
#include "mem_user.h"
#include "os.h"
#include "kern.h"
#include "init.h"

#if 0
/*
 * Disabled draft of a page table based lookup for substituted pages; the
 * hash table below is what is actually used.
 */
static pgd_t physmem_pgd[PTRS_PER_PGD];

static struct phys_desc *lookup_mapping(void *addr)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = &physmem_pgd[pgd_index((unsigned long) addr)];
	if(pgd_none(*pgd))
		return(NULL);

	pmd = pmd_offset(pgd, (unsigned long) addr);
	if(pmd_none(*pmd))
		return(NULL);

	pte = pte_offset_kernel(pmd, (unsigned long) addr);
	return((struct phys_desc *) pte_val(*pte));
}

static void add_mapping(void *addr, struct phys_desc *new)
{
}
#endif

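/*
 * A page of UML "physical" memory can be transparently replaced by a
 * mapping of a page of a host file.  Each substituted page is described by
 * a struct phys_desc, hashed by its kernel virtual address and linked onto
 * the page list of the host descriptor that backs it.
 */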
#define PHYS_HASHSIZE (8192)

struct phys_desc;

DEF_HASH_STRUCTS(virtmem, PHYS_HASHSIZE, struct phys_desc);

struct phys_desc {
	struct virtmem_ptrs virt_ptrs;
	int fd;
	__u64 offset;
	void *virt;
	unsigned long phys;
	struct list_head list;
};

struct virtmem_table virtmem_hash;

static int virt_cmp(void *virt1, void *virt2)
{
	return(virt1 != virt2);
}

static int virt_hash(void *virt)
{
	unsigned long addr = ((unsigned long) virt) >> PAGE_SHIFT;
	return(addr % PHYS_HASHSIZE);
}

DEF_HASH(static, virtmem, struct phys_desc, virt_ptrs, void *, virt, virt_cmp,
	 virt_hash);

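/*
 * Every host descriptor that currently backs at least one substituted page
 * gets a struct desc_mapping, so all of its pages can be found again when
 * the descriptor goes away (see physmem_forget_descriptor below).
 */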
LIST_HEAD(descriptor_mappings);

struct desc_mapping {
	int fd;
	struct list_head list;
	struct list_head pages;
};

static struct desc_mapping *find_mapping(int fd)
{
	struct desc_mapping *desc;
	struct list_head *ele;

	list_for_each(ele, &descriptor_mappings){
		desc = list_entry(ele, struct desc_mapping, list);
		if(desc->fd == fd)
			return(desc);
	}

	return(NULL);
}

static struct desc_mapping *descriptor_mapping(int fd)
{
	struct desc_mapping *desc;

	desc = find_mapping(fd);
	if(desc != NULL)
		return(desc);

	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
	if(desc == NULL)
		return(NULL);

	*desc = ((struct desc_mapping)
		{ .fd =		fd,
		  .list =	LIST_HEAD_INIT(desc->list),
		  .pages =	LIST_HEAD_INIT(desc->pages) });
	list_add(&desc->list, &descriptor_mappings);

	return(desc);
}

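/*
 * Replace the page containing 'virt' with a mapping of one page of the
 * host file 'fd' at 'offset', writably if 'w' is set.  A rough usage
 * sketch (the page-aligned address and the open host descriptor are
 * assumed to be set up by the caller):
 *
 *	err = physmem_subst_mapping(addr, fd, offset, 1);
 *	if(err)
 *		...				// page left untouched
 *	...
 *	physmem_remove_mapping(addr);		// restore the physmem page
 */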
int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
{
	struct desc_mapping *fd_maps;
	struct phys_desc *desc;
	unsigned long phys;
	int err;

	fd_maps = descriptor_mapping(fd);
	if(fd_maps == NULL)
		return(-ENOMEM);

	phys = __pa(virt);
	if(find_virtmem_hash(&virtmem_hash, virt) != NULL)
		panic("Address 0x%p is already substituted\n", virt);

	err = -ENOMEM;
	desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
	if(desc == NULL)
		goto out;

	*desc = ((struct phys_desc)
		{ .virt_ptrs =	{ NULL, NULL },
		  .fd =		fd,
		  .offset =	offset,
		  .virt =	virt,
		  .phys =	__pa(virt),
		  .list =	LIST_HEAD_INIT(desc->list) });
	insert_virtmem_hash(&virtmem_hash, desc);

	list_add(&desc->list, &fd_maps->pages);

	virt = (void *) ((unsigned long) virt & PAGE_MASK);
	err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
	if(!err)
		goto out;

	/* If the host mapping failed, the descriptor must come off the
	 * per-fd page list as well as out of the hash before being freed.
	 */
	remove_virtmem_hash(&virtmem_hash, desc);
	list_del(&desc->list);
	kfree(desc);
 out:
	return(err);
}

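/*
 * physmem_fd is the host file backing UML physical memory.  remove_mapping
 * drops a substitution and maps the corresponding page of that file back
 * in its place.
 */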
static int physmem_fd = -1;

static void remove_mapping(struct phys_desc *desc)
{
	void *virt = desc->virt;
	int err;

	remove_virtmem_hash(&virtmem_hash, desc);
	list_del(&desc->list);
	kfree(desc);

	err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
	if(err)
		panic("Failed to unmap block device page from physical memory, "
		      "errno = %d", -err);
}

int physmem_remove_mapping(void *virt)
{
	struct phys_desc *desc;

	virt = (void *) ((unsigned long) virt & PAGE_MASK);
	desc = find_virtmem_hash(&virtmem_hash, virt);
	if(desc == NULL)
		return(0);

	remove_mapping(desc);
	return(1);
}

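/*
 * Called before a host descriptor goes away - every page it is currently
 * substituted into is remapped back to the physmem file, and its current
 * contents are read back in so no data is lost.
 */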
void physmem_forget_descriptor(int fd)
{
	struct desc_mapping *desc;
	struct phys_desc *page;
	struct list_head *ele, *next;
	__u64 offset;
	void *addr;
	int err;

	desc = find_mapping(fd);
	if(desc == NULL)
		return;

	list_for_each_safe(ele, next, &desc->pages){
		page = list_entry(ele, struct phys_desc, list);
		offset = page->offset;
		addr = page->virt;
		remove_mapping(page);
		err = os_seek_file(fd, offset);
		if(err)
			panic("physmem_forget_descriptor - failed to seek "
			      "to %lld in fd %d, error = %d\n",
			      offset, fd, -err);
		err = os_read_file(fd, addr, PAGE_SIZE);
		if(err < 0)
			panic("physmem_forget_descriptor - failed to read "
			      "from fd %d to 0x%p, error = %d\n",
			      fd, addr, -err);
	}

	list_del(&desc->list);
	kfree(desc);
}

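/*
 * Any substitutions on the pages being freed are dropped, so the freed
 * memory goes back to being backed by the physmem file.
 */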
void arch_free_page(struct page *page, int order)
{
	void *virt;
	int i;

	for(i = 0; i < (1 << order); i++){
		virt = __va(page_to_phys(page + i));
		physmem_remove_mapping(virt);
	}
}

int is_remapped(void *virt)
{
	return(find_virtmem_hash(&virtmem_hash, virt) != NULL);
}

/* Changed during early boot */
unsigned long high_physmem;

extern unsigned long physmem_size;

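/*
 * UML "physical" memory is a single linear area starting at uml_physmem,
 * so physical <-> virtual conversion is just an offset - physical address
 * 0 corresponds to the virtual address uml_physmem itself.
 */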
void *to_virt(unsigned long phys)
{
	return((void *) uml_physmem + phys);
}

unsigned long to_phys(void *virt)
{
	return(((unsigned long) virt) - uml_physmem);
}

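/*
 * Allocate and initialize mem_map for the physical, iomem, and highmem
 * areas.  If the slab allocator is already up (kmalloc_ok), the map is
 * kmalloc'ed, falling back to vmalloc; during early boot it comes from
 * bootmem instead.
 */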
int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
{
	struct page *p, *map;
	unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
	unsigned long iomem_len, iomem_pages, total_len, total_pages;
	int i;

	phys_pages = physmem >> PAGE_SHIFT;
	phys_len = phys_pages * sizeof(struct page);

	iomem_pages = iomem >> PAGE_SHIFT;
	iomem_len = iomem_pages * sizeof(struct page);

	highmem_pages = highmem >> PAGE_SHIFT;
	highmem_len = highmem_pages * sizeof(struct page);

	total_pages = phys_pages + iomem_pages + highmem_pages;
	total_len = phys_len + iomem_len + highmem_len;

	if(kmalloc_ok){
		map = kmalloc(total_len, GFP_KERNEL);
		if(map == NULL)
			map = vmalloc(total_len);
	}
	else map = alloc_bootmem_low_pages(total_len);

	if(map == NULL)
		return(-ENOMEM);

	for(i = 0; i < total_pages; i++){
		p = &map[i];
		set_page_count(p, 0);
		SetPageReserved(p);
		INIT_LIST_HEAD(&p->lru);
	}

	mem_map = map;
	max_mapnr = total_pages;
	return(0);
}

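/*
 * Simple mem_map based conversions between page structures and physical
 * addresses - all of memory is covered by the single mem_map array set up
 * in init_maps above.
 */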
struct page *phys_to_page(const unsigned long phys)
{
	return(&mem_map[phys >> PAGE_SHIFT]);
}

struct page *__virt_to_page(const unsigned long virt)
{
	return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
}

unsigned long page_to_phys(struct page *page)
{
	return((page - mem_map) << PAGE_SHIFT);
}

pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = page_to_phys(page) + pgprot_val(pgprot);
	if(pte_present(pte))
		pte_mknewprot(pte_mknewpage(pte));
	return(pte);
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
	if(kmem_top == 0)
		kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
	return(kmem_top);
}

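/*
 * Map 'len' bytes of whatever backs physical address 'phys' (the physmem
 * file, an iomem region, or a substituted descriptor) at virtual address
 * 'virt' with the given access rights.
 */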
void map_memory(unsigned long virt, unsigned long phys, unsigned long len,
		int r, int w, int x)
{
	__u64 offset;
	int fd, err;

	fd = phys_mapping(phys, &offset);
	err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
	if(err)
		panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
		      "err = %d\n", virt, fd, offset, len, r, w, x, err);
}

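/*
 * setup_physmem() creates the host file that backs physical memory (plus
 * highmem), maps the part above the already-loaded kernel image over the
 * address space, and hands everything past the bootmem bitmap to the
 * bootmem allocator.
 */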
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

void setup_physmem(unsigned long start, unsigned long reserve_end,
		   unsigned long len, unsigned long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;

	physmem_fd = create_mem_file(len + highmem);

	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset,
			    len - offset, 1, 1, 0);
	if(err < 0){
		os_print_error(err, "Mapping memory");
		exit(1);
	}

	bootmap_size = init_bootmem(pfn, pfn + delta);
	free_bootmem(__pa(reserve_end) + bootmap_size,
		     len - bootmap_size - reserve);
}

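/*
 * Translate a physical address into the host descriptor and file offset
 * that back it.  Substituted pages are looked up in the hash first; other
 * addresses fall into the physmem file, an iomem region, or the highmem
 * part of the physmem file, in that order.  Returns -1 if the address is
 * not backed by anything.
 */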
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
	struct phys_desc *desc = find_virtmem_hash(&virtmem_hash,
						   __va(phys & PAGE_MASK));
	int fd = -1;

	if(desc != NULL){
		fd = desc->fd;
		*offset_out = desc->offset;
	}
	else if(phys < physmem_size){
		fd = physmem_fd;
		*offset_out = phys;
	}
	else if(phys < __pa(end_iomem)){
		struct iomem_region *region = iomem_regions;

		while(region != NULL){
			if((phys >= region->phys) &&
			   (phys < region->phys + region->size)){
				fd = region->fd;
				*offset_out = phys - region->phys;
				break;
			}
			region = region->next;
		}
	}
	else if(phys < __pa(end_iomem) + highmem){
		fd = physmem_fd;
		*offset_out = phys - iomem_size;
	}

	return(fd);
}

static int __init uml_mem_setup(char *line, int *add)
{
	char *retptr;
	physmem_size = memparse(line, &retptr);
	return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host.  It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"	Example: mem=64M\n\n"
);

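/*
 * Look up the iomem region registered under the given driver name,
 * returning its virtual address and filling in its length.  A rough usage
 * sketch - "mydriver" is purely illustrative; the name is whatever the
 * region was registered under (typically from the "iomem=" switch):
 *
 *	unsigned long addr, len;
 *
 *	addr = find_iomem("mydriver", &len);
 *	if(addr == 0)
 *		...				// no such region
 */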
unsigned long find_iomem(char *driver, unsigned long *len_out)
{
	struct iomem_region *region = iomem_regions;

	while(region != NULL){
		if(!strcmp(region->driver, driver)){
			*len_out = region->size;
			return(region->virt);
		}
		region = region->next;
	}

	return(0);
}

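/*
 * Map every registered iomem region into the address space just above
 * physical memory, leaving an unmapped guard page between regions, and
 * record where each one ended up.
 */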
int setup_iomem(void)
{
	struct iomem_region *region = iomem_regions;
	unsigned long iomem_start = high_physmem + PAGE_SIZE;
	int err;

	while(region != NULL){
		err = os_map_memory((void *) iomem_start, region->fd, 0,
				    region->size, 1, 1, 0);
		if(err)
			printk("Mapping iomem region for driver '%s' failed, "
			       "errno = %d\n", region->driver, -err);
		else {
			region->virt = iomem_start;
			region->phys = __pa(region->virt);
		}

		iomem_start += region->size + PAGE_SIZE;
		region = region->next;
	}

	return(0);
}

__initcall(setup_iomem);

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */