This commit was manufactured by cvs2svn to create a tag (the tag name is missing from this extract).
[linux-2.6.git] / arch / um / kernel / physmem.c
1 /* 
2  * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
3  * Licensed under the GPL
4  */
5
6 #include "linux/mm.h"
7 #include "linux/ghash.h"
8 #include "linux/slab.h"
9 #include "linux/vmalloc.h"
10 #include "linux/bootmem.h"
11 #include "asm/types.h"
12 #include "asm/pgtable.h"
13 #include "kern_util.h"
14 #include "user_util.h"
15 #include "mode_kern.h"
16 #include "mem.h"
17 #include "mem_user.h"
18 #include "os.h"
19 #include "kern.h"
20 #include "init.h"
21
#if 0
/*
 * Dead code, disabled with #if 0: a sketch of a page-table-based lookup
 * that was apparently meant to replace the hash table used below.  It
 * does not compile as-is (pgd/pmd/pte are undeclared locals and
 * add_mapping has a malformed return type); kept only as a historical
 * reference.
 */
static pgd_t physmem_pgd[PTRS_PER_PGD];

static struct phys_desc *lookup_mapping(void *addr)
{
	pgd = &physmem_pgd[pgd_index(addr)];
	if(pgd_none(pgd))
		return(NULL);

	pmd = pmd_offset(pgd, addr);
	if(pmd_none(pmd))
		return(NULL);

	pte = pte_offset_kernel(pmd, addr);
	return((struct phys_desc *) pte_val(pte));
}

static struct add_mapping(void *addr, struct phys_desc *new)
{
}
#endif
43
/* Number of buckets in the virtual-address -> phys_desc hash table */
#define PHYS_HASHSIZE (8192)

struct phys_desc;

/* Generates the hash-table types (struct virtmem_table, struct
 * virtmem_ptrs) for a table of struct phys_desc - see linux/ghash.h. */
DEF_HASH_STRUCTS(virtmem, PHYS_HASHSIZE, struct phys_desc);

/*
 * One page whose default physmem backing has been substituted by a
 * mapping of some other host file descriptor.
 */
struct phys_desc {
	struct virtmem_ptrs virt_ptrs;	/* hash-table linkage */
	int fd;				/* host fd providing the backing */
	__u64 offset;			/* offset of the page within fd */
	void *virt;			/* virtual address of the page */
	unsigned long phys;		/* physical address of the page */
	struct list_head list;		/* entry in desc_mapping.pages */
};

/* Hash table of all substituted pages, keyed by virtual address */
struct virtmem_table virtmem_hash;
60
/*
 * Hash-table comparison callback: 0 when the two keys (virtual
 * addresses) are equal, non-zero otherwise.
 */
static int virt_cmp(void *virt1, void *virt2)
{
	return virt1 == virt2 ? 0 : 1;
}
65
66 static int virt_hash(void *virt)
67 {
68         unsigned long addr = ((unsigned long) virt) >> PAGE_SHIFT;
69         return(addr % PHYS_HASHSIZE);
70 }
71
/* Instantiates the hash-table accessors used below (find_virtmem_hash,
 * insert_virtmem_hash, remove_virtmem_hash) - see linux/ghash.h. */
DEF_HASH(static, virtmem, struct phys_desc, virt_ptrs, void *, virt, virt_cmp, 
         virt_hash);

/* All host descriptors which currently back at least one substituted page */
LIST_HEAD(descriptor_mappings);

/* Per-host-fd record, holding the list of pages mapped from that fd */
struct desc_mapping {
	int fd;				/* the host descriptor */
	struct list_head list;		/* entry in descriptor_mappings */
	struct list_head pages;		/* list of phys_desc.list entries */
};
82
83 static struct desc_mapping *find_mapping(int fd)
84 {
85         struct desc_mapping *desc;
86         struct list_head *ele;
87
88         list_for_each(ele, &descriptor_mappings){
89                 desc = list_entry(ele, struct desc_mapping, list);
90                 if(desc->fd == fd)
91                         return(desc);
92         }
93
94         return(NULL);
95 }
96
97 static struct desc_mapping *descriptor_mapping(int fd)
98 {
99         struct desc_mapping *desc;
100
101         desc = find_mapping(fd);
102         if(desc != NULL)
103                 return(desc);
104
105         desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
106         if(desc == NULL)
107                 return(NULL);
108
109         *desc = ((struct desc_mapping) 
110                 { .fd =         fd,
111                   .list =       LIST_HEAD_INIT(desc->list),
112                   .pages =      LIST_HEAD_INIT(desc->pages) });
113         list_add(&desc->list, &descriptor_mappings);
114
115         return(desc);
116 }
117
118 int physmem_subst_mapping(void *virt, int fd, __u64 offset, int w)
119 {
120         struct desc_mapping *fd_maps;
121         struct phys_desc *desc;
122         unsigned long phys;
123         int err;
124
125         phys = __pa(virt);
126         desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
127         if(desc != NULL){
128                 if((virt != desc->virt) || (fd != desc->fd) || 
129                    (offset != desc->offset))
130                         panic("Address 0x%p is already substituted\n", virt);
131                 return(0);
132         }
133
134         fd_maps = descriptor_mapping(fd);
135         if(fd_maps == NULL)
136                 return(-ENOMEM);
137
138         err = -ENOMEM;
139         desc = kmalloc(sizeof(*desc), GFP_ATOMIC);
140         if(desc == NULL)
141                 goto out;
142
143         *desc = ((struct phys_desc) 
144                 { .virt_ptrs =  { NULL, NULL },
145                   .fd =         fd,
146                   .offset =             offset,
147                   .virt =               virt,
148                   .phys =               __pa(virt),
149                   .list =               LIST_HEAD_INIT(desc->list) });
150         insert_virtmem_hash(&virtmem_hash, desc);
151
152         list_add(&desc->list, &fd_maps->pages);
153
154         virt = (void *) ((unsigned long) virt & PAGE_MASK);
155         err = os_map_memory(virt, fd, offset, PAGE_SIZE, 1, w, 0);
156         if(!err)
157                 goto out;
158
159         remove_virtmem_hash(&virtmem_hash, desc);
160         kfree(desc);
161  out:
162         return(err);
163 }
164
/* Host fd of the file backing UML "physical" memory; set up during
 * early boot in setup_physmem(). */
static int physmem_fd = -1;

/*
 * Undo a substitution: drop 'desc' from the hash and from its owner's
 * page list, free it, then map the default physmem page (read-write)
 * back over the virtual address.  Panics if the host mmap fails, since
 * the page would otherwise be left unbacked.
 */
static void remove_mapping(struct phys_desc *desc)
{
	void *virt = desc->virt;
	int err;

	remove_virtmem_hash(&virtmem_hash, desc);
	list_del(&desc->list);
	kfree(desc);

	/* Restore the default physmem backing for this page */
	err = os_map_memory(virt, physmem_fd, __pa(virt), PAGE_SIZE, 1, 1, 0);
	if(err)
		panic("Failed to unmap block device page from physical memory, "
		      "errno = %d", -err);
}
181
182 int physmem_remove_mapping(void *virt)
183 {
184         struct phys_desc *desc;
185
186         virt = (void *) ((unsigned long) virt & PAGE_MASK);
187         desc = find_virtmem_hash(&virtmem_hash, virt);
188         if(desc == NULL)
189                 return(0);
190
191         remove_mapping(desc);
192         return(1);
193 }
194
/*
 * Tear down all substituted mappings backed by 'fd' before the
 * descriptor goes away: each affected page is remapped to its default
 * physmem backing and then refilled by reading the old contents back
 * from the file, so the data remains visible afterwards.  Panics if
 * the seek or read fails, since the page contents would be lost.
 */
void physmem_forget_descriptor(int fd)
{
	struct desc_mapping *desc;
	struct phys_desc *page;
	struct list_head *ele, *next;
	__u64 offset;
	void *addr;
	int err;

	desc = find_mapping(fd);
	if(desc == NULL)
		return;

	/* Normally all pages should already have been removed */
	if(!list_empty(&desc->pages))
		printk("Still have mapped pages on fd %d\n", fd);

	/* _safe variant because remove_mapping() unlinks 'page' */
	list_for_each_safe(ele, next, &desc->pages){
		page = list_entry(ele, struct phys_desc, list);
		offset = page->offset;
		addr = page->virt;
		remove_mapping(page);
		err = os_seek_file(fd, offset);
		if(err)
			panic("physmem_forget_descriptor - failed to seek "
			      "to %lld in fd %d, error = %d\n",
			      offset, fd, -err);
		/* Copy the file contents back into the physmem page */
		err = os_read_file(fd, addr, PAGE_SIZE);
		if(err < 0)
			panic("physmem_forget_descriptor - failed to read "
			      "from fd %d to 0x%p, error = %d\n",
			      fd, addr, -err);
	}

	list_del(&desc->list);
	kfree(desc);
}
231
232 void arch_free_page(struct page *page, int order)
233 {
234         void *virt;
235         int i;
236
237         for(i = 0; i < (1 << order); i++){
238                 virt = __va(page_to_phys(page + i));
239                 physmem_remove_mapping(virt);
240         }
241 }
242
243 int is_remapped(const void *virt, int fd, __u64 offset)
244 {
245         struct phys_desc *desc;
246
247         desc = find_virtmem_hash(&virtmem_hash, (void *) virt);
248         if(desc == NULL)
249                 return(0);
250         if(offset != desc->offset)
251                 printk("offset mismatch\n");
252         return(find_virtmem_hash(&virtmem_hash, (void *) virt) != NULL);
253 }
254
255 /* Changed during early boot */
256 unsigned long high_physmem;
257
258 extern unsigned long physmem_size;
259
260 void *to_virt(unsigned long phys)
261 {
262         return((void *) uml_physmem + phys);
263 }
264
265 unsigned long to_phys(void *virt)
266 {
267         return(((unsigned long) virt) - uml_physmem);
268 }
269
270 int init_maps(unsigned long physmem, unsigned long iomem, unsigned long highmem)
271 {
272         struct page *p, *map;
273         unsigned long phys_len, phys_pages, highmem_len, highmem_pages;
274         unsigned long iomem_len, iomem_pages, total_len, total_pages;
275         int i;
276
277         phys_pages = physmem >> PAGE_SHIFT;
278         phys_len = phys_pages * sizeof(struct page);
279
280         iomem_pages = iomem >> PAGE_SHIFT;
281         iomem_len = iomem_pages * sizeof(struct page);
282
283         highmem_pages = highmem >> PAGE_SHIFT;
284         highmem_len = highmem_pages * sizeof(struct page);
285
286         total_pages = phys_pages + iomem_pages + highmem_pages;
287         total_len = phys_len + iomem_pages + highmem_len;
288
289         if(kmalloc_ok){
290                 map = kmalloc(total_len, GFP_KERNEL);
291                 if(map == NULL) 
292                         map = vmalloc(total_len);
293         }
294         else map = alloc_bootmem_low_pages(total_len);
295
296         if(map == NULL)
297                 return(-ENOMEM);
298
299         for(i = 0; i < total_pages; i++){
300                 p = &map[i];
301                 set_page_count(p, 0);
302                 SetPageReserved(p);
303                 INIT_LIST_HEAD(&p->lru);
304         }
305
306         mem_map = map;
307         max_mapnr = total_pages;
308         return(0);
309 }
310
311 struct page *phys_to_page(const unsigned long phys)
312 {
313         return(&mem_map[phys >> PAGE_SHIFT]);
314 }
315
316 struct page *__virt_to_page(const unsigned long virt)
317 {
318         return(&mem_map[__pa(virt) >> PAGE_SHIFT]);
319 }
320
321 unsigned long page_to_phys(struct page *page)
322 {
323         return((page - mem_map) << PAGE_SHIFT);
324 }
325
/*
 * Build a pte mapping 'page' with protection 'pgprot'.
 * NOTE(review): the results of pte_mknewpage()/pte_mknewprot() appear
 * to be discarded here - confirm against asm/pgtable.h whether these
 * operate on the pte in place (e.g. as lvalue macros); if they return
 * by value, the "new page"/"new prot" marking is lost.
 */
pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	pte_t pte;

	pte_val(pte) = page_to_phys(page) + pgprot_val(pgprot);
	if(pte_present(pte)) pte_mknewprot(pte_mknewpage(pte));
	return(pte);
}
334
335 /* Changed during early boot */
336 static unsigned long kmem_top = 0;
337
338 unsigned long get_kmem_end(void)
339 {
340         if(kmem_top == 0) 
341                 kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
342         return(kmem_top);
343 }
344
345 void map_memory(unsigned long virt, unsigned long phys, unsigned long len, 
346                 int r, int w, int x)
347 {
348         __u64 offset;
349         int fd, err;
350
351         fd = phys_mapping(phys, &offset);
352         err = os_map_memory((void *) virt, fd, offset, len, r, w, x);
353         if(err)
354                 panic("map_memory(0x%lx, %d, 0x%llx, %ld, %d, %d, %d) failed, "
355                       "err = %d\n", virt, fd, offset, len, r, w, x, err);
356 }
357
/* Round a byte address up to the next page frame number */
#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)

/*
 * Create the host file backing UML "physical" memory and map the
 * non-reserved part of it ([uml_reserved, start + len)) into the
 * address space, then hand the pages above the reserved region (minus
 * the bootmem bitmap) to the bootmem allocator.  Exits the process if
 * the host mapping fails.
 */
void setup_physmem(unsigned long start, unsigned long reserve_end,
		   unsigned long len, unsigned long highmem)
{
	unsigned long reserve = reserve_end - start;
	int pfn = PFN_UP(__pa(reserve_end));
	int delta = (len - reserve) >> PAGE_SHIFT;
	int err, offset, bootmap_size;

	/* Host file large enough for physmem plus highmem */
	physmem_fd = create_mem_file(len + highmem);

	offset = uml_reserved - uml_physmem;
	err = os_map_memory((void *) uml_reserved, physmem_fd, offset, 
			    len - offset, 1, 1, 0);
	if(err < 0){
		os_print_error(err, "Mapping memory");
		exit(1);
	}

	/* init_bootmem places its bitmap at pfn; don't free that part */
	bootmap_size = init_bootmem(pfn, pfn + delta);
	free_bootmem(__pa(reserve_end) + bootmap_size,
		     len - bootmap_size - reserve);
}
382
/*
 * Translate a physical address into the host fd and file offset that
 * back it, checking in order: substituted pages (virtmem_hash), normal
 * physmem, registered iomem regions, then highmem.  Stores the offset
 * in *offset_out and returns the fd, or -1 if the address falls in no
 * known range (in which case *offset_out is untouched).
 */
int phys_mapping(unsigned long phys, __u64 *offset_out)
{
	/* Substitutions are recorded by page, so look up the page base */
	struct phys_desc *desc = find_virtmem_hash(&virtmem_hash, 
						   __va(phys & PAGE_MASK));
	int fd = -1;

	if(desc != NULL){
		fd = desc->fd;
		*offset_out = desc->offset;
	}
	else if(phys < physmem_size){
		fd = physmem_fd;
		*offset_out = phys;
	}
	else if(phys < __pa(end_iomem)){
		/* Linear scan of the iomem region list */
		struct iomem_region *region = iomem_regions;
	
		while(region != NULL){
			if((phys >= region->phys) && 
			   (phys < region->phys + region->size)){
				fd = region->fd;
				*offset_out = phys - region->phys;
				break;
			}
			region = region->next;
		}
	}
	else if(phys < __pa(end_iomem) + highmem){
		/* Highmem lives in the physmem file after the iomem gap */
		fd = physmem_fd;
		*offset_out = phys - iomem_size;
	}

	return(fd);
}
417
418 static int __init uml_mem_setup(char *line, int *add)
419 {
420         char *retptr;
421         physmem_size = memparse(line,&retptr);
422         return 0;
423 }
/* Register the "mem=" option and its help text on the UML command line */
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the host.  It can\n"
"    be more, and the excess, if it's ever used, will just be swapped out.\n"
"       Example: mem=64M\n\n"
);
433
434 unsigned long find_iomem(char *driver, unsigned long *len_out)
435 {
436         struct iomem_region *region = iomem_regions;
437         
438         while(region != NULL){
439                 if(!strcmp(region->driver, driver)){
440                         *len_out = region->size;
441                         return(region->virt);
442                 }
443         }
444
445         return(0);
446 }
447
/*
 * Initcall: map every registered iomem region into the address space
 * starting one page above physmem, leaving a one-page gap between
 * regions, and record each region's virtual and physical addresses.
 * A mapping failure is logged but not fatal.  Always returns 0.
 */
int setup_iomem(void)
{
	struct iomem_region *region = iomem_regions;
	unsigned long iomem_start = high_physmem + PAGE_SIZE;
	int err;

	while(region != NULL){
		err = os_map_memory((void *) iomem_start, region->fd, 0, 
				    region->size, 1, 1, 0);
		if(err)
			printk("Mapping iomem region for driver '%s' failed, "
			       "errno = %d\n", region->driver, -err);
		else {
			region->virt = iomem_start;
			region->phys = __pa(region->virt);
		}

		/* + PAGE_SIZE keeps an unmapped guard page after each region */
		iomem_start += region->size + PAGE_SIZE;
		region = region->next;
	}

	return(0);
}
471
472 __initcall(setup_iomem);
473
474 /*
475  * Overrides for Emacs so that we follow Linus's tabbing style.
476  * Emacs will notice this stuff at the end of the file and automatically
477  * adjust the settings for this buffer only.  This must remain at the end
478  * of the file.
479  * ---------------------------------------------------------------------------
480  * Local variables:
481  * c-file-style: "linux"
482  * End:
483  */