arch/um/kernel/mem.c [linux-2.6.git]
/*
 * Copyright (C) 2000, 2001, 2002 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/module.h"
#include "linux/types.h"
#include "linux/mm.h"
#include "linux/fs.h"
#include "linux/init.h"
#include "linux/bootmem.h"
#include "linux/swap.h"
#include "linux/slab.h"
#include "linux/vmalloc.h"
#include "linux/highmem.h"
#include "asm/page.h"
#include "asm/pgtable.h"
#include "asm/pgalloc.h"
#include "asm/bitops.h"
#include "asm/uaccess.h"
#include "asm/tlb.h"
#include "user_util.h"
#include "kern_util.h"
#include "mem_user.h"
#include "mem.h"
#include "kern.h"
#include "init.h"
#include "os.h"
#include "mode_kern.h"
#include "uml_uaccess.h"

/* Changed during early boot */
pgd_t swapper_pg_dir[1024];
unsigned long high_physmem;
unsigned long vm_start;
unsigned long vm_end;
unsigned long highmem;
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;

/* Not modified */
const char bad_pmd_string[] = "Bad pmd in pte_alloc: %08lx\n";

extern char __init_begin, __init_end;
extern long physmem_size;

/* Not changed by UML */
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Changed during early boot */
int kmalloc_ok = 0;

#define NREGIONS (phys_region_index(0xffffffff) - phys_region_index(0x0) + 1)
struct mem_region *regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] = NULL };
#define REGION_SIZE ((0xffffffff & ~REGION_MASK) + 1)

/* Changed during early boot */
static unsigned long brk_end;

static void map_cb(void *unused)
{
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}

void unmap_physmem(void)
{
        os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}

extern char __binary_start;

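/*
 * mem_init() finishes early memory setup: it maps the area between the end
 * of the brk and the end of the reserved region, hands the remaining
 * bootmem pages to the page allocator, and finally marks kmalloc() as
 * usable (kmalloc_ok).
 */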
void mem_init(void)
{
        unsigned long start;

        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        highmem_start_page = phys_page(__pa(high_physmem));
#endif

        /* clear the zero-page */
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* Fill in any hole at the start of the binary */
        start = (unsigned long) &__binary_start;
        if(uml_physmem != start){
                map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
                           1, 1, 0);
        }

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
        num_physpages = totalram_pages;
        max_mapnr = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n", 
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;
}

/* Changed during early boot */
static unsigned long kmem_top = 0;

unsigned long get_kmem_end(void)
{
        if(kmem_top == 0)
                kmem_top = CHOOSE_MODE(kmem_end_tt, kmem_end_skas);
        return(kmem_top);
}

void set_kmem_end(unsigned long new)
{
        kmem_top = new;
}

#ifdef CONFIG_HIGHMEM
/* Changed during early boot */
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset_kernel(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}
#endif /* CONFIG_HIGHMEM */

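/*
 * fixrange_init() walks the pgd/pmd entries covering [start, end) and
 * allocates any missing page tables from bootmem; the actual fixmap
 * entries are filled in later by set_fixmap().
 */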
static void __init fixrange_init(unsigned long start, unsigned long end, 
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                pmd = (pmd_t *)pgd;
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + 
                                                   (unsigned long) __pa(pte)));
                                if (pte != pte_offset_kernel(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}

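/*
 * init_maps() builds the struct page array for a memory region.  The main
 * physmem region reuses the global mem_map; other regions get their own
 * array, allocated with kmalloc()/vmalloc() once the slab is up, or from
 * bootmem before that.
 */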
int init_maps(struct mem_region *region)
{
        struct page *p, *map;
        int i, n, len;

        if(region == &physmem_region){
                region->mem_map = mem_map;
                return(0);
        }
        else if(region->mem_map != NULL) return(0);

        n = region->len >> PAGE_SHIFT;
        len = n * sizeof(struct page);
        if(kmalloc_ok){
                map = kmalloc(len, GFP_KERNEL);
                if(map == NULL) map = vmalloc(len);
        }
        else map = alloc_bootmem_low_pages(len);

        if(map == NULL)
                return(-ENOMEM);
        for(i = 0; i < n; i++){
                p = &map[i];
                set_page_count(p, 0);
                SetPageReserved(p);
                INIT_LIST_HEAD(&p->list);
        }
        region->mem_map = map;
        return(0);
}

DECLARE_MUTEX(regions_sem);

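/*
 * setup_one_range() finds a free slot in the regions[] table, backs it
 * with a memory file if no file descriptor was supplied, and fills in the
 * region descriptor.  It returns the slot index, or -1 if the table is
 * full.
 */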
static int setup_one_range(int fd, char *driver, unsigned long start, 
                           unsigned long pfn, int len, 
                           struct mem_region *region)
{
        int i;

        down(&regions_sem);
        for(i = 0; i < NREGIONS; i++){
                if(regions[i] == NULL) break;
        }
        if(i == NREGIONS){
                printk("setup_range : no free regions\n");
                i = -1;
                goto out;
        }

        if(fd == -1)
                fd = create_mem_file(len);

        if(region == NULL){
                region = alloc_bootmem_low_pages(sizeof(*region));
                if(region == NULL)
                        panic("Failed to allocate mem_region");
        }

        *region = ((struct mem_region) { .driver        = driver,
                                         .start_pfn     = pfn,
                                         .start         = start, 
                                         .len           = len, 
                                         .fd            = fd } );
        regions[i] = region;
 out:
        up(&regions_sem);
        return(i);
}

#ifdef CONFIG_HIGHMEM
static void init_highmem(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}

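/*
 * setup_highmem() creates regions for the highmem area above normal
 * physmem, points them at the tail of mem_map, and releases every page in
 * them to the page allocator with PG_highmem set.
 */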
void setup_highmem(unsigned long len)
{
        struct mem_region *region;
        struct page *page, *map;
        unsigned long phys;
        int i, cur, index;

        phys = physmem_size;
        do {
                cur = min(len, (unsigned long) REGION_SIZE);
                i = setup_one_range(-1, NULL, -1, phys >> PAGE_SHIFT, cur, 
                                    NULL);
                if(i == -1){
                        printk("setup_highmem - setup_one_range failed\n");
                        return;
                }
                region = regions[i];
                index = phys / PAGE_SIZE;
                region->mem_map = &mem_map[index];

                map = region->mem_map;
                for(i = 0; i < (cur >> PAGE_SHIFT); i++){
                        page = &map[i];
                        ClearPageReserved(page);
                        set_bit(PG_highmem, &page->flags);
                        set_page_count(page, 1);
                        __free_page(page);
                }
                phys += cur;
                len -= cur;
        } while(len > 0);
}
#endif

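/*
 * paging_init() allocates the zero and bad pages, sets up the zone sizes
 * (low memory and highmem), points the physmem regions at their slices of
 * mem_map, and builds the page tables for the fixmap and (if configured)
 * the highmem kmap areas.
 */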
void paging_init(void)
{
        struct mem_region *region;
        unsigned long zones_size[MAX_NR_ZONES], start, end, vaddr;
        int i, index;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i = 0; i < sizeof(zones_size)/sizeof(zones_size[0]); i++) 
                zones_size[i] = 0;
        zones_size[0] = (high_physmem >> PAGE_SHIFT) - 
                (uml_physmem >> PAGE_SHIFT);
        zones_size[2] = highmem >> PAGE_SHIFT;
        free_area_init(zones_size);
        start = phys_region_index(__pa(uml_physmem));
        end = phys_region_index(__pa(high_physmem - 1));
        for(i = start; i <= end; i++){
                region = regions[i];
                index = (region->start - uml_physmem) / PAGE_SIZE;
                region->mem_map = &mem_map[index];
                if(i > start) free_bootmem(__pa(region->start), region->len);
        }

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

#ifdef CONFIG_HIGHMEM
        init_highmem();
        setup_highmem(highmem);
#endif
}

pte_t __bad_page(void)
{
        clear_page(empty_bad_page);
        return pte_mkdirty(mk_pte((struct page *) empty_bad_page, 
                                  PAGE_SHARED));
}

/* This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

#ifdef CONFIG_BLK_DEV_INITRD

void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                printk("Freeing initrd memory: %ldk freed\n", 
                       (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}

#endif

void show_mem(void)
{
        int pfn, total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;

        printk("Mem-info:\n");
        show_free_areas();
        printk("Free swap:       %6dkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while(pfn-- > 0) {
                page = pfn_to_page(pfn);
                total++;
                if(PageHighMem(page))
                        highmem++;
                if(PageReserved(page))
                        reserved++;
                else if(PageSwapCache(page))
                        cached++;
                else if(page_count(page))
                        shared += page_count(page) - 1;
        }
        printk("%d pages of RAM\n", total);
        printk("%d pages of HIGHMEM\n", highmem);
        printk("%d reserved pages\n", reserved);
        printk("%d pages shared\n", shared);
        printk("%d pages swap cached\n", cached);
}

static int __init uml_mem_setup(char *line, int *add)
{
        char *retptr;
        physmem_size = memparse(line,&retptr);
        return 0;
}
__uml_setup("mem=", uml_mem_setup,
"mem=<Amount of desired ram>\n"
"    This controls how much \"physical\" memory the kernel allocates\n"
"    for the system. The size is specified as a number followed by\n"
"    one of 'k', 'K', 'm', 'M', which have the obvious meanings.\n"
"    This is not related to the amount of memory in the physical\n"
"    machine. It can be more, and the excess, if it's ever used, will\n"
"    just be swapped out.\n        Example: mem=64M\n\n"
);

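/*
 * arch_validate() checks that a newly allocated (non-highmem) page group
 * is really writable by touching each page through the copy_to_user
 * machinery; if a page faults, the allocation is retried, or dropped for
 * callers that can't sleep.
 */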
struct page *arch_validate(struct page *page, int mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if(page == NULL) return(page);
        if(PageHighMem(page)) return(page);

        addr = (unsigned long) page_address(page);
        for(i = 0; i < (1 << order); i++){
                current->thread.fault_addr = (void *) addr;
                if(__do_copy_to_user((void *) addr, &zero, 
                                     sizeof(zero),
                                     &current->thread.fault_addr,
                                     &current->thread.fault_catcher)){
                        if(!(mask & __GFP_WAIT)) return(NULL);
                        else break;
                }
                addr += PAGE_SIZE;
        }
        if(i == (1 << order)) return(page);
        page = alloc_pages(mask, order);
        goto again;
}

DECLARE_MUTEX(vm_reserved_sem);
static struct list_head vm_reserved = LIST_HEAD_INIT(vm_reserved);

/* Static structures, linked in to the list in early boot */
static struct vm_reserved head = {
        .list           = LIST_HEAD_INIT(head.list),
        .start          = 0,
        .end            = 0xffffffff
};

static struct vm_reserved tail = {
        .list           = LIST_HEAD_INIT(tail.list),
        .start          = 0,
        .end            = 0xffffffff
};

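/*
 * set_usable_vm() brackets the usable virtual address range with the
 * static head and tail reservations, so that later reserve_vm()/get_vm()
 * calls only hand out addresses between start and end.
 */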
void set_usable_vm(unsigned long start, unsigned long end)
{
        list_add(&head.list, &vm_reserved);
        list_add(&tail.list, &head.list);
        head.end = start;
        tail.start = end;
}

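/*
 * reserve_vm() records [start, end) as a reserved chunk of virtual address
 * space, keeping the vm_reserved list sorted by address.  The caller may
 * pass in a preallocated entry; otherwise one is kmalloc'd.
 */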
int reserve_vm(unsigned long start, unsigned long end, void *e)
{
        struct vm_reserved *entry = e, *reserved, *prev;
        struct list_head *ele;
        int err;

        down(&vm_reserved_sem);
        list_for_each(ele, &vm_reserved){
                reserved = list_entry(ele, struct vm_reserved, list);
                if(reserved->start >= end) goto found;
        }
        panic("Reserved vm out of range");
 found:
        prev = list_entry(ele->prev, struct vm_reserved, list);
        if(prev->end > start)
                panic("Can't reserve vm");
        if(entry == NULL)
                entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if(entry == NULL){
                printk("reserve_vm : Failed to allocate entry\n");
                err = -ENOMEM;
                goto out;
        }
        *entry = ((struct vm_reserved) 
                { .list         = LIST_HEAD_INIT(entry->list),
                  .start        = start,
                  .end          = end });
        list_add(&entry->list, &prev->list);
        err = 0;
 out:
        up(&vm_reserved_sem);
        return(err);
}

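/*
 * get_vm() scans the reserved list for a gap large enough for len bytes
 * plus a page of padding, reserves it, and returns the start address, or
 * 0 on failure.
 */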
unsigned long get_vm(unsigned long len)
{
        struct vm_reserved *this, *next;
        struct list_head *ele;
        unsigned long start;
        int err;

        down(&vm_reserved_sem);
        list_for_each(ele, &vm_reserved){
                this = list_entry(ele, struct vm_reserved, list);
                next = list_entry(ele->next, struct vm_reserved, list);
                if((this->start < next->start) && 
                   (this->end + len + PAGE_SIZE <= next->start))
                        goto found;
        }
        up(&vm_reserved_sem);
        return(0);
 found:
        up(&vm_reserved_sem);
        start = (unsigned long) UML_ROUND_UP(this->end) + PAGE_SIZE;
        err = reserve_vm(start, start + len, NULL);
        if(err) return(0);
        return(start);
}

int nregions(void)
{
        return(NREGIONS);
}

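/*
 * setup_range() splits an arbitrarily large range into REGION_SIZE chunks,
 * creating a mem_region for each one and, when need_vm is set, mapping it
 * into the kernel's virtual address space via setup_region().
 */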
void setup_range(int fd, char *driver, unsigned long start, unsigned long pfn,
                 unsigned long len, int need_vm, struct mem_region *region, 
                 void *reserved)
{
        int i, cur;

        do {
                cur = min(len, (unsigned long) REGION_SIZE);
                i = setup_one_range(fd, driver, start, pfn, cur, region);
                region = regions[i];
                if(need_vm && setup_region(region, reserved)){
                        kfree(region);
                        regions[i] = NULL;
                        return;
                }
                start += cur;
                if(pfn != -1) pfn += cur;
                len -= cur;
        } while(len > 0);
}

struct iomem {
        char *name;
        int fd;
        unsigned long size;
};

/* iomem regions can only be added on the command line at the moment.  
 * Locking will be needed when they can be added via mconsole.
 */

struct iomem iomem_regions[NREGIONS] = { [ 0 ... NREGIONS - 1 ] =
                                         { .name        = NULL,
                                           .fd          = -1,
                                           .size        = 0 } };

int num_iomem_regions = 0;

void add_iomem(char *name, int fd, unsigned long size)
{
        if(num_iomem_regions == sizeof(iomem_regions)/sizeof(iomem_regions[0]))
                return;
        size = (size + PAGE_SIZE - 1) & PAGE_MASK;
        iomem_regions[num_iomem_regions++] = 
                ((struct iomem) { .name         = name,
                                  .fd           = fd,
                                  .size         = size } );
}

int setup_iomem(void)
{
        struct iomem *iomem;
        int i;

        for(i = 0; i < num_iomem_regions; i++){
                iomem = &iomem_regions[i];
                setup_range(iomem->fd, iomem->name, -1, -1, iomem->size, 1, 
                            NULL, NULL);
        }
        return(0);
}

__initcall(setup_iomem);

#define PFN_UP(x)       (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)

/* Changed during early boot */
static struct mem_region physmem_region;
static struct vm_reserved physmem_reserved;

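/*
 * setup_physmem() registers the main physical memory area, initializes the
 * bootmem allocator over the part above reserve_end, and frees everything
 * past the bootmem bitmap for early allocations.
 */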
void setup_physmem(unsigned long start, unsigned long reserve_end,
                   unsigned long len)
{
        struct mem_region *region = &physmem_region;
        struct vm_reserved *reserved = &physmem_reserved;
        unsigned long cur, pfn = 0;
        int do_free = 1, bootmap_size;

        do {
                cur = min(len, (unsigned long) REGION_SIZE);
                if(region == NULL) 
                        region = alloc_bootmem_low_pages(sizeof(*region));
                if(reserved == NULL) 
                        reserved = alloc_bootmem_low_pages(sizeof(*reserved));
                if((region == NULL) || (reserved == NULL))
                        panic("Couldn't allocate physmem region or vm "
                              "reservation\n");
                setup_range(-1, NULL, start, pfn, cur, 1, region, reserved);

                if(do_free){
                        unsigned long reserve = reserve_end - start;
                        int pfn = PFN_UP(__pa(reserve_end));
                        int delta = (len - reserve) >> PAGE_SHIFT;

                        bootmap_size = init_bootmem(pfn, pfn + delta);
                        free_bootmem(__pa(reserve_end) + bootmap_size,
                                     cur - bootmap_size - reserve);
                        do_free = 0;
                }
                start += cur;
                pfn += cur >> PAGE_SHIFT;
                len -= cur;
                region = NULL;
                reserved = NULL;
        } while(len > 0);
}

struct mem_region *phys_region(unsigned long phys)
{
        unsigned int n = phys_region_index(phys);

        if(regions[n] == NULL) 
                panic("Physical address in uninitialized region");
        return(regions[n]);
}

unsigned long phys_offset(unsigned long phys)
{
        return(phys_addr(phys));
}

struct page *phys_mem_map(unsigned long phys)
{
        return((struct page *) phys_region(phys)->mem_map);
}

struct page *pte_mem_map(pte_t pte)
{
        return(phys_mem_map(pte_val(pte)));
}

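/*
 * page_region() does the reverse lookup: given a struct page, find the
 * region whose mem_map contains it, optionally returning the region index.
 * The pfn/phys/page conversion helpers below are all built on this and on
 * the per-region start_pfn/start fields.
 */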
struct mem_region *page_region(struct page *page, int *index_out)
{
        int i;
        struct mem_region *region;
        struct page *map;

        for(i = 0; i < NREGIONS; i++){
                region = regions[i];
                if(region == NULL) continue;
                map = region->mem_map;
                if((page >= map) && (page < &map[region->len >> PAGE_SHIFT])){
                        if(index_out != NULL) *index_out = i;
                        return(region);
                }
        }
        panic("No region found for page");
        return(NULL);
}

unsigned long page_to_pfn(struct page *page)
{
        struct mem_region *region = page_region(page, NULL);

        return(region->start_pfn + (page - (struct page *) region->mem_map));
}

struct mem_region *pfn_to_region(unsigned long pfn, int *index_out)
{
        struct mem_region *region;
        int i;

        for(i = 0; i < NREGIONS; i++){
                region = regions[i];
                if(region == NULL)
                        continue;

                if((region->start_pfn <= pfn) &&
                   (region->start_pfn + (region->len >> PAGE_SHIFT) > pfn)){
                        if(index_out != NULL) 
                                *index_out = i;
                        return(region);
                }
        }
        return(NULL);
}

struct page *pfn_to_page(unsigned long pfn)
{
        struct mem_region *region = pfn_to_region(pfn, NULL);
        struct page *mem_map = (struct page *) region->mem_map;

        return(&mem_map[pfn - region->start_pfn]);
}

unsigned long phys_to_pfn(unsigned long p)
{
        struct mem_region *region = regions[phys_region_index(p)];

        return(region->start_pfn + (phys_addr(p) >> PAGE_SHIFT));
}

unsigned long pfn_to_phys(unsigned long pfn)
{
        int n;
        struct mem_region *region = pfn_to_region(pfn, &n);

        return(mk_phys((pfn - region->start_pfn) << PAGE_SHIFT, n));
}

struct page *page_mem_map(struct page *page)
{
        return((struct page *) page_region(page, NULL)->mem_map);
}

extern unsigned long region_pa(void *virt)
{
        struct mem_region *region;
        unsigned long addr = (unsigned long) virt;
        int i;

        for(i = 0; i < NREGIONS; i++){
                region = regions[i];
                if(region == NULL) continue;
                if((region->start <= addr) && 
                   (addr <= region->start + region->len))
                        return(mk_phys(addr - region->start, i));
        }
        panic("region_pa : no region for virtual address");
        return(0);
}

extern void *region_va(unsigned long phys)
{
        return((void *) (phys_region(phys)->start + phys_addr(phys)));
}

unsigned long page_to_phys(struct page *page)
{
        int n;
        struct mem_region *region = page_region(page, &n);
        struct page *map = region->mem_map;
        return(mk_phys((page - map) << PAGE_SHIFT, n));
}

struct page *phys_to_page(unsigned long phys)
{
        struct page *mem_map;

        mem_map = phys_mem_map(phys);
        return(mem_map + (phys_offset(phys) >> PAGE_SHIFT));
}

static int setup_mem_maps(void)
{
        struct mem_region *region;
        int i;

        for(i = 0; i < NREGIONS; i++){
                region = regions[i];
                if((region != NULL) && (region->fd > 0)) init_maps(region);
        }
        return(0);
}

__initcall(setup_mem_maps);

/*
 * Allocate and free page tables.
 */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD, 
                       swapper_pg_dir + USER_PTRS_PER_PGD, 
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte)
                clear_page(pte);
        return pte;
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
        if (pte)
                clear_highpage(pte);
        return pte;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */