/* linux-2.6.git: arch/um/kernel/mem.c (vserver 1.9.3 tree) */
/* 
 * Copyright (C) 2000 - 2003 Jeff Dike (jdike@addtoit.com)
 * Licensed under the GPL
 */
5
6 #include "linux/stddef.h"
7 #include "linux/kernel.h"
8 #include "linux/mm.h"
9 #include "linux/bootmem.h"
10 #include "linux/swap.h"
11 #include "linux/highmem.h"
12 #include "linux/gfp.h"
13 #include "asm/page.h"
14 #include "asm/fixmap.h"
15 #include "asm/pgalloc.h"
16 #include "user_util.h"
17 #include "kern_util.h"
18 #include "kern.h"
19 #include "mem_user.h"
20 #include "uml_uaccess.h"
21 #include "os.h"
22
/* Start of the UML binary image; provided by the linker script. */
extern char __binary_start;

/* Changed during early boot */
unsigned long *empty_zero_page = NULL;
unsigned long *empty_bad_page = NULL;
pgd_t swapper_pg_dir[1024];
/* Size of the highmem region in bytes; zero when there is no highmem. */
unsigned long highmem;
/* Set to 1 at the end of mem_init() once kmalloc may be used. */
int kmalloc_ok = 0;

/* End of the kernel brk, page-rounded; set up in mem_init(). */
static unsigned long brk_end;
33
/*
 * Unmap the region between the kernel brk and uml_reserved from this
 * address space.  Only meaningful after mem_init() has set brk_end.
 */
void unmap_physmem(void)
{
	os_unmap_memory((void *) brk_end, uml_reserved - brk_end);
}
38
/*
 * Thread callback that maps the region between the kernel brk and
 * uml_reserved into the current address space.
 * NOTE(review): trailing args presumably mean (r, w, x) = (1, 1, 0) -
 * confirm against the map_memory() prototype in mem_user.h.
 */
static void map_cb(void *unused)
{
	map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
}
43
#ifdef CONFIG_HIGHMEM
/*
 * Hand the highmem region over to the page allocator.  Each page in
 * [highmem_start, highmem_start + highmem_len) is unreserved, flagged
 * PG_highmem, and freed onto the buddy lists.
 */
static void setup_highmem(unsigned long highmem_start,
                          unsigned long highmem_len)
{
        struct page *page;
        unsigned long highmem_pfn;
        int i;

        highmem_start_page = virt_to_page(highmem_start);

        highmem_pfn = __pa(highmem_start) >> PAGE_SHIFT;
        for(i = 0; i < highmem_len >> PAGE_SHIFT; i++){
                page = &mem_map[highmem_pfn + i];
                ClearPageReserved(page);
                set_bit(PG_highmem, &page->flags);
                /* Refcount must be 1 so __free_page() drops it to 0
                 * and actually frees the page. */
                set_page_count(page, 1);
                __free_page(page);
        }
}
#endif
64
/*
 * Early memory setup: compute max_low_pfn, map the hole between the
 * kernel brk and uml_reserved, release bootmem to the page allocator,
 * and account total/high RAM.  Runs once at boot; kmalloc becomes
 * usable only after kmalloc_ok is set at the end.
 */
void mem_init(void)
{
        unsigned long start;

        max_low_pfn = (high_physmem - uml_physmem) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
        highmem_start_page = phys_page(__pa(high_physmem));
#endif

        /* clear the zero-page */
        memset((void *) empty_zero_page, 0, PAGE_SIZE);

        /* Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_cb(NULL);
        /* Also run map_cb in the initial thread so its address space
         * gets the same mapping. */
        initial_thread_cb(map_cb, NULL);
        free_bootmem(__pa(brk_end), uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* Fill in any hole at the start of the binary */
        start = (unsigned long) &__binary_start;
        if(uml_physmem != start){
                map_memory(uml_physmem, __pa(uml_physmem), start - uml_physmem,
                           1, 1, 0);
        }

        /* this will put all low memory onto the freelists */
        totalram_pages = free_all_bootmem();
        totalhigh_pages = highmem >> PAGE_SHIFT;
        totalram_pages += totalhigh_pages;
        num_physpages = totalram_pages;
        max_pfn = totalram_pages;
        printk(KERN_INFO "Memory: %luk available\n", 
               (unsigned long) nr_free_pages() << (PAGE_SHIFT-10));
        kmalloc_ok = 1;

#ifdef CONFIG_HIGHMEM
        setup_highmem(end_iomem, highmem);
#endif
}
107
/*
 * Build the page-table structure covering [start, end) under pgd_base.
 * Only the tables are created; the ptes are filled in later (e.g. by
 * set_fixmap()).  Pages come from the boot allocator, so this must run
 * before free_all_bootmem().
 */
static void __init fixrange_init(unsigned long start, unsigned long end, 
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                /* Folded two-level layout: the pgd entry doubles as
                 * the pmd. */
                pmd = (pmd_t *)pgd;
                for (; (j < PTRS_PER_PMD) && (vaddr != end); pmd++, j++) {
                        if (pmd_none(*pmd)) {
                                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
                                set_pmd(pmd, __pmd(_KERNPG_TABLE + 
                                                   (unsigned long) __pa(pte)));
                                /* Sanity-check that the pte we installed
                                 * is the one the walk macros find. */
                                if (pte != pte_offset_kernel(pmd, 0))
                                        BUG();
                        }
                        vaddr += PMD_SIZE;
                }
                /* Subsequent pgds start from their first pmd slot. */
                j = 0;
        }
}
137
/* Fix: use #ifdef, matching every other CONFIG_HIGHMEM guard in this
 * file.  "#if CONFIG_HIGHMEM" silently evaluates an undefined macro as
 * 0 and is a preprocessor error if the macro is defined empty. */
#ifdef CONFIG_HIGHMEM
/* pte of the first fixmap kmap slot and the protection applied to
 * kmap mappings; both cached by kmap_init(). */
pte_t *kmap_pte;
pgprot_t kmap_prot;

#define kmap_get_fixmap_pte(vaddr)                                      \
        pte_offset(pmd_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr))

void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /* cache the first kmap pte */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

/*
 * Create the page tables for the permanent-kmap area, record the
 * pkmap page table, and initialize the kmap machinery.  Called from
 * paging_init() while bootmem is still available (fixrange_init()
 * allocates from it).
 */
static void init_highmem(void)
{
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        /*
         * Permanent kmaps:
         */
        vaddr = PKMAP_BASE;
        fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pmd = pmd_offset(pgd, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;

        kmap_init();
}
#endif /* CONFIG_HIGHMEM */
177
/*
 * Set up zone sizes and the fixmap page tables.  Zone index 0 gets all
 * low memory from uml_physmem to end_iomem; zone index 2 gets the
 * highmem page count.  Also allocates the zero page and the bad page
 * from bootmem.
 */
void paging_init(void)
{
        unsigned long zones_size[MAX_NR_ZONES], vaddr;
        int i;

        empty_zero_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        empty_bad_page = (unsigned long *) alloc_bootmem_low_pages(PAGE_SIZE);
        for(i=0;i<sizeof(zones_size)/sizeof(zones_size[0]);i++) 
                zones_size[i] = 0;
        /* NOTE(review): indices 0 and 2 presumably correspond to
         * ZONE_DMA and ZONE_HIGHMEM - confirm against the zone enum. */
        zones_size[0] = (end_iomem >> PAGE_SHIFT) - (uml_physmem >> PAGE_SHIFT);
        zones_size[2] = highmem >> PAGE_SHIFT;
        free_area_init(zones_size);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

#ifdef CONFIG_HIGHMEM
        init_highmem();
#endif
}
202
/*
 * Verify that every page of an allocation is actually backed by host
 * memory by writing a zero to each page through the fault-catching
 * copy_to_user machinery.  On a fault, atomic (!__GFP_WAIT) callers
 * get NULL; others retry with a fresh allocation until a fully
 * accessible one is found.  Highmem pages are not mapped here and are
 * accepted as-is.
 * NOTE(review): allocations that fail validation are never freed -
 * presumably deliberate, since those pages are unusable anyway.
 */
struct page *arch_validate(struct page *page, int mask, int order)
{
        unsigned long addr, zero = 0;
        int i;

 again:
        if(page == NULL) return(page);
        if(PageHighMem(page)) return(page);

        addr = (unsigned long) page_address(page);
        for(i = 0; i < (1 << order); i++){
                current->thread.fault_addr = (void *) addr;
                if(__do_copy_to_user((void *) addr, &zero,
                                     sizeof(zero),
                                     &current->thread.fault_addr,
                                     &current->thread.fault_catcher)){
                        /* Write faulted - give up or retry. */
                        if(!(mask & __GFP_WAIT)) return(NULL);
                        else break;
                }
                addr += PAGE_SIZE;
        }
        /* All (1 << order) pages were writable - success. */
        if(i == (1 << order)) return(page);
        page = alloc_pages(mask, order);
        goto again;
}
228
/* This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
        /* intentionally empty - see comment above */
}
236
#ifdef CONFIG_BLK_DEV_INITRD

/*
 * Return the initrd image's pages to the page allocator once the
 * initrd is no longer needed.  start and end are kernel virtual
 * addresses; each page is unreserved, given a refcount of one so
 * free_page() actually frees it, and added to totalram_pages.
 */
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start < end)
                /* (end - start) is unsigned long: %lu, not %ld */
                printk ("Freeing initrd memory: %luk freed\n", 
                        (end - start) >> 10);
        for (; start < end; start += PAGE_SIZE) {
                ClearPageReserved(virt_to_page(start));
                set_page_count(virt_to_page(start), 1);
                free_page(start);
                totalram_pages++;
        }
}
        
#endif
253
254 void show_mem(void)
255 {
256         int pfn, total = 0, reserved = 0;
257         int shared = 0, cached = 0;
258         int highmem = 0;
259         struct page *page;
260
261         printk("Mem-info:\n");
262         show_free_areas();
263         printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
264         pfn = max_mapnr;
265         while(pfn-- > 0) {
266                 page = pfn_to_page(pfn);
267                 total++;
268                 if(PageHighMem(page))
269                         highmem++;
270                 if(PageReserved(page))
271                         reserved++;
272                 else if(PageSwapCache(page))
273                         cached++;
274                 else if(page_count(page))
275                         shared += page_count(page) - 1;
276         }
277         printk("%d pages of RAM\n", total);
278         printk("%d pages of HIGHMEM\n", highmem);
279         printk("%d reserved pages\n", reserved);
280         printk("%d pages shared\n", shared);
281         printk("%d pages swap cached\n", cached);
282 }
283
284 /*
285  * Allocate and free page tables.
286  */
287
288 pgd_t *pgd_alloc(struct mm_struct *mm)
289 {
290         pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
291
292         if (pgd) {
293                 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
294                 memcpy(pgd + USER_PTRS_PER_PGD, 
295                        swapper_pg_dir + USER_PTRS_PER_PGD, 
296                        (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
297         }
298         return pgd;
299 }
300
/* Release a page directory allocated by pgd_alloc(). */
void pgd_free(pgd_t *pgd)
{
        free_page((unsigned long) pgd);
}
305
306 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
307 {
308         pte_t *pte;
309
310         pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
311         if (pte)
312                 clear_page(pte);
313         return pte;
314 }
315
316 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
317 {
318         struct page *pte;
319    
320         pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
321         if (pte)
322                 clear_highpage(pte);
323         return pte;
324 }
325
326 /*
327  * Overrides for Emacs so that we follow Linus's tabbing style.
328  * Emacs will notice this stuff at the end of the file and automatically
329  * adjust the settings for this buffer only.  This must remain at the end
330  * of the file.
331  * ---------------------------------------------------------------------------
332  * Local variables:
333  * c-file-style: "linux"
334  * End:
335  */