VServer 1.9.2 (patch-2.6.8.1-vs1.9.2.diff)
/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

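/*
 * unmap_area_pte - clear the kernel PTEs backing the part of a vmalloc
 * area that falls under this pmd.  Kernel mappings are never swapped out,
 * so an entry that is neither empty nor present is reported as a bug.
 */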
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

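/*
 * unmap_area_pmd - walk the pmd entries under one pgd entry and tear down
 * the PTEs for the given address range via unmap_area_pte().
 */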
static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

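/*
 * map_area_pte - install pages from the caller's page array into the
 * kernel PTEs covering one pmd's worth of the area, advancing the array
 * pointer as it goes.  Returns -ENOMEM if a page pointer is missing.
 */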
static int map_area_pte(pte_t *pte, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

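/*
 * map_area_pmd - allocate kernel page tables as needed below one pgd
 * entry and populate them with map_area_pte().
 */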
static int map_area_pmd(pmd_t *pmd, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long base, end;

        base = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

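/*
 * unmap_vm_area - remove the kernel page table entries backing @area and
 * flush the caches and TLB entries for that virtual range.
 */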
void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

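/*
 * map_vm_area - map the caller-supplied pages into the virtual range of
 * @area (excluding the guard page), allocating intermediate page tables
 * under init_mm.page_table_lock as needed.
 */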
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = address + (area->size-PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}

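/*
 * __get_vm_area - allocate a vm_struct and find a free range of kernel
 * virtual addresses between @start and @end large enough for @size plus
 * a one page guard gap, then link the new area into the sorted vmlist.
 */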
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = start;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree (area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr)
                        continue;
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}

/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                 if (tmp->addr == addr)
                         goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}

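/*
 * __vunmap - common helper for vfree() and vunmap(): unlink and unmap the
 * area at @addr and, if @deallocate_pages is set, also free the underlying
 * pages and the page array.
 */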
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);

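/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller could map two independently allocated pages into one contiguous
 * kernel virtual range and later drop the mapping with vunmap().  The
 * names my_pages and va are made up, VM_ALLOC is chosen arbitrarily as
 * the area flag, and error handling for alloc_page() is omitted.
 *
 *      struct page *my_pages[2];
 *      void *va;
 *
 *      my_pages[0] = alloc_page(GFP_KERNEL);
 *      my_pages[1] = alloc_page(GFP_KERNEL);
 *      va = vmap(my_pages, 2, VM_ALLOC, PAGE_KERNEL);
 *      if (va) {
 *              ...
 *              vunmap(va);
 *      }
 *      __free_page(my_pages[0]);
 *      __free_page(my_pages[1]);
 */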
/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);

/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over the page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

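/*
 * Illustrative sketch (not part of the original file): a typical caller
 * allocates a virtually contiguous buffer with vmalloc() from process
 * context and releases it with vfree().  The name my_buf is made up.
 *
 *      void *my_buf = vmalloc(64 * 1024);
 *
 *      if (!my_buf)
 *              return -ENOMEM;
 *      ...
 *      vfree(my_buf);
 */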
/**
 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *      @size:          allocation size
 *
 *      Kernel-internal function to allocate enough pages to cover @size
 *      from the page level allocator and map them into contiguous and
 *      executable kernel virtual space.
 *
 *      For tight control over the page level allocator and protection flags
 *      use __vmalloc() instead.
 */

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32-bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32-bit physically addressable pages to cover @size
 *      from the page level allocator and map them into contiguous kernel
 *      virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

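/*
 * vread - copy up to @count bytes from the vmalloc address range starting
 * at @addr into @buf, zero-filling any gaps between vm areas.  Returns the
 * number of bytes stored in @buf.
 */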
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

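/*
 * vwrite - copy up to @count bytes from @buf into the vmalloc address
 * range starting at @addr, skipping bytes that fall into gaps between vm
 * areas.  Returns the number of bytes consumed from @buf.
 */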
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}