/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>


rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                                  unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
                               unsigned long size, pgprot_t prot,
                               struct page ***pages)
{
        unsigned long base, end;

        base = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = address + (area->size-PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}

#define IOREMAP_MAX_ORDER       (7 + PAGE_SHIFT)        /* 128 pages */

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
        unsigned long addr;

        if (flags & VM_IOREMAP) {
                int bit = fls(size);

                if (bit > IOREMAP_MAX_ORDER)
                        bit = IOREMAP_MAX_ORDER;
                else if (bit < PAGE_SHIFT)
                        bit = PAGE_SHIFT;

                align = 1ul << bit;
        }
        addr = ALIGN(start, align);

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree (area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr) {
                        if((unsigned long)tmp->addr + tmp->size >= addr)
                                addr = ALIGN(tmp->size +
                                             (unsigned long)tmp->addr, align);
                        continue;
                }
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        if (printk_ratelimit())
                printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
        return NULL;
}

/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
                 if (tmp->addr == addr)
                         goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}
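
/*
 * Example (not part of the original file): a minimal sketch of the
 * low-level pattern that vmap() and __vmalloc() further down are built
 * from: reserve an area, wire caller-supplied pages into it with
 * map_vm_area(), then tear it down again.  The helper name is made up
 * for illustration; only functions already defined above are used, and
 * the caller is assumed to pass @count valid, already-allocated pages.
 */
static int example_map_vm_area_usage(struct page **pages, unsigned int count)
{
        struct vm_struct *area;
        struct page **cursor = pages;   /* map_vm_area() advances this */

        area = get_vm_area(count << PAGE_SHIFT, VM_ALLOC);
        if (!area)
                return -ENOMEM;

        if (map_vm_area(area, PAGE_KERNEL, &cursor)) {
                /* Mapping failed part-way; drop the reservation again. */
                remove_vm_area(area->addr);
                kfree(area);
                return -ENOMEM;
        }

        /* The pages are now visible, virtually contiguous, at area->addr. */

        /* remove_vm_area() also unmaps; the caller still owns the pages. */
        remove_vm_area(area->addr);
        kfree(area);
        return 0;
}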

void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
                unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);
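
/*
 * Example (not part of the original file): a minimal sketch of a
 * vmap()/vunmap() round trip over pages the caller already owns.  The
 * helper name is made up, and VM_MAP is assumed to be defined in this
 * tree's <linux/vmalloc.h>; the flag is only recorded in
 * vm_struct->flags, so it does not change behaviour here.
 */
static int example_vmap_usage(void)
{
        struct page *pages[4];
        void *vaddr;
        int i, ret = -ENOMEM;

        /* The pages need not be physically contiguous. */
        for (i = 0; i < 4; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
                if (!pages[i])
                        goto free_pages;
        }

        vaddr = vmap(pages, 4, VM_MAP, PAGE_KERNEL);
        if (vaddr) {
                memset(vaddr, 0, 4 * PAGE_SIZE);
                /* vunmap() drops the mapping but does not free the pages. */
                vunmap(vaddr);
                ret = 0;
        }

free_pages:
        while (--i >= 0)
                __free_page(pages[i]);
        return ret;
}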

/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);

/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);
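
/*
 * Example (not part of the original file): a minimal sketch of a
 * typical vmalloc()/vfree() pair.  The helper name and the 1 MB size
 * are made up for illustration; vmalloc_32() and vmalloc_exec() below
 * are used in exactly the same way.
 */
static int example_vmalloc_usage(void)
{
        void *buf;

        /* Far too large for kmalloc(); only virtual contiguity is needed. */
        buf = vmalloc(1024 * 1024);
        if (!buf)
                return -ENOMEM;

        memset(buf, 0, 1024 * 1024);

        /* ... use the buffer; it may not be physically contiguous ... */

        vfree(buf);
        return 0;
}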

/**
 *      vmalloc_exec  -  allocate virtually contiguous, executable memory
 *
 *      @size:          allocation size
 *
 *      Kernel-internal function to allocate enough pages to cover @size
 *      from the page level allocator and map them into contiguous and
 *      executable kernel virtual space.
 *
 *      For tight control over page level allocator and protection flags
 *      use __vmalloc() instead.
 */

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

void *vmalloc_exec(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32-bit physically addressable pages to cover @size
 *      from the page level allocator and map them into contiguous kernel
 *      virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
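
/*
 * Example (not part of the original file): a minimal sketch of how
 * /dev/kmem-style code might use vread() to copy out of the vmalloc
 * region.  The helper name and buffer size are made up for
 * illustration.
 */
static long example_vread_usage(char *vmalloc_addr)
{
        char buf[128];

        /*
         * vread() walks vmlist under the read lock, zero-fills gaps
         * between areas and copies bytes that lie inside one; the return
         * value is the number of bytes written into buf.
         */
        return vread(buf, vmalloc_addr, sizeof(buf));
}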