/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>


rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

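/*
 * Clear the kernel PTEs covering [address, address + size) within a
 * single pmd.  A swapped-out (non-empty but not present) entry should
 * never appear in a kernel page table, so complain if one is found.
 */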
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pte_t *pte;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }

        pte = pte_offset_kernel(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page))
                        continue;
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

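/*
 * Walk the pmds under one pgd entry and clear the PTEs for
 * [address, address + size), one pmd at a time.
 */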
static void unmap_area_pmd(pgd_t *dir, unsigned long address,
                           unsigned long size)
{
        unsigned long end;
        pmd_t *pmd;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }

        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                unmap_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

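/*
 * Fill the PTEs covering [address, address + size) within a single pmd
 * with pages taken from the *pages cursor, using protection prot.
 * Each slot is expected to be empty; the cursor is advanced as pages
 * are consumed.
 */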
static int map_area_pte(pte_t *pte, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;

        do {
                struct page *page = **pages;

                WARN_ON(!pte_none(*pte));
                if (!page)
                        return -ENOMEM;

                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
                (*pages)++;
        } while (address < end);
        return 0;
}

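/*
 * Allocate pte tables as needed under one pgd entry and hand each
 * pmd-sized chunk of [address, address + size) to map_area_pte().
 */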
static int map_area_pmd(pmd_t *pmd, unsigned long address,
                        unsigned long size, pgprot_t prot,
                        struct page ***pages)
{
        unsigned long base, end;

        base = address & PGDIR_MASK;
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;

        do {
                pte_t *pte = pte_alloc_kernel(&init_mm, pmd, base + address);
                if (!pte)
                        return -ENOMEM;
                if (map_area_pte(pte, address, end - address, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);

        return 0;
}

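/*
 * Tear down the kernel page table entries backing area and flush the
 * TLB over its address range.
 */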
void unmap_vm_area(struct vm_struct *area)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = (address + area->size);
        pgd_t *dir;

        dir = pgd_offset_k(address);
        flush_cache_vunmap(address, end);
        do {
                unmap_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_kernel_range((unsigned long) area->addr, end);
}

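/*
 * Map the pages referenced through *pages into the virtual range of
 * area, leaving the trailing guard page unmapped.  Intermediate page
 * tables are allocated under init_mm.page_table_lock; the *pages
 * cursor is advanced past every page that gets mapped.
 */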
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
        unsigned long address = (unsigned long) area->addr;
        unsigned long end = address + (area->size-PAGE_SIZE);
        pgd_t *dir;
        int err = 0;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
                if (!pmd) {
                        err = -ENOMEM;
                        break;
                }
                if (map_area_pmd(pmd, address, end - address, prot, pages)) {
                        err = -ENOMEM;
                        break;
                }

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));

        spin_unlock(&init_mm.page_table_lock);
        flush_cache_vmap((unsigned long) area->addr, end);
        return err;
}

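/*
 * Allocate a vm_struct and find a free hole of size (plus a one-page
 * guard) in the kernel virtual range [start, end), keeping vmlist
 * sorted by address.  Returns the new area, or NULL if no hole was
 * found or the descriptor could not be allocated.
 */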
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
{
        struct vm_struct **p, *tmp, *area;
        unsigned long addr = start;

        area = kmalloc(sizeof(*area), GFP_KERNEL);
        if (unlikely(!area))
                return NULL;

        /*
         * We always allocate a guard page.
         */
        size += PAGE_SIZE;
        if (unlikely(!size)) {
                kfree(area);
                return NULL;
        }

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if ((unsigned long)tmp->addr < addr)
                        continue;
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long)tmp->addr)
                        goto found;
                addr = tmp->size + (unsigned long)tmp->addr;
                if (addr > end - size)
                        goto out;
        }

found:
        area->next = *p;
        *p = area;

        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
        write_unlock(&vmlist_lock);

        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}

/**
 *      get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *      @size:          size of the area
 *      @flags:         %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 *      Search an area of @size in the kernel virtual mapping area,
 *      and reserve it for our purposes.  Returns the area descriptor
 *      on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
        return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

/**
 *      remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *      @addr:          base address
 *
 *      Search for the kernel VM area starting at @addr, and remove it.
 *      This function returns the found VM area, but using it is NOT safe
 *      on SMP machines.
 */
struct vm_struct *remove_vm_area(void *addr)
{
        struct vm_struct **p, *tmp;

        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p); p = &tmp->next) {
                if (tmp->addr == addr)
                        goto found;
        }
        write_unlock(&vmlist_lock);
        return NULL;

found:
        unmap_vm_area(tmp);
        *p = tmp->next;
        write_unlock(&vmlist_lock);
        return tmp;
}

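/*
 * Common helper for vfree() and vunmap(): remove the area mapped at
 * @addr and free the vm_struct.  When @deallocate_pages is set, the
 * backing pages and the pages[] array are released as well.
 */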
void __vunmap(void *addr, int deallocate_pages)
{
        struct vm_struct *area;

        if (!addr)
                return;

        if ((PAGE_SIZE-1) & (unsigned long)addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                WARN_ON(1);
                return;
        }

        area = remove_vm_area(addr);
        if (unlikely(!area)) {
                printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                WARN_ON(1);
                return;
        }

        if (deallocate_pages) {
                int i;

                for (i = 0; i < area->nr_pages; i++) {
                        if (unlikely(!area->pages[i]))
                                BUG();
                        __free_page(area->pages[i]);
                }

                kfree(area->pages);
        }

        kfree(area);
        return;
}

/**
 *      vfree  -  release memory allocated by vmalloc()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr, as
 *      obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *      May not be called in interrupt context.
 */
void vfree(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 *      vunmap  -  release virtual mapping obtained by vmap()
 *
 *      @addr:          memory base address
 *
 *      Free the virtually contiguous memory area starting at @addr,
 *      which was created from the page array passed to vmap().
 *
 *      May not be called in interrupt context.
 */
void vunmap(void *addr)
{
        BUG_ON(in_interrupt());
        __vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *      vmap  -  map an array of pages into virtually contiguous space
 *
 *      @pages:         array of page pointers
 *      @count:         number of pages to map
 *      @flags:         vm_area->flags
 *      @prot:          page protection for the mapping
 *
 *      Maps @count pages from @pages into contiguous kernel virtual
 *      space.
 */
void *vmap(struct page **pages, unsigned int count,
           unsigned long flags, pgprot_t prot)
{
        struct vm_struct *area;

        if (count > num_physpages)
                return NULL;

        area = get_vm_area((count << PAGE_SHIFT), flags);
        if (!area)
                return NULL;
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
        }

        return area->addr;
}

EXPORT_SYMBOL(vmap);

/**
 *      __vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *      @gfp_mask:      flags for the page level allocator
 *      @prot:          protection mask for the allocated pages
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator with @gfp_mask flags.  Map them into contiguous
 *      kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
        struct vm_struct *area;
        struct page **pages;
        unsigned int nr_pages, array_size, i;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;

        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;

        nr_pages = size >> PAGE_SHIFT;
        array_size = (nr_pages * sizeof(struct page *));

        area->nr_pages = nr_pages;
        area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
                return NULL;
        }
        memset(area->pages, 0, array_size);

        for (i = 0; i < area->nr_pages; i++) {
                area->pages[i] = alloc_page(gfp_mask);
                if (unlikely(!area->pages[i])) {
                        /* Successfully allocated i pages, free them in __vunmap() */
                        area->nr_pages = i;
                        goto fail;
                }
        }

        if (map_vm_area(area, prot, &pages))
                goto fail;
        return area->addr;

fail:
        vfree(area->addr);
        return NULL;
}

EXPORT_SYMBOL(__vmalloc);

/**
 *      vmalloc  -  allocate virtually contiguous memory
 *
 *      @size:          allocation size
 *
 *      Allocate enough pages to cover @size from the page level
 *      allocator and map them into contiguous kernel virtual space.
 *
 *      For tight control over the page level allocator and protection
 *      flags use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

/**
 *      vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *      @size:          allocation size
 *
 *      Allocate enough 32bit PA addressable pages to cover @size from the
 *      page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
        return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

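/*
 * Copy up to @count bytes from the vmalloc/vmap address @addr into
 * @buf.  Gaps before an area are filled with zero bytes; guard pages
 * are never touched.  Returns the number of bytes written to @buf.
 */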
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

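/*
 * Copy up to @count bytes from @buf into the vmalloc/vmap address
 * @addr.  Bytes that fall outside any mapped area are skipped rather
 * than written; guard pages are never touched.  Returns how far the
 * source buffer was advanced.
 */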
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}