4 * Copyright (C) 1993 Linus Torvalds
5 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
6 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
7 * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
11 #include <linux/module.h>
12 #include <linux/highmem.h>
13 #include <linux/slab.h>
14 #include <linux/spinlock.h>
15 #include <linux/interrupt.h>
17 #include <linux/vmalloc.h>
19 #include <asm/uaccess.h>
20 #include <asm/tlbflush.h>
23 DEFINE_RWLOCK(vmlist_lock);
24 struct vm_struct *vmlist;
26 static void unmap_area_pte(pmd_t *pmd, unsigned long address,
40 pte = pte_offset_kernel(pmd, address);
48 page = ptep_get_and_clear(pte);
53 if (pte_present(page))
55 printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
56 } while (address < end);
59 static void unmap_area_pmd(pud_t *pud, unsigned long address,
73 pmd = pmd_offset(pud, address);
80 unmap_area_pte(pmd, address, end - address);
81 address = (address + PMD_SIZE) & PMD_MASK;
83 } while (address < end);
86 static void unmap_area_pud(pgd_t *pgd, unsigned long address,
100 pud = pud_offset(pgd, address);
101 address &= ~PGDIR_MASK;
102 end = address + size;
103 if (end > PGDIR_SIZE)
107 unmap_area_pmd(pud, address, end - address);
108 address = (address + PUD_SIZE) & PUD_MASK;
110 } while (address && (address < end));
113 static int map_area_pte(pte_t *pte, unsigned long address,
114 unsigned long size, pgprot_t prot,
115 struct page ***pages)
119 address &= ~PMD_MASK;
120 end = address + size;
125 struct page *page = **pages;
126 WARN_ON(!pte_none(*pte));
130 set_pte(pte, mk_pte(page, prot));
131 address += PAGE_SIZE;
134 } while (address < end);
138 static int map_area_pmd(pmd_t *pmd, unsigned long address,
139 unsigned long size, pgprot_t prot,
140 struct page ***pages)
142 unsigned long base, end;
144 base = address & PUD_MASK;
145 address &= ~PUD_MASK;
146 end = address + size;
151 pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
154 if (map_area_pte(pte, address, end - address, prot, pages))
156 address = (address + PMD_SIZE) & PMD_MASK;
158 } while (address < end);
163 static int map_area_pud(pud_t *pud, unsigned long address,
164 unsigned long end, pgprot_t prot,
165 struct page ***pages)
168 pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
171 if (map_area_pmd(pmd, address, end - address, prot, pages))
173 address = (address + PUD_SIZE) & PUD_MASK;
175 } while (address && address < end);
180 void unmap_vm_area(struct vm_struct *area)
182 unsigned long address = (unsigned long) area->addr;
183 unsigned long end = (address + area->size);
188 pgd = pgd_offset_k(address);
189 flush_cache_vunmap(address, end);
190 for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
191 next = (address + PGDIR_SIZE) & PGDIR_MASK;
192 if (next <= address || next > end)
194 unmap_area_pud(pgd, address, next - address);
198 flush_tlb_kernel_range((unsigned long) area->addr, end);
201 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
203 unsigned long address = (unsigned long) area->addr;
204 unsigned long end = address + (area->size-PAGE_SIZE);
210 pgd = pgd_offset_k(address);
211 spin_lock(&init_mm.page_table_lock);
212 for (i = pgd_index(address); i <= pgd_index(end-1); i++) {
213 pud_t *pud = pud_alloc(&init_mm, pgd, address);
218 next = (address + PGDIR_SIZE) & PGDIR_MASK;
219 if (next < address || next > end)
221 if (map_area_pud(pud, address, next, prot, pages)) {
230 spin_unlock(&init_mm.page_table_lock);
231 flush_cache_vmap((unsigned long) area->addr, end);
235 #define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
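/*
 * Editor's note (illustrative arithmetic, assuming PAGE_SHIFT == 12, i.e.
 * 4 KiB pages): IOREMAP_MAX_ORDER then evaluates to 19, so the alignment
 * order chosen for VM_IOREMAP areas in __get_vm_area() below is capped at
 * 2^19 bytes = 512 KiB, which is the 128 pages mentioned in the comment.
 */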
237 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
238 unsigned long start, unsigned long end)
240 struct vm_struct **p, *tmp, *area;
241 unsigned long align = 1;
244 if (flags & VM_IOREMAP) {
247 if (bit > IOREMAP_MAX_ORDER)
248 bit = IOREMAP_MAX_ORDER;
249 else if (bit < PAGE_SHIFT)
254 addr = ALIGN(start, align);
256 area = kmalloc(sizeof(*area), GFP_KERNEL);
261 * We always allocate a guard page.
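 *
 * (Editor's note, not in the original comment: because of this guard page,
 * an 8 KiB request on a 4 KiB-page system reserves 12 KiB of virtual
 * address space.  area->size includes the extra page, which is why
 * map_vm_area(), vread() and vwrite() subtract PAGE_SIZE from it.)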
264 if (unlikely(!size)) {
269 write_lock(&vmlist_lock);
270 for (p = &vmlist; (tmp = *p) != NULL ;p = &tmp->next) {
271 if ((unsigned long)tmp->addr < addr) {
272 if((unsigned long)tmp->addr + tmp->size >= addr)
273 addr = ALIGN(tmp->size +
274 (unsigned long)tmp->addr, align);
277 if ((size + addr) < addr)
279 if (size + addr <= (unsigned long)tmp->addr)
281 addr = ALIGN(tmp->size + (unsigned long)tmp->addr, align);
282 if (addr > end - size)
291 area->addr = (void *)addr;
296 write_unlock(&vmlist_lock);
301 write_unlock(&vmlist_lock);
303 if (printk_ratelimit())
304 printk(KERN_WARNING "allocation failed: out of vmalloc space - use vmalloc=<size> to increase size.\n");
309 * get_vm_area - reserve a contiguous kernel virtual area
311 * @size: size of the area
312 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
314 * Search an area of @size in the kernel virtual mapping area,
315 * and reserve it for our purposes. Returns the area descriptor
316 * on success or %NULL on failure.
318 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
320 return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
324 * remove_vm_area - find and remove a contiguous kernel virtual area
326 * @addr: base address
328 * Search for the kernel VM area starting at @addr, and remove it.
329 * This function returns the found VM area, but using it is NOT safe
332 struct vm_struct *remove_vm_area(void *addr)
334 struct vm_struct **p, *tmp;
336 write_lock(&vmlist_lock);
337 for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
338 if (tmp->addr == addr)
341 write_unlock(&vmlist_lock);
347 write_unlock(&vmlist_lock);
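/*
 * Editor's sketch (not part of the original file): how a caller, for
 * instance an ioremap()-style helper, might pair get_vm_area() with
 * remove_vm_area().  The function name, size and flags are illustrative
 * assumptions, not taken from this file.
 */
static void example_reserve_release(void)
{
	struct vm_struct *area;

	/* Reserve 4 pages of kernel virtual space (a guard page is added). */
	area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);
	if (!area)
		return;

	/* ... a real caller would now install mappings at area->addr ... */

	/* Give the virtual range back and free the descriptor. */
	kfree(remove_vm_area(area->addr));
}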
351 void __vunmap(void *addr, int deallocate_pages)
353 struct vm_struct *area;
358 if ((PAGE_SIZE-1) & (unsigned long)addr) {
359 printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
364 area = remove_vm_area(addr);
365 if (unlikely(!area)) {
366 printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
372 if (deallocate_pages) {
375 for (i = 0; i < area->nr_pages; i++) {
376 if (unlikely(!area->pages[i]))
378 __free_page(area->pages[i]);
381 if (area->nr_pages > PAGE_SIZE/sizeof(struct page *))
392 * vfree - release memory allocated by vmalloc()
394 * @addr: memory base address
396 * Free the virtually contiguous memory area starting at @addr, as
397 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
399 * May not be called in interrupt context.
401 void vfree(void *addr)
403 BUG_ON(in_interrupt());
407 EXPORT_SYMBOL(vfree);
410 * vunmap - release virtual mapping obtained by vmap()
412 * @addr: memory base address
414 * Free the virtually contiguous memory area starting at @addr,
415 * which was created from the page array passed to vmap().
417 * May not be called in interrupt context.
419 void vunmap(void *addr)
421 BUG_ON(in_interrupt());
425 EXPORT_SYMBOL(vunmap);
428 * vmap - map an array of pages into virtually contiguous space
430 * @pages: array of page pointers
431 * @count: number of pages to map
432 * @flags: vm_area->flags
433 * @prot: page protection for the mapping
435 * Maps @count pages from @pages into contiguous kernel virtual space.
438 void *vmap(struct page **pages, unsigned int count,
439 unsigned long flags, pgprot_t prot)
441 struct vm_struct *area;
443 if (count > num_physpages)
446 area = get_vm_area((count << PAGE_SHIFT), flags);
449 if (map_vm_area(area, prot, &pages)) {
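/*
 * Editor's sketch (not in the original source): viewing two already
 * allocated pages through one contiguous kernel address with vmap(), then
 * tearing the mapping down with vunmap().  The function name, the page
 * parameters and the choice of VM_ALLOC as the flag are illustrative
 * assumptions only.
 */
static void example_vmap_pair(struct page *p0, struct page *p1)
{
	struct page *pages[2] = { p0, p1 };
	void *vaddr;

	vaddr = vmap(pages, 2, VM_ALLOC, PAGE_KERNEL);
	if (!vaddr)
		return;

	/* ... both pages are now accessible contiguously through vaddr ... */

	vunmap(vaddr);		/* may not be called from interrupt context */
}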
460 * __vmalloc - allocate virtually contiguous memory
462 * @size: allocation size
463 * @gfp_mask: flags for the page level allocator
464 * @prot: protection mask for the allocated pages
466 * Allocate enough pages to cover @size from the page level
467 * allocator with @gfp_mask flags. Map them into contiguous
468 * kernel virtual space, using a pagetable protection of @prot.
470 void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
472 struct vm_struct *area;
474 unsigned int nr_pages, array_size, i;
476 size = PAGE_ALIGN(size);
477 if (!size || (size >> PAGE_SHIFT) > num_physpages)
480 area = get_vm_area(size, VM_ALLOC);
484 nr_pages = size >> PAGE_SHIFT;
485 array_size = (nr_pages * sizeof(struct page *));
487 area->nr_pages = nr_pages;
488 /* Please note that the recursion is strictly bounded. */
489 if (array_size > PAGE_SIZE)
490 pages = __vmalloc(array_size, gfp_mask, PAGE_KERNEL);
492 pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
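	/*
	 * Editor's note (illustrative arithmetic, assuming 4 KiB pages and
	 * 8-byte pointers): array_size only exceeds PAGE_SIZE once the
	 * request is larger than 512 pages (2 MiB).  The recursive call then
	 * needs a pages[] array 512 times smaller than the one it is
	 * allocating, so the recursion bottoms out after a couple of levels.
	 */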
495 remove_vm_area(area->addr);
499 memset(area->pages, 0, array_size);
501 for (i = 0; i < area->nr_pages; i++) {
502 area->pages[i] = alloc_page(gfp_mask);
503 if (unlikely(!area->pages[i])) {
504 /* Successfully allocated i pages, free them in __vunmap() */
510 if (map_vm_area(area, prot, &pages))
519 EXPORT_SYMBOL(__vmalloc);
522 * vmalloc - allocate virtually contiguous memory
524 * @size: allocation size
526 * Allocate enough pages to cover @size from the page level
527 * allocator and map them into contiguous kernel virtual space.
529 * For tight control over the page level allocator and protection flags
530 * use __vmalloc() instead.
532 void *vmalloc(unsigned long size)
534 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
537 EXPORT_SYMBOL(vmalloc);
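/*
 * Editor's sketch (not part of the original file): the typical
 * vmalloc()/vfree() pattern.  The function name and the 128 KiB size are
 * arbitrary examples.
 */
static int example_vmalloc_user(void)
{
	void *buf;

	buf = vmalloc(128 * 1024);	/* virtually, not physically, contiguous */
	if (!buf)
		return -ENOMEM;

	/* ... use buf from process context ... */

	vfree(buf);			/* must not be called from interrupt context */
	return 0;
}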
540 * vmalloc_exec - allocate virtually contiguous, executable memory
542 * @size: allocation size
544 * Kernel-internal function to allocate enough pages to cover @size from
545 * the page level allocator and map them into contiguous and
546 * executable kernel virtual space.
548 * For tight control over the page level allocator and protection flags
549 * use __vmalloc() instead.
552 #ifndef PAGE_KERNEL_EXEC
553 # define PAGE_KERNEL_EXEC PAGE_KERNEL
556 void *vmalloc_exec(unsigned long size)
558 return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
562 * vmalloc_32 - allocate virtually contiguous memory (32-bit addressable)
564 * @size: allocation size
566 * Allocate enough 32-bit physically addressable pages to cover @size from the
567 * page level allocator and map them into contiguous kernel virtual space.
569 void *vmalloc_32(unsigned long size)
571 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
574 EXPORT_SYMBOL(vmalloc_32);
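/*
 * Editor's sketch (not in the original source): choosing between the
 * vmalloc() variants defined above.  The function name, size parameter and
 * use cases are illustrative assumptions.
 */
static void example_vmalloc_variants(unsigned long size)
{
	void *low, *text;

	/* Backing pages must be 32-bit physically addressable (no highmem). */
	low = vmalloc_32(size);
	if (low)
		vfree(low);

	/* Mapping must allow execution, e.g. for generated code. */
	text = vmalloc_exec(size);
	if (text)
		vfree(text);
}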
576 long vread(char *buf, char *addr, unsigned long count)
578 struct vm_struct *tmp;
579 char *vaddr, *buf_start = buf;
582 /* Don't allow overflow */
583 if ((unsigned long) addr + count < count)
584 count = -(unsigned long) addr;
586 read_lock(&vmlist_lock);
587 for (tmp = vmlist; tmp; tmp = tmp->next) {
588 vaddr = (char *) tmp->addr;
589 if (addr >= vaddr + tmp->size - PAGE_SIZE)
591 while (addr < vaddr) {
599 n = vaddr + tmp->size - PAGE_SIZE - addr;
610 read_unlock(&vmlist_lock);
611 return buf - buf_start;
614 long vwrite(char *buf, char *addr, unsigned long count)
616 struct vm_struct *tmp;
617 char *vaddr, *buf_start = buf;
620 /* Don't allow overflow */
621 if ((unsigned long) addr + count < count)
622 count = -(unsigned long) addr;
624 read_lock(&vmlist_lock);
625 for (tmp = vmlist; tmp; tmp = tmp->next) {
626 vaddr = (char *) tmp->addr;
627 if (addr >= vaddr + tmp->size - PAGE_SIZE)
629 while (addr < vaddr) {
636 n = vaddr + tmp->size - PAGE_SIZE - addr;
647 read_unlock(&vmlist_lock);
648 return buf - buf_start;