/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993 Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;
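
/*
 * Illustrative sketch, not part of the original file: vmlist is a
 * singly-linked list of reserved areas, kept sorted by address, and
 * any walk of it must hold vmlist_lock for reading.  The helper name
 * nr_vm_areas() is hypothetical.
 */
static int nr_vm_areas(void)
{
	struct vm_struct *tmp;
	int nr = 0;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next)
		nr++;
	read_unlock(&vmlist_lock);
	return nr;
}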

/*
 * Tear down the kernel PTEs covering one pmd's slice of
 * [address, address + size).
 */
static void unmap_area_pte(pmd_t *pmd, unsigned long address,
				  unsigned long size)
{
	unsigned long end;
	pte_t *pte;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}

	pte = pte_offset_kernel(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	do {
		pte_t page;
		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

/*
 * Tear down the page tables covering one pgd's slice of
 * [address, address + size).
 */
static void unmap_area_pmd(pgd_t *dir, unsigned long address,
				  unsigned long size)
{
	unsigned long end;
	pmd_t *pmd;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}

	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		unmap_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

/*
 * Install PTEs for one pmd's slice of [address, address + size),
 * consuming pages from the caller's cursor as we go.
 */
static int map_area_pte(pte_t *pte, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	do {
		struct page *page = **pages;

		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;

		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
		(*pages)++;
	} while (address < end);

	return 0;
}

/*
 * Allocate and populate the PTE pages for one pgd's slice of
 * [address, address + size).
 */
static int map_area_pmd(pmd_t *pmd, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long base, end;

	base = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		pte_t *pte = pte_alloc_kernel(&init_mm, pmd, base + address);
		if (!pte)
			return -ENOMEM;
		if (map_area_pte(pte, address, end - address, prot, pages))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);

	return 0;
}

void unmap_vm_area(struct vm_struct *area)
{
	unsigned long address = (unsigned long) area->addr;
	unsigned long end = (address + area->size);
	pgd_t *dir;

	dir = pgd_offset_k(address);
	flush_cache_vunmap(address, end);
	do {
		unmap_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long address = (unsigned long) area->addr;
	/* area->size includes a guard page which we never map */
	unsigned long end = address + (area->size - PAGE_SIZE);
	pgd_t *dir;
	int err = 0;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (map_area_pmd(pmd, address, end - address, prot, pages)) {
			err = -ENOMEM;
			break;
		}

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}
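
/*
 * Illustrative sketch, not part of the original file: map_vm_area()
 * advances the page-array cursor it is given as it installs PTEs, so
 * a caller that wants to keep its own array pointer passes the address
 * of a disposable copy.  example_map_pages() is a hypothetical name.
 */
static int example_map_pages(struct vm_struct *area, struct page **pages)
{
	struct page **cursor = pages;	/* consumed by map_vm_area() */

	return map_vm_area(area, PAGE_KERNEL, &cursor);
}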

struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long addr = start;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;
	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr)
			continue;
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = tmp->size + (unsigned long)tmp->addr;
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	area->phys_addr = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}
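
/*
 * Illustrative sketch, not part of the original file: __get_vm_area()
 * exists so callers can carve an area out of a window other than
 * [VMALLOC_START, VMALLOC_END); the lo/hi bounds here are caller-chosen.
 * example_reserve_in_window() is a hypothetical name.
 */
static struct vm_struct *example_reserve_in_window(unsigned long size,
						   unsigned long lo,
						   unsigned long hi)
{
	return __get_vm_area(size, VM_ALLOC, lo, hi);
}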

/**
 *	get_vm_area  -  reserve a contiguous kernel virtual area
 *
 *	@size:		size of the area
 *	@flags:		%VM_IOREMAP for I/O mappings or %VM_ALLOC
 *
 *	Search an area of @size in the kernel virtual mapping area,
 *	and reserve it for our purposes. Returns the area descriptor
 *	on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}
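
/*
 * Illustrative sketch, not part of the original file: the typical
 * reserve/release pairing an ioremap()-style caller would use.  The
 * example_* names are hypothetical, and the PTE installation a real
 * caller would do in between is omitted.
 */
static void *example_reserve_io(unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);

	return area ? area->addr : NULL;
}

static void example_release_io(void *addr)
{
	/* remove_vm_area() unmaps the area; only the descriptor is left. */
	kfree(remove_vm_area(addr));
}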

/**
 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
 *
 *	@addr:		base address
 *
 *	Search for the kernel VM area starting at @addr, and remove it.
 *	This function returns the found VM area, but using it is NOT safe
 *	on SMP machines, except for its size or flags.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	write_unlock(&vmlist_lock);
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;
	write_unlock(&vmlist_lock);
	return tmp;
}

void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}

		kfree(area->pages);
	}

	kfree(area);
	return;
}

/**
 *	vfree  -  release memory allocated by vmalloc()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr, as
 *	obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 *	May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 *	vunmap  -  release virtual mapping obtained by vmap()
 *
 *	@addr:		memory base address
 *
 *	Free the virtually contiguous memory area starting at @addr,
 *	which was created from the page array passed to vmap().
 *
 *	May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 *	vmap  -  map an array of pages into virtually contiguous space
 *
 *	@pages:		array of page pointers
 *	@count:		number of pages to map
 *	@flags:		vm_area->flags
 *	@prot:		page protection for the mapping
 *
 *	Maps @count pages from @pages into contiguous kernel virtual
 *	space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

EXPORT_SYMBOL(vmap);
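
/*
 * Illustrative sketch, not part of the original file: gluing two
 * separately allocated pages into one virtually contiguous window with
 * vmap(), undone later by vunmap().  example_map_pair() is a
 * hypothetical name, and the flags argument simply lands in
 * vm_struct->flags here.
 */
static void *example_map_pair(struct page *first, struct page *second)
{
	struct page *pair[2] = { first, second };

	return vmap(pair, 2, VM_ALLOC, PAGE_KERNEL);
}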

/**
 *	__vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *	@gfp_mask:	flags for the page level allocator
 *	@prot:		protection mask for the allocated pages
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator with @gfp_mask flags. Map them into contiguous
 *	kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	nr_pages = size >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

EXPORT_SYMBOL(__vmalloc);
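
/*
 * Illustrative sketch, not part of the original file: __vmalloc() is
 * the tool when the caller must constrain the page allocator, e.g.
 * allocating from a filesystem path where re-entering the fs via
 * reclaim could deadlock.  example_vmalloc_nofs() is a hypothetical
 * name.
 */
static void *example_vmalloc_nofs(unsigned long size)
{
	return __vmalloc(size, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
}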

/**
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);
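
/*
 * Illustrative sketch, not part of the original file: the classic
 * pattern for a table too large for kmalloc() but with no need to be
 * physically contiguous.  example_alloc_table() and nentries are
 * hypothetical.
 */
static unsigned long *example_alloc_table(unsigned long nentries)
{
	unsigned long *table = vmalloc(nentries * sizeof(*table));

	if (table)
		memset(table, 0, nentries * sizeof(*table));
	return table;	/* release with vfree(), never kfree() */
}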

/**
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);
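
/*
 * Illustrative sketch, not part of the original file: vmalloc_32() for
 * a buffer whose individual pages a 32bit-limited device will address;
 * the buffer itself is still only virtually contiguous.
 * example_alloc_dev_buffer() is a hypothetical name.
 */
static void *example_alloc_dev_buffer(unsigned long size)
{
	return vmalloc_32(size);
}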

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
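
/*
 * Illustrative sketch, not part of the original file: vread() is what a
 * /proc/kcore-style dumper uses to copy vmalloc space into a kernel
 * buffer; holes between areas read back as zeroes.
 * example_dump_vmalloc() is a hypothetical name.
 */
static long example_dump_vmalloc(char *kbuf, unsigned long count)
{
	return vread(kbuf, (char *) VMALLOC_START, count);
}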

long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
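
/*
 * Illustrative sketch, not part of the original file: vwrite() is the
 * store-side twin of vread(); bytes aimed at holes in the vmalloc
 * space are silently skipped.  example_patch_vmalloc() is a
 * hypothetical name.
 */
static long example_patch_vmalloc(char *kbuf, char *vaddr, unsigned long count)
{
	return vwrite(kbuf, vaddr, count);
}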