/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

#include <linux/vmalloc.h>

#include <asm/uaccess.h>
#include <asm/tlbflush.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct *vmlist;

static void unmap_area_pte(pmd_t *pmd, unsigned long address,
			   unsigned long size)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_kernel(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	do {
		pte_t page;

		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}

static void unmap_area_pmd(pgd_t *dir, unsigned long address,
			   unsigned long size)
{
	unsigned long end;
	pmd_t *pmd;

	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		unmap_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}

static int map_area_pte(pte_t *pte, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;

	do {
		struct page *page = **pages;

		WARN_ON(!pte_none(*pte));
		if (!page)
			return -ENOMEM;

		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
		(*pages)++;
	} while (address < end);
	return 0;
}

static int map_area_pmd(pmd_t *pmd, unsigned long address,
			unsigned long size, pgprot_t prot,
			struct page ***pages)
{
	unsigned long base, end;

	base = address & PGDIR_MASK;
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, base + address);
		if (!pte)
			return -ENOMEM;
		if (map_area_pte(pte, address, end - address, prot, pages))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);

	return 0;
}

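/*
 * Note on the helpers above: map_area_pmd() walks the pmd entries covering
 * its slice of the requested range, allocating pte tables on demand with
 * pte_alloc_kernel() and letting map_area_pte() install one page from the
 * caller's page array per pte slot.  The unmap_area_*() helpers do the
 * reverse walk and simply clear the ptes.
 */
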
void unmap_vm_area(struct vm_struct *area)
{
	unsigned long address = (unsigned long) area->addr;
	unsigned long end = (address + area->size);
	pgd_t *dir;

	dir = pgd_offset_k(address);
	flush_cache_vunmap(address, end);
	do {
		unmap_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_kernel_range((unsigned long) area->addr, end);
}

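/*
 * Note the ordering above: the cache is flushed with flush_cache_vunmap()
 * while the mapping still exists, and the TLB is flushed only after every
 * pte in the range has been cleared.
 */
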
int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
{
	unsigned long address = (unsigned long) area->addr;
	unsigned long end = address + (area->size-PAGE_SIZE);
	pgd_t *dir;
	int err = 0;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd) {
			err = -ENOMEM;
			break;
		}
		if (map_area_pmd(pmd, address, end - address, prot, pages)) {
			err = -ENOMEM;
			break;
		}
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_cache_vmap((unsigned long) area->addr, end);
	return err;
}

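/*
 * Worked example (illustrative): a 3-page vmap()/vmalloc() request ends up
 * with area->size equal to 4 pages, because __get_vm_area() below always
 * adds one guard page.  map_vm_area() therefore stops one page short of
 * area->addr + area->size, leaving the guard page unmapped.
 */
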
struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
				unsigned long start, unsigned long end)
{
	struct vm_struct **p, *tmp, *area;
	unsigned long addr = start;

	area = kmalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		return NULL;

	/*
	 * We always allocate a guard page.
	 */
	size += PAGE_SIZE;
	if (unlikely(!size)) {
		kfree(area);
		return NULL;
	}

	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ;p = &tmp->next) {
		if ((unsigned long)tmp->addr < addr)
			continue;
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long)tmp->addr)
			goto found;
		addr = tmp->size + (unsigned long)tmp->addr;
		if (addr > end - size)
			goto out;
	}

found:
	area->next = *p;
	*p = area;

	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->pages = NULL;
	area->nr_pages = 0;
	write_unlock(&vmlist_lock);

	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}

/**
 * get_vm_area - reserve a contiguous kernel virtual area
 *
 * @size: size of the area
 * @flags: %VM_IOREMAP for I/O mappings or VM_ALLOC
 *
 * Search for an area of @size in the kernel virtual mapping area,
 * and reserve it for our purposes.  Returns the area descriptor
 * on success or %NULL on failure.
 */
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
}

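/*
 * Usage sketch (illustrative only, not from this file): an ioremap()-style
 * caller reserves a range first and then maps physical pages into it with
 * its own arch-specific remapping code:
 *
 *	struct vm_struct *area = get_vm_area(size, VM_IOREMAP);
 *	if (!area)
 *		return NULL;
 *	addr = area->addr;	(note: area->size includes the guard page)
 */
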
/**
 * remove_vm_area - find and remove a contiguous kernel virtual area
 *
 * @addr: base address
 *
 * Search for the kernel VM area starting at @addr, and remove it.
 * This function returns the found VM area, but using it is NOT safe
 * on SMP machines, except for its size field.
 */
struct vm_struct *remove_vm_area(void *addr)
{
	struct vm_struct **p, *tmp;

	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ;p = &tmp->next) {
		if (tmp->addr == addr)
			goto found;
	}
	write_unlock(&vmlist_lock);
	return NULL;

found:
	unmap_vm_area(tmp);
	*p = tmp->next;
	write_unlock(&vmlist_lock);
	return tmp;
}

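/*
 * Note: the vm_struct returned above is owned by the caller from this point
 * on; __vunmap() below is the usual in-file caller and kfree()s it once the
 * backing pages (if any) have been released.
 */
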
void __vunmap(void *addr, int deallocate_pages)
{
	struct vm_struct *area;

	if (!addr)
		return;

	if ((PAGE_SIZE-1) & (unsigned long)addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}

	area = remove_vm_area(addr);
	if (unlikely(!area)) {
		printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
				addr);
		return;
	}

	if (deallocate_pages) {
		int i;

		for (i = 0; i < area->nr_pages; i++) {
			if (unlikely(!area->pages[i]))
				BUG();
			__free_page(area->pages[i]);
		}
		kfree(area->pages);
	}

	kfree(area);
}

/**
 * vfree - release memory allocated by vmalloc()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr, as
 * obtained from vmalloc(), vmalloc_32() or __vmalloc().
 *
 * May not be called in interrupt context.
 */
void vfree(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 1);
}

EXPORT_SYMBOL(vfree);

/**
 * vunmap - release virtual mapping obtained by vmap()
 *
 * @addr: memory base address
 *
 * Free the virtually contiguous memory area starting at @addr,
 * which was created from the page array passed to vmap().
 *
 * May not be called in interrupt context.
 */
void vunmap(void *addr)
{
	BUG_ON(in_interrupt());
	__vunmap(addr, 0);
}

EXPORT_SYMBOL(vunmap);

/**
 * vmap - map an array of pages into virtually contiguous space
 *
 * @pages: array of page pointers
 * @count: number of pages to map
 * @flags: vm_area->flags
 * @prot: page protection for the mapping
 *
 * Maps @count pages from @pages into contiguous kernel virtual
 * space.
 */
void *vmap(struct page **pages, unsigned int count,
		unsigned long flags, pgprot_t prot)
{
	struct vm_struct *area;

	if (count > num_physpages)
		return NULL;

	area = get_vm_area((count << PAGE_SHIFT), flags);
	if (!area)
		return NULL;
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}

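/*
 * Usage sketch (illustrative, not from this file): a caller that already
 * holds an array of struct page pointers can make them virtually contiguous
 * and later drop the mapping again:
 *
 *	void *addr = vmap(pages, nr_pages, 0, PAGE_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	...use the nr_pages * PAGE_SIZE bytes at addr...
 *	vunmap(addr);
 *
 * The pages themselves are not freed by vunmap(); only the virtual mapping
 * and the vm_struct go away.
 */
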
/**
 * __vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags.  Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
{
	struct vm_struct *area;
	struct page **pages;
	unsigned int nr_pages, array_size, i;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;

	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;

	nr_pages = size >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));

	area->nr_pages = nr_pages;
	area->pages = pages = kmalloc(array_size, (gfp_mask & ~__GFP_HIGHMEM));
	if (!area->pages) {
		remove_vm_area(area->addr);
		kfree(area);
		return NULL;
	}
	memset(area->pages, 0, array_size);

	for (i = 0; i < area->nr_pages; i++) {
		area->pages[i] = alloc_page(gfp_mask);
		if (unlikely(!area->pages[i])) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
	}

	if (map_vm_area(area, prot, &pages))
		goto fail;
	return area->addr;

fail:
	vfree(area->addr);
	return NULL;
}

EXPORT_SYMBOL(__vmalloc);

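/*
 * Worked example (illustrative, assuming 4K pages and 8-byte pointers): a
 * 64KB __vmalloc() request gives nr_pages = 16 and array_size = 128, i.e.
 * the bookkeeping array is itself a small kmalloc() allocation, even though
 * the data pages may come from highmem.
 */
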
/**
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc);

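/*
 * Usage sketch (illustrative): vmalloc() is the usual choice for large,
 * long-lived buffers that do not need to be physically contiguous:
 *
 *	buf = vmalloc(nr_entries * sizeof(*buf));
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */
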
/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 *
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif

void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}

/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

EXPORT_SYMBOL(vmalloc_32);

long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}

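/*
 * vread() above and vwrite() below are helpers used by callers such as the
 * /dev/kmem driver: they copy byte-wise through whatever vm areas currently
 * cover [addr, addr + count) while holding vmlist_lock for reading,
 * zero-filling (vread) or skipping (vwrite) any holes between areas.
 */
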
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}