#include <linux/mount.h>
#include <linux/mempolicy.h>
#include <linux/rmap.h>
-#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};
-pgprot_t vm_get_page_prot(unsigned long vm_flags)
-{
- return protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
-}
-EXPORT_SYMBOL(vm_get_page_prot);
-
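
For reference, the vm_get_page_prot() helper removed above simply indexes protection_map[] with the low four vm_flags bits, exactly as the open-coded lookups restored later in this patch do. A minimal userspace sketch of the idiom; the flag values and table strings are illustrative stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's VM_* bits (see linux/mm.h). */
#define VM_READ   0x1UL
#define VM_WRITE  0x2UL
#define VM_EXEC   0x4UL
#define VM_SHARED 0x8UL

/* 16 precomputed protections: __P000..__P111 private, __S000..__S111
 * shared, the digits read as xwr. */
static const char *protection_map[16] = {
	"__P000", "__P001", "__P010", "__P011",
	"__P100", "__P101", "__P110", "__P111",
	"__S000", "__S001", "__S010", "__S011",
	"__S100", "__S101", "__S110", "__S111",
};

static const char *get_page_prot(unsigned long vm_flags)
{
	/* The low four flag bits index straight into the table. */
	return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
}

int main(void)
{
	printf("%s\n", get_page_prot(VM_READ|VM_WRITE));          /* __P011 */
	printf("%s\n", get_page_prot(VM_SHARED|VM_READ|VM_EXEC)); /* __S101 */
	return 0;
}
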
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
unsigned long n;
- free = global_page_state(NR_FILE_PAGES);
+ free = get_page_cache_size();
free += nr_swap_pages;
/*
 * nr_free_pages() is very expensive on large systems,
 * so only call it if we're about to fail.
 */
n = nr_free_pages();
-
- /*
- * Leave the reserved pages alone; they are not available for
- * anonymous mappings.
- */
- if (n <= totalreserve_pages)
- goto error;
- else
- n -= totalreserve_pages;
-
- /*
- * Leave the last 3% for root
- */
if (!cap_sys_admin)
n -= n / 32;
free += n;
if (free > pages)
return 0;
-
- goto error;
+ vm_unacct_memory(pages);
+ return -ENOMEM;
}
allowed = (totalram_pages - hugetlb_total_pages())
*/
if (atomic_read(&vm_committed_space) < (long)allowed)
return 0;
-error:
+
vm_unacct_memory(pages);
return -ENOMEM;
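
The OVERCOMMIT_GUESS path restored above estimates whether a request can succeed from page-cache pages plus free swap, adding free RAM (minus a roughly 3% reserve for root) only when the cheap estimate is about to fail. A hedged standalone sketch; all inputs are passed in explicitly because get_page_cache_size(), nr_swap_pages and nr_free_pages() are kernel-internal:

#include <stdbool.h>
#include <stdio.h>

static int vm_enough_memory_guess(unsigned long pages,
				  unsigned long cache_pages,
				  unsigned long swap_pages,
				  unsigned long free_ram_pages,
				  bool cap_sys_admin)
{
	unsigned long free = cache_pages + swap_pages;
	unsigned long n = free_ram_pages;

	/* Leave the last 3% (roughly n/32) of free RAM for root. */
	if (!cap_sys_admin)
		n -= n / 32;
	free += n;

	return free > pages ? 0 : -1;	/* the kernel returns -ENOMEM */
}

int main(void)
{
	/* 1000 requested pages vs 512 cache + 256 swap + 300 free RAM. */
	printf("%d\n", vm_enough_memory_guess(1000, 512, 256, 300, false));
	return 0;
}
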
if (brk < mm->end_code)
goto out;
-
- /*
- * Check against rlimit here. If this check is done later, after the
- * test of oldbrk with newbrk, it can escape the test and let the data
- * segment grow beyond its set limit in the case where the limit is
- * not page aligned. -Ram Gupta
- */
- rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
- if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
- goto out;
-
newbrk = PAGE_ALIGN(brk);
oldbrk = PAGE_ALIGN(mm->brk);
if (oldbrk == newbrk)
goto out;
}
+ /* Check against rlimit.. */
+ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
/* Check against existing mmap mappings. */
if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
goto out;
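
Both placements of the RLIMIT_DATA test in the hunk above compare the same quantities: the distance from start_data to the requested break against the soft limit. A userspace sketch of that comparison using getrlimit(); the addresses in main() are made-up illustrations, not real process state:

#include <stdio.h>
#include <sys/resource.h>

/*
 * Sketch of the sys_brk() rlimit test: refuse a new break that would
 * push the data segment past the RLIMIT_DATA soft limit.
 */
static int brk_within_rlimit(unsigned long start_data, unsigned long brk)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_DATA, &rl) != 0)
		return 0;
	if (rl.rlim_cur != RLIM_INFINITY && brk - start_data > rl.rlim_cur)
		return 0;	/* over the limit; sys_brk would goto out */
	return 1;
}

int main(void)
{
	printf("%d\n", brk_within_rlimit(0x601000UL, 0x700000UL));
	return 0;
}
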
i = browse_rb(&mm->mm_rb);
if (i != mm->map_count)
printk("map_count %d rb %d\n", mm->map_count, i), bug = 1;
- BUG_ON(bug);
+ if (bug)
+ BUG();
}
#else
#define validate_mm(mm) do { } while (0)
__vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
struct vm_area_struct *prev, struct rb_node *rb_parent)
{
- if (vma->vm_flags & VM_EXEC)
- arch_add_exec_range(mm, vma->vm_end);
if (prev) {
vma->vm_next = prev->vm_next;
prev->vm_next = vma;
struct rb_node ** rb_link, * rb_parent;
__vma = find_vma_prepare(mm, vma->vm_start, &prev, &rb_link, &rb_parent);
- BUG_ON(__vma && __vma->vm_start < vma->vm_end);
+ if (__vma && __vma->vm_start < vma->vm_end)
+ BUG();
__vma_link(mm, vma, prev, rb_link, rb_parent);
mm->map_count++;
}
rb_erase(&vma->vm_rb, &mm->mm_rb);
if (mm->mmap_cache == vma)
mm->mmap_cache = prev;
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, vma->vm_end);
}
/*
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
} else /* cases 2, 5, 7 */
vma_adjust(prev, prev->vm_start,
end, prev->vm_pgoff, NULL);
- if (prev->vm_flags & VM_EXEC)
- arch_add_exec_range(mm, prev->vm_end);
return prev;
}
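
The VM_SPECIAL widening above (adding VM_DONTCOPY back into the mask) feeds the is_mergeable_vma() test: two vmas may merge only when their flags and backing file match and none of the special bits are set. A compact sketch with illustrative flag values rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the kernel defines the real ones in linux/mm.h. */
#define VM_IO         0x01UL
#define VM_DONTCOPY   0x02UL
#define VM_DONTEXPAND 0x04UL
#define VM_RESERVED   0x08UL
#define VM_PFNMAP     0x10UL
#define VM_SPECIAL    (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)

struct vma {
	unsigned long	vm_flags;
	void		*vm_file;
};

static bool is_mergeable(const struct vma *vma, void *file, unsigned long flags)
{
	if (vma->vm_flags != flags || vma->vm_file != file)
		return false;
	return !(flags & VM_SPECIAL);	/* never merge special mappings */
}

int main(void)
{
	struct vma plain = { 0, NULL };
	struct vma pinned = { VM_DONTCOPY, NULL };

	printf("%d %d\n", is_mergeable(&plain, NULL, 0),
	       is_mergeable(&pinned, NULL, VM_DONTCOPY));	/* 1 0 */
	return 0;
}
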
* (e.g. stash info in next's anon_vma_node when assigning
* an anon_vma, or when trying vma_merge). Another time.
*/
- BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
+ if (find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma)
+ BUG();
if (!near)
goto none;
const unsigned long stack_flags
= VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
+#ifdef CONFIG_HUGETLB
+ if (flags & VM_HUGETLB) {
+ if (!(flags & VM_DONTCOPY))
+ mm->shared_vm += pages;
+ return;
+ }
+#endif /* CONFIG_HUGETLB */
+
if (file) {
mm->shared_vm += pages;
if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
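
The hugetlb branch restored above short-circuits the normal statistics: hugetlb pages count toward shared_vm (unless VM_DONTCOPY) and skip every other bucket. A hedged sketch of the bucketing, covering only the cases visible in this hunk and using stand-in flag values:

#include <stdio.h>

/* Illustrative flag bits; the kernel's real VM_* values differ. */
#define VM_WRITE    0x2UL
#define VM_EXEC     0x4UL
#define VM_HUGETLB  0x10UL
#define VM_DONTCOPY 0x20UL

struct mm_stats {
	unsigned long shared_vm;
	unsigned long exec_vm;
};

static void vm_stat_account(struct mm_stats *mm, unsigned long flags,
			    int has_file, unsigned long pages)
{
	/* Hugetlb mappings: counted as shared unless VM_DONTCOPY, then done. */
	if (flags & VM_HUGETLB) {
		if (!(flags & VM_DONTCOPY))
			mm->shared_vm += pages;
		return;
	}
	if (has_file) {
		mm->shared_vm += pages;
		/* Exec-only file mappings also count toward exec_vm. */
		if ((flags & (VM_EXEC | VM_WRITE)) == VM_EXEC)
			mm->exec_vm += pages;
	}
}

int main(void)
{
	struct mm_stats mm = { 0, 0 };

	vm_stat_account(&mm, VM_HUGETLB, 0, 512);
	vm_stat_account(&mm, VM_EXEC, 1, 16);
	printf("shared=%lu exec=%lu\n", mm.shared_vm, mm.exec_vm);
	return 0;
}
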
/* Obtain the address to map to. We verify (or select) it and ensure
* that it represents a valid section of the address space.
*/
- addr = get_unmapped_area_prot(file, addr, len, pgoff, flags, prot & PROT_EXEC);
+ addr = get_unmapped_area(file, addr, len, pgoff, flags);
if (addr & ~PAGE_MASK)
return addr;
* specific mapper. The address has already been validated but not
* unmapped; the old maps, however, have been removed from the list.
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}
+ memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
- vma->vm_page_prot = protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
vma->vm_pgoff = pgoff;
if (file) {
pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
- if (vma_wants_writenotify(vma))
- vma->vm_page_prot =
- protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC)];
-
if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
file = vma->vm_file;
mm->free_area_cache = mm->mmap_base;
}
-
unsigned long
-get_unmapped_area_prot(struct file *file, unsigned long addr, unsigned long len,
- unsigned long pgoff, unsigned long flags, int exec)
+get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
+ unsigned long pgoff, unsigned long flags)
{
unsigned long ret;
if (!(flags & MAP_FIXED)) {
unsigned long (*get_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
- if (exec && current->mm->get_unmapped_exec_area)
- get_area = current->mm->get_unmapped_exec_area;
- else
- get_area = current->mm->get_unmapped_area;
-
+ get_area = current->mm->get_unmapped_area;
if (file && file->f_op && file->f_op->get_unmapped_area)
get_area = file->f_op->get_unmapped_area;
addr = get_area(file, addr, len, pgoff, flags);
return addr;
}
-EXPORT_SYMBOL(get_unmapped_area_prot);
-
-#define SHLIB_BASE 0x00110000
-
-unsigned long arch_get_unmapped_exec_area(struct file *filp, unsigned long addr0,
- unsigned long len0, unsigned long pgoff, unsigned long flags)
-{
- unsigned long addr = addr0, len = len0;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long tmp;
-
- if (len > TASK_SIZE)
- return -ENOMEM;
-
- if (!addr && !(flags & MAP_FIXED))
- addr = randomize_range(SHLIB_BASE, 0x01000000, len);
-
- if (addr) {
- addr = PAGE_ALIGN(addr);
- vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vma->vm_start)) {
- return addr;
- }
- }
-
- addr = SHLIB_BASE;
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr)
- return -ENOMEM;
-
- if (!vma || addr + len <= vma->vm_start) {
- /*
- * Must not let a PROT_EXEC mapping get into the
- * brk area:
- */
- if (addr + len > mm->brk)
- goto failed;
-
- /*
- * Up until the brk area we randomize addresses
- * as much as possible:
- */
- if (addr >= 0x01000000) {
- tmp = randomize_range(0x01000000, PAGE_ALIGN(max(mm->start_brk, (unsigned long)0x08000000)), len);
- vma = find_vma(mm, tmp);
- if (TASK_SIZE - len >= tmp &&
- (!vma || tmp + len <= vma->vm_start))
- return tmp;
- }
- /*
- * OK, randomization didn't work out; return
- * the result of the linear search:
- */
- return addr;
- }
- addr = vma->vm_end;
- }
-
-failed:
- return current->mm->get_unmapped_area(filp, addr0, len0, pgoff, flags);
-}
-
+EXPORT_SYMBOL(get_unmapped_area);
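
The removed arch_get_unmapped_exec_area() placed PROT_EXEC mappings in a window starting at SHLIB_BASE, randomizing addresses with randomize_range(). A userspace sketch of that helper and the bounds the removed code used; rand() is only a stand-in for the kernel's entropy source and must not be used for real ASLR:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/*
 * Sketch of randomize_range(): pick a page-aligned address in
 * [start, end - len), or 0 if the window cannot hold the mapping.
 */
static unsigned long randomize_range(unsigned long start, unsigned long end,
				     unsigned long len)
{
	if (end <= start + len)
		return 0;
	return PAGE_ALIGN((unsigned long)rand() % (end - len - start) + start);
}

int main(void)
{
	srand((unsigned)time(NULL));
	/* The removed code tried [SHLIB_BASE, 0x01000000) first. */
	printf("0x%08lx\n",
	       randomize_range(0x00110000UL, 0x01000000UL, 8 * PAGE_SIZE));
	return 0;
}
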
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
return prev ? prev->vm_next : vma;
}
-static int over_stack_limit(unsigned long sz)
-{
- if (sz < EXEC_STACK_BIAS)
- return 0;
- return (sz - EXEC_STACK_BIAS) >
- current->signal->rlim[RLIMIT_STACK].rlim_cur;
-}
-
/*
* Verify that the stack growth is acceptable and
* update accounting. This is shared with both the
return -ENOMEM;
/* Stack limit test */
- if (over_stack_limit(size))
+ if (size > rlim[RLIMIT_STACK].rlim_cur)
return -ENOMEM;
/* mlock limit tests */
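
The removed over_stack_limit() forgave an EXEC_STACK_BIAS worth of stack before applying RLIMIT_STACK; the replacement above compares the raw size. A sketch of the biased check, with an illustrative bias value since EXEC_STACK_BIAS is defined elsewhere by the exec-shield patch:

#include <stdbool.h>
#include <stdio.h>
#include <sys/resource.h>

/* Illustrative value only; not the exec-shield patch's definition. */
#define EXEC_STACK_BIAS (2UL * 1024 * 1024)

static bool over_stack_limit(unsigned long sz)
{
	struct rlimit rl;

	if (sz < EXEC_STACK_BIAS)
		return false;
	if (getrlimit(RLIMIT_STACK, &rl) != 0)
		return false;
	/* Forgive the first EXEC_STACK_BIAS bytes, then apply the limit. */
	return sz - EXEC_STACK_BIAS > rl.rlim_cur;
}

int main(void)
{
	printf("%d\n", over_stack_limit(16UL * 1024 * 1024));
	return 0;
}
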
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- if (new_below) {
- unsigned long old_end = vma->vm_end;
-
+ if (new_below)
vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
((addr - new->vm_start) >> PAGE_SHIFT), new);
- if (vma->vm_flags & VM_EXEC)
- arch_remove_exec_range(mm, old_end);
- } else
+ else
vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
return 0;
/*
* create a vma struct for an anonymous mapping
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}
+ memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
- vma->vm_page_prot = protection_map[flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+ vma->vm_page_prot = protection_map[flags & 0x0f];
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
vx_vmpages_add(mm, len >> PAGE_SHIFT);
unsigned long nr_accounted = 0;
unsigned long end;
-#ifdef arch_exit_mmap
- arch_exit_mmap(mm);
-#endif
-
lru_add_drain();
flush_cache_mm(mm);
tlb = tlb_gather_mmu(mm, 1);
vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
- arch_flush_exec_range(mm);
set_mm_counter(mm, file_rss, 0);
set_mm_counter(mm, anon_rss, 0);
return 0;
return 1;
}
-
-
-static struct page *
-special_mapping_nopage(struct vm_area_struct *vma,
- unsigned long address, int *type)
-{
- struct page **pages;
-
- BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-
- address -= vma->vm_start;
- for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
- address -= PAGE_SIZE;
-
- if (*pages) {
- get_page(*pages);
- return *pages;
- }
-
- return NOPAGE_SIGBUS;
-}
-
-static struct vm_operations_struct special_mapping_vmops = {
- .nopage = special_mapping_nopage,
-};
-
-unsigned int vdso_populate = 1;
-
-/*
- * Insert a new vma covering the given region, with the given flags and
- * protections. Its pages are supplied by the given null-terminated array.
- * The region past the last page supplied will always produce SIGBUS.
- * The array pointer and the pages it points to are assumed to stay alive
- * for as long as this mapping might exist.
- */
-int install_special_mapping(struct mm_struct *mm,
- unsigned long addr, unsigned long len,
- unsigned long vm_flags, pgprot_t pgprot,
- struct page **pages)
-{
- struct vm_area_struct *vma;
- int err;
-
- vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
- if (unlikely(vma == NULL))
- return -ENOMEM;
- memset(vma, 0, sizeof(*vma));
-
- vma->vm_mm = mm;
- vma->vm_start = addr;
- vma->vm_end = addr + len;
-
- vma->vm_flags = vm_flags;
- vma->vm_page_prot = pgprot;
-
- vma->vm_ops = &special_mapping_vmops;
- vma->vm_private_data = pages;
-
- insert_vm_struct(mm, vma);
- vx_vmpages_add(mm, len >> PAGE_SHIFT);
-
- if (!vdso_populate)
- return 0;
-
- err = 0;
- while (*pages) {
- struct page *page = *pages++;
- get_page(page);
- err = install_page(mm, vma, addr, page, vma->vm_page_prot);
- if (err) {
- put_page(page);
- break;
- }
- addr += PAGE_SIZE;
- }
-
- return err;
-}
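
special_mapping_nopage(), removed above, resolves a fault by walking a NULL-terminated page array, one entry per PAGE_SIZE of offset into the vma, and signals SIGBUS once the offset runs past the supplied pages. A userspace sketch of that lookup, where a NULL result stands in for NOPAGE_SIGBUS:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Walk a NULL-terminated array of pages, one entry per PAGE_SIZE of
 * offset; NULL corresponds to NOPAGE_SIGBUS in the kernel. */
static void *lookup_special_page(void **pages, unsigned long offset)
{
	for (; offset > 0 && *pages; ++pages)
		offset -= PAGE_SIZE;
	return *pages;
}

int main(void)
{
	static char a[PAGE_SIZE], b[PAGE_SIZE];
	void *pages[] = { a, b, NULL };

	printf("%p %p %p\n",
	       lookup_special_page(pages, 0),             /* page a */
	       lookup_special_page(pages, PAGE_SIZE),     /* page b */
	       lookup_special_page(pages, 2 * PAGE_SIZE)); /* NULL: SIGBUS */
	return 0;
}
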