int heap_stack_gap = 0;
EXPORT_SYMBOL(mem_map);
-EXPORT_SYMBOL(sysctl_max_map_count);
-EXPORT_SYMBOL(sysctl_overcommit_memory);
-EXPORT_SYMBOL(sysctl_overcommit_ratio);
-EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);
/* list of shareable VMAs */
struct vm_operations_struct generic_file_vm_ops = {
};
+EXPORT_SYMBOL(vfree);
+EXPORT_SYMBOL(vmalloc_to_page);
+EXPORT_SYMBOL(vmalloc_32);
+EXPORT_SYMBOL(vmap);
+EXPORT_SYMBOL(vunmap);
+
/*
* Handle all mappings that got truncated by a "truncate()"
* system call.
return(i);
}
+EXPORT_SYMBOL(get_user_pages);
+
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;
kfree(addr);
}
-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
- pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
/*
* kmalloc doesn't like __GFP_HIGHMEM for some reason
*/
- return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+ return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
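
Editor's note: on a nommu kernel __vmalloc() is just a kmalloc() wrapper, so the interesting part of this hunk is the flag arithmetic: __GFP_HIGHMEM is masked off because kmalloc() cannot hand back highmem, and __GFP_COMP is added so that multi-page allocations become compound pages whose overall size can later be recovered. A minimal userspace sketch of the same bit manipulation follows; the flag values are made up for illustration and are not the kernel's real gfp bits.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real gfp bits; values are illustrative only. */
#define GFP_KERNEL    0x10u
#define __GFP_HIGHMEM 0x02u
#define __GFP_COMP    0x4000u

static unsigned int adjust_gfp(unsigned int gfp_mask)
{
	/* Same expression as the patched __vmalloc(): force compound pages,
	 * strip highmem, keep every other caller-supplied bit. */
	return (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM;
}

int main(void)
{
	unsigned int in  = GFP_KERNEL | __GFP_HIGHMEM;
	unsigned int out = adjust_gfp(in);

	assert(out & __GFP_COMP);          /* compound flag added  */
	assert(!(out & __GFP_HIGHMEM));    /* highmem flag removed */
	assert(out & GFP_KERNEL);          /* other bits preserved */
	printf("0x%x -> 0x%x\n", in, out);
	return 0;
}
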
struct page * vmalloc_to_page(void *addr)
{
return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
+EXPORT_SYMBOL(vmalloc);
+
+void *vmalloc_node(unsigned long size, int node)
+{
+ return vmalloc(size);
+}
+EXPORT_SYMBOL(vmalloc_node);
/*
* vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
* - note that this may not return a page-aligned address if the object
* we're allocating is smaller than a page
*/
- base = kmalloc(len, GFP_KERNEL);
+ base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
if (!base)
goto enomem;
error_getting_vma:
up_write(&nommu_vma_sem);
kfree(vml);
- printk("Allocation of vml for %lu byte allocation from process %d failed\n",
+ printk("Allocation of vma for %lu byte allocation from process %d failed\n",
len, current->pid);
show_free_areas();
return -ENOMEM;
for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
if ((*parent)->vma->vm_start == addr &&
- (*parent)->vma->vm_end == end)
+ ((len == 0) || ((*parent)->vma->vm_end == end)))
goto found;
printk("munmap of non-mmaped memory by process %d (%s): %p\n",
realalloc -= kobjsize(vml);
askedalloc -= sizeof(*vml);
kfree(vml);
+
+ update_hiwater_vm(mm);
vx_vmpages_sub(mm, len >> PAGE_SHIFT);
#ifdef DEBUG
EXPORT_SYMBOL(find_vma);
-struct page * follow_page(struct mm_struct *mm, unsigned long addr, int write)
+struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
+ unsigned int foll_flags)
{
return NULL;
}
int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long to, unsigned long size, pgprot_t prot)
{
- return -EPERM;
+ vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
+ return 0;
}
+EXPORT_SYMBOL(remap_pfn_range);
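
Editor's note: turning remap_pfn_range() from a hard -EPERM into a success stub reflects the nommu model: device memory is already addressable at its physical address, so there is nothing to remap and the VMA start is simply pointed at pgoff << PAGE_SHIFT. A standalone sketch of that page-frame-to-address arithmetic; PAGE_SHIFT of 12 is assumed and tiny_vma/nommu_remap are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

#define PAGE_SHIFT 12  /* assumed 4 KiB pages, for illustration only */

/* Hypothetical, cut-down vma: just the fields the nommu stub touches. */
struct tiny_vma {
	unsigned long vm_start;
	unsigned long vm_pgoff;   /* offset in pages, as in the real vma */
};

static int nommu_remap(struct tiny_vma *vma)
{
	/* Same idea as the patched stub: the "mapping" is the physical
	 * address itself, recovered from the page frame number. */
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}

int main(void)
{
	struct tiny_vma vma = { .vm_start = 0, .vm_pgoff = 0x40000 };

	nommu_remap(&vma);
	printf("vm_start = 0x%lx\n", vma.vm_start);   /* 0x40000000 */
	return 0;
}
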
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
return -ENOMEM;
}
-void arch_unmap_area(struct vm_area_struct *area)
+void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}
-void update_mem_hiwater(struct task_struct *tsk)
-{
- unsigned long rss = get_mm_counter(tsk->mm, rss);
-
- if (likely(tsk->mm)) {
- if (tsk->mm->hiwater_rss < rss)
- tsk->mm->hiwater_rss = rss;
- if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
- tsk->mm->hiwater_vm = tsk->mm->total_vm;
- }
-}
-
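
Editor's note: the removed update_mem_hiwater() updated the watermarks through the task, while the replacement callers (see the update_hiwater_vm(mm) line added earlier in this patch) update the mm directly. The rule itself is a plain high-water mark. A minimal runnable model of it; tiny_mm and tiny_update_hiwater_vm are hypothetical stand-ins for the real kernel types and helpers.

#include <assert.h>

/* Hypothetical, stripped-down mm: only the counters the watermark needs. */
struct tiny_mm {
	unsigned long total_vm;    /* currently mapped pages */
	unsigned long hiwater_vm;  /* peak value of total_vm */
};

/* Same rule as the kernel helper: never let the peak fall below current. */
static void tiny_update_hiwater_vm(struct tiny_mm *mm)
{
	if (mm->hiwater_vm < mm->total_vm)
		mm->hiwater_vm = mm->total_vm;
}

int main(void)
{
	struct tiny_mm mm = { .total_vm = 100, .hiwater_vm = 0 };

	tiny_update_hiwater_vm(&mm);   /* peak rises to 100 */
	mm.total_vm = 60;              /* an unmap shrinks the mapping... */
	tiny_update_hiwater_vm(&mm);   /* ...but the recorded peak stays put */
	assert(mm.hiwater_vm == 100);
	return 0;
}
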
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen,
int even_cows)
{
}
+EXPORT_SYMBOL(unmap_mapping_range);
/*
* Check that a process has enough memory to allocate a new virtual
if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
unsigned long n;
- free = get_page_cache_size();
+ free = global_page_state(NR_FILE_PAGES);
free += nr_swap_pages;
/*
* nr_free_pages() is very expensive on large systems,
* only call if we're about to fail.
*/
n = nr_free_pages();
+
+ /*
+ * Leave out the reserved pages: they are not available for anonymous pages.
+ */
+ if (n <= totalreserve_pages)
+ goto error;
+ else
+ n -= totalreserve_pages;
+
+ /*
+ * Leave the last 3% for root
+ */
if (!cap_sys_admin)
n -= n / 32;
free += n;
if (free > pages)
return 0;
- vm_unacct_memory(pages);
- return -ENOMEM;
+
+ goto error;
}
allowed = totalram_pages * sysctl_overcommit_ratio / 100;
/* Don't let a single process grow too big:
   leave 3% of the size of this process for other processes */
allowed -= current->mm->total_vm / 32;
- if (atomic_read(&vm_committed_space) < allowed)
+ /*
+ * cast `allowed' as a signed long because vm_committed_space
+ * sometimes has a negative value
+ */
+ if (atomic_read(&vm_committed_space) < (long)allowed)
return 0;
-
+error:
vm_unacct_memory(pages);
return -ENOMEM;
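
Editor's note: taken together, the hunks above rework the __vm_enough_memory() accounting: in OVERCOMMIT_GUESS mode the page cache count now comes from global_page_state(NR_FILE_PAGES), fully reserved pages are never treated as usable for anonymous memory, root keeps a 3% margin (n - n/32), and the final comparison casts `allowed' to a signed long because the committed-space counter can transiently go negative. The sketch below mirrors that arithmetic in plain userspace C; every name and number in it is illustrative, not taken from a real system.

#include <stdio.h>

/* Illustrative page counts (in pages), chosen only for the demo. */
static long totalram_pages     = 262144;  /* 1 GiB of 4 KiB pages */
static long total_swap_pages   = 65536;
static long totalreserve_pages = 4096;
static long overcommit_ratio   = 50;      /* percent */
static long committed_space    = -12;     /* can dip below zero transiently */

/* Heuristic "guess" mode: is there plausibly enough memory for `pages'? */
static int guess_enough(long pages, long file_pages, long swap_pages,
			long free_pages, int cap_sys_admin)
{
	long free = file_pages + swap_pages;

	if (free > pages)
		return 1;

	/* Only reached if we are about to fail: count free pages too, but
	 * never the fully reserved ones, and keep 3% back for root. */
	if (free_pages <= totalreserve_pages)
		return 0;
	free_pages -= totalreserve_pages;
	if (!cap_sys_admin)
		free_pages -= free_pages / 32;
	return free + free_pages > pages;
}

/* Strict "ratio" mode: compare committed memory against ram*ratio% + swap. */
static int ratio_enough(long pages, long current_total_vm, int cap_sys_admin)
{
	long allowed = totalram_pages * overcommit_ratio / 100;

	if (!cap_sys_admin)
		allowed -= allowed / 32;          /* last 3% for root */
	allowed += total_swap_pages;
	allowed -= current_total_vm / 32;         /* 3% of this process for others */

	/* The kernel accounts `pages' into the committed counter before this
	 * check; the signed comparison matters, because an unsigned compare
	 * would turn a negative committed count into a huge positive one. */
	return committed_space + pages < allowed;
}

int main(void)
{
	printf("guess: %d\n", guess_enough(1000, 300, 400, 5000, 0));
	printf("ratio: %d\n", ratio_enough(1000, 2048, 0));
	return 0;
}
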
{
return 0;
}
+
+struct page *filemap_nopage(struct vm_area_struct *area,
+ unsigned long address, int *type)
+{
+ BUG();
+ return NULL;
+}