extern unsigned long vmalloc_earlyreserve;
extern int page_cluster;
-extern int sysctl_legacy_va_layout;
-
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
void *virtual; /* Kernel virtual address (NULL if
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
-#ifdef CONFIG_CKRM_RES_MEM
- void *memclass;
-#endif // CONFIG_CKRM_RES_MEM
};
/*
struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
-int shmem_lock(struct file *file, int lock, struct user_struct *user);
+int shmem_lock(struct file * file, int lock, struct user_struct *);
int shmem_zero_setup(struct vm_area_struct *);
/*
 * can_do_mlock - report whether the current task is permitted to
 * mlock()/mlockall() memory; returns 1 if allowed, 0 otherwise.
 *
 * NOTE(review): this chunk appears truncated by extraction — the body
 * below is missing its opening brace and the condition guarding the
 * first "return 1;".  In the upstream kernel this function returns 1
 * when the task has CAP_IPC_LOCK or a nonzero RLIMIT_MEMLOCK; confirm
 * against the full file before trusting the visible control flow.
 */
static inline int can_do_mlock(void)
	/* presumably guarded by a capability/rlimit check — elided here */
	return 1;
	/* default: locking not permitted */
	return 0;
}
-extern int user_shm_lock(size_t, struct user_struct *);
-extern void user_shm_unlock(size_t, struct user_struct *);
+
/*
* Parameter block passed down to zap_pte_range in exceptional cases.
unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
-extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
-
-
-static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr,
- unsigned long len, unsigned long pgoff, unsigned long flags)
-{
- return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
-}
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_mmap_pgoff(struct mm_struct *mm, struct file *file,
unsigned long addr, unsigned long len,
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
+extern unsigned int nr_used_zone_pages(void);
+
extern struct page * vmalloc_to_page(void *addr);
extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
int write);