X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fmm.h;h=e4183fa67d2232d3e446a47b566c161f509b32bf;hb=43bc926fffd92024b46cafaf7350d669ba9ca884;hp=8f8a8a3a31b790d8484e22d008f9502cce94cdff;hpb=9213980e6a70d8473e0ffd4b39ab5b6caaba9ff5;p=linux-2.6.git

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8f8a8a3a3..e4183fa67 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/errno.h>
+#include <linux/capability.h>
 
 #ifdef __KERNEL__
@@ -13,6 +14,7 @@
 #include <linux/rbtree.h>
 #include <linux/prio_tree.h>
 #include <linux/fs.h>
+#include <linux/mutex.h>
 
 struct mempolicy;
 struct anon_vma;
@@ -23,16 +25,21 @@ extern unsigned long max_mapnr;
 extern unsigned long num_physpages;
 extern void * high_memory;
+extern unsigned long vmalloc_earlyreserve;
 extern int page_cluster;
 
+#ifdef CONFIG_SYSCTL
+extern int sysctl_legacy_va_layout;
+#else
+#define sysctl_legacy_va_layout 0
+#endif
+
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/atomic.h>
 
-#ifndef MM_VM_SIZE
-#define MM_VM_SIZE(mm)  TASK_SIZE
-#endif
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 
 /*
  * Linux kernel virtual memory manager primitives.
@@ -76,7 +83,7 @@ struct vm_area_struct {
             struct vm_area_struct *head;
         } vm_set;
 
-        struct prio_tree_node prio_tree_node;
+        struct raw_prio_tree_node prio_tree_node;
     } shared;
 
     /*
@@ -96,12 +103,33 @@ struct vm_area_struct {
                        units, *not* PAGE_CACHE_SIZE */
     struct file * vm_file;      /* File we map to (can be NULL). */
     void * vm_private_data;     /* was vm_pte (shared mem) */
+    unsigned long vm_truncate_count;/* truncate_count or restart_addr */
+
+#ifndef CONFIG_MMU
+    atomic_t vm_usage;          /* refcount (VMAs shared if !MMU) */
+#endif
 #ifdef CONFIG_NUMA
     struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
 };
 
+/*
+ * This struct defines the per-mm list of VMAs for uClinux. If CONFIG_MMU is
+ * disabled, then there's a single shared list of VMAs maintained by the
+ * system, and mm's subscribe to these individually
+ */
+struct vm_list_struct {
+    struct vm_list_struct   *next;
+    struct vm_area_struct   *vma;
+};
+
+#ifndef CONFIG_MMU
+extern struct rb_root nommu_vma_tree;
+extern struct rw_semaphore nommu_vma_sem;
+
+extern unsigned int kobjsize(const void *objp);
+#endif
+
 /*
  * vm_flags..
  */
@@ -110,6 +138,7 @@ struct vm_area_struct {
 #define VM_EXEC     0x00000004
 #define VM_SHARED   0x00000008
 
+/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 #define VM_MAYREAD  0x00000010  /* limits for mprotect() etc */
 #define VM_MAYWRITE 0x00000020
 #define VM_MAYEXEC  0x00000040
@@ -117,7 +146,8 @@
 #define VM_GROWSDOWN    0x00000100  /* general info on the segment */
 #define VM_GROWSUP  0x00000200
-#define VM_SHM      0x00000400  /* shared memory area, don't swap out */
+#define VM_SHM      0x00000000  /* Means nothing: delete it later */
+#define VM_PFNMAP   0x00000400  /* Page-ranges managed without "struct page", just pure PFN */
 #define VM_DENYWRITE    0x00000800  /* ETXTBSY on write attempts.. */
 #define VM_EXECUTABLE   0x00001000
@@ -130,10 +160,12 @@ struct vm_area_struct {
 #define VM_DONTCOPY 0x00020000  /* Do not copy this vma on fork */
 #define VM_DONTEXPAND   0x00040000  /* Cannot expand with mremap() */
-#define VM_RESERVED 0x00080000  /* Don't unmap it from swap_out */
+#define VM_RESERVED 0x00080000  /* Count as reserved_vm like IO */
 #define VM_ACCOUNT  0x00100000  /* Is a VM accounted object */
 #define VM_HUGETLB  0x00400000  /* Huge TLB Page VM */
 #define VM_NONLINEAR    0x00800000  /* Is non-linear (remap_file_pages) */
+#define VM_MAPPED_COPY  0x01000000  /* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE   0x02000000  /* The vma has had "vm_insert_page()" done on it */
 
 #ifndef VM_STACK_DEFAULT_FLAGS      /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
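A note on the new comment above VM_MAYREAD: mprotect() derives the VM_* permission bits from the VM_MAY* bits by shifting right four places, so the relative positions of the two groups are a hard invariant. A minimal standalone check of that invariant (not part of the patch; the values are mirrored from this header):

    /* Compile-time check of the VM_MAY* / VM_* bit relationship that
     * mprotect() hardcodes. Values copied from include/linux/mm.h. */
    #define VM_READ     0x00000001UL
    #define VM_WRITE    0x00000002UL
    #define VM_EXEC     0x00000004UL
    #define VM_MAYREAD  0x00000010UL
    #define VM_MAYWRITE 0x00000020UL
    #define VM_MAYEXEC  0x00000040UL

    _Static_assert(VM_MAYREAD  >> 4 == VM_READ,  "mprotect() assumes this");
    _Static_assert(VM_MAYWRITE >> 4 == VM_WRITE, "mprotect() assumes this");
    _Static_assert(VM_MAYEXEC  >> 4 == VM_EXEC,  "mprotect() assumes this");

    int main(void) { return 0; }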
@@ -178,12 +210,6 @@ struct vm_operations_struct {
 
 struct mmu_gather;
 struct inode;
 
-#ifdef ARCH_HAS_ATOMIC_UNSIGNED
-typedef unsigned page_flags_t;
-#else
-typedef unsigned long page_flags_t;
-#endif
-
 /*
  * Each physical page in the system has a struct page associated with
  * it to keep track of whatever it is we are using the page for at the
@@ -191,25 +217,34 @@ typedef unsigned long page_flags_t;
  * a page.
  */
 struct page {
-    page_flags_t flags;         /* Atomic flags, some possibly
+    unsigned long flags;        /* Atomic flags, some possibly
                                  * updated asynchronously */
     atomic_t _count;            /* Usage count, see below. */
-    unsigned int mapcount;      /* Count of ptes mapped in mms,
+    atomic_t _mapcount;         /* Count of ptes mapped in mms,
                                  * to show when page is mapped
-                                 * & limit reverse map searches,
-                                 * protected by PG_maplock.
-                                 */
-    unsigned long private;      /* Mapping-private opaque data:
-                                 * usually used for buffer_heads
-                                 * if PagePrivate set; used for
-                                 * swp_entry_t if PageSwapCache
-                                 */
-    struct address_space *mapping;  /* If PG_anon clear, points to
-                                 * inode address_space, or NULL.
-                                 * If page mapped as anonymous
-                                 * memory, PG_anon is set, and
-                                 * it points to anon_vma object.
+                                 * & limit reverse map searches.
                                  */
+    union {
+        struct {
+            unsigned long private;      /* Mapping-private opaque data:
+                                         * usually used for buffer_heads
+                                         * if PagePrivate set; used for
+                                         * swp_entry_t if PageSwapCache;
+                                         * indicates order in the buddy
+                                         * system if PG_buddy is set.
+                                         */
+            struct address_space *mapping;  /* If low bit clear, points to
+                                         * inode address_space, or NULL.
+                                         * If page mapped as anonymous
+                                         * memory, low bit is set, and
+                                         * it points to anon_vma object:
+                                         * see PAGE_MAPPING_ANON below.
+                                         */
+        };
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+        spinlock_t ptl;
+#endif
+    };
     pgoff_t index;              /* Our offset within mapping. */
     struct list_head lru;       /* Pageout list, eg. active_list
                                  * protected by zone->lru_lock !
@@ -230,6 +265,9 @@ struct page {
 #endif /* WANT_PAGE_VIRTUAL */
 };
 
+#define page_private(page)          ((page)->private)
+#define set_page_private(page, v)   ((page)->private = (v))
+
 /*
  * FIXME: take this include out, include page-flags.h in
  * files which need it (119 of them)
@@ -247,72 +285,55 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
 */
 
 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)                \
-    ({                                      \
-        BUG_ON(page_count(p) == 0);         \
-        atomic_add_negative(-1, &(p)->_count);  \
-    })
+static inline int put_page_testzero(struct page *page)
+{
+    BUG_ON(atomic_read(&page->_count) == 0);
+    return atomic_dec_and_test(&page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
 */
-#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v) atomic_set(&(p)->_count, v - 1)
-#define __put_page(p)       atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+    return atomic_inc_not_zero(&page->_count);
+}
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
-#ifdef CONFIG_HUGETLB_PAGE
-
-static inline int page_count(struct page *p)
+static inline int page_count(struct page *page)
 {
-    if (PageCompound(p))
-        p = (struct page *)p->private;
-    return atomic_read(&(p)->_count) + 1;
+    if (unlikely(PageCompound(page)))
+        page = (struct page *)page_private(page);
+    return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
 {
     if (unlikely(PageCompound(page)))
-        page = (struct page *)page->private;
+        page = (struct page *)page_private(page);
     atomic_inc(&page->_count);
 }
 
-void put_page(struct page *page);
-
-#else       /* CONFIG_HUGETLB_PAGE */
-
-#define page_count(p)       (atomic_read(&(p)->_count) + 1)
-
-static inline void get_page(struct page *page)
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
 {
-    atomic_inc(&page->_count);
+    atomic_set(&page->_count, 1);
 }
 
-static inline void put_page(struct page *page)
-{
-    if (!PageReserved(page) && put_page_testzero(page))
-        __page_cache_release(page);
-}
+void put_page(struct page *page);
 
-#endif      /* CONFIG_HUGETLB_PAGE */
+void split_page(struct page *page, unsigned int order);
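The two primitives above are the heart of the new refcount scheme: put_page_testzero() is a decrement that reports whether it dropped the last reference, and get_page_unless_zero() refuses to resurrect a page whose count already hit zero. A userspace model of the same pattern with C11 atomics (not kernel code; atomic_inc_not_zero becomes a CAS loop):

    #include <stdatomic.h>
    #include <stdio.h>

    static int put_testzero(atomic_int *count)
    {
        /* true when this caller dropped the last reference */
        return atomic_fetch_sub(count, 1) == 1;
    }

    static int get_unless_zero(atomic_int *count)
    {
        int old = atomic_load(count);
        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return 1;   /* got a reference */
        }
        return 0;           /* page is already on its way to the allocator */
    }

    int main(void)
    {
        atomic_int refs = 1;
        printf("get: %d\n", get_unless_zero(&refs)); /* 1, refs = 2 */
        printf("put: %d\n", put_testzero(&refs));    /* 0, refs = 1 */
        printf("put: %d\n", put_testzero(&refs));    /* 1, refs = 0 */
        printf("get: %d\n", get_unless_zero(&refs)); /* 0, stays dead */
        return 0;
    }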
 
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
@@ -321,7 +342,8 @@ static inline void put_page(struct page *page)
  * only one copy in memory, at most, normally.
  *
  * For the non-reserved pages, page_count(page) denotes a reference count.
- * page_count() == 0 means the page is free.
+ * page_count() == 0 means the page is free. page->lru is then used for
+ * freelist management in the buddy allocator.
  * page_count() == 1 means the page is used for exactly one purpose
  * (e.g. a private data page of one process).
  *
@@ -347,10 +369,8 @@ static inline void put_page(struct page *page)
  * attaches, plus 1 if `private' contains something, plus one for
  * the page cache itself.
  *
- * All pages belonging to an inode are in these doubly linked lists:
- * mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
- * using the page->list list_head. These fields are also used for
- * freelist managemet (when page_count()==0).
+ * Instead of keeping dirty/clean pages in per address-space lists, we instead
+ * now tag pages as dirty/under writeback in the radix tree.
 *
 * There is also a per-mapping radix tree mapping index to the page
 * in memory if present. The tree is rooted at mapping->root.
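Earlier in this hunk, page_count() and get_page() forward compound (e.g. hugetlb) tail pages to their head page through page_private(), so the whole compound page is refcounted as one object. A toy standalone model of that redirection (not kernel code; all types stubbed out):

    #include <stdio.h>

    struct toy_page {
        int compound;          /* stands in for PageCompound() */
        unsigned long private; /* head pointer when compound    */
        int count;             /* stands in for _count          */
    };

    static struct toy_page *head_of(struct toy_page *page)
    {
        if (page->compound)
            page = (struct toy_page *)page->private;
        return page;
    }

    int main(void)
    {
        struct toy_page head = { 0, 0, 1 };
        struct toy_page tail = { 1, (unsigned long)&head, 0 };

        head_of(&tail)->count++;                 /* get_page() on a tail... */
        printf("head count = %d\n", head.count); /* ...bumps the head: 2    */
        return 0;
    }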
@@ -366,19 +386,81 @@ static inline void put_page(struct page *page)
 /*
  * The zone field is never updated after free_area_init_core()
  * sets it, so none of the operations on it need to be atomic.
- * We'll have up to (MAX_NUMNODES * MAX_NR_ZONES) zones total,
- * so we use (MAX_NODES_SHIFT + MAX_ZONES_SHIFT) here to get enough bits.
 */
-#define NODEZONE_SHIFT (sizeof(page_flags_t)*8 - MAX_NODES_SHIFT - MAX_ZONES_SHIFT)
-#define NODEZONE(node, zone)    ((node << ZONES_SHIFT) | zone)
+
+
+/*
+ * page->flags layout:
+ *
+ * There are three possibilities for how page->flags get
+ * laid out. The first is for the normal case, without
+ * sparsemem. The second is for sparsemem when there is
+ * plenty of space for node and section. The last is when
+ * we have run out of space and have to fall back to an
+ * alternate (slower) way of determining the node.
+ *
+ *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
+ * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
+ *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
+ */
+#ifdef CONFIG_SPARSEMEM
+#define SECTIONS_WIDTH      SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH      0
+#endif
+
+#define ZONES_WIDTH     ZONES_SHIFT
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#define NODES_WIDTH     NODES_SHIFT
+#else
+#define NODES_WIDTH     0
+#endif
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+#define SECTIONS_PGOFF      ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF     (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF     (NODES_PGOFF - ZONES_WIDTH)
+
+/*
+ * We are going to use the flags for the page to node mapping if its in
+ * there. This includes the case where there is no node, so it is implicit.
+ */
+#define FLAGS_HAS_NODE      (NODES_WIDTH > 0 || NODES_SHIFT == 0)
+
+#ifndef PFN_SECTION_SHIFT
+#define PFN_SECTION_SHIFT 0
+#endif
+
+/*
+ * Define the bit shifts to access each section. For non-existant
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away reference to them.
+ */
+#define SECTIONS_PGSHIFT    (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT       (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT       (ZONES_PGOFF * (ZONES_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to lookup the zone from a page. */
+#if FLAGS_HAS_NODE
+#define ZONETABLE_SHIFT     (NODES_SHIFT + ZONES_SHIFT)
+#else
+#define ZONETABLE_SHIFT     (SECTIONS_SHIFT + ZONES_SHIFT)
+#endif
+#define ZONETABLE_PGSHIFT   ZONES_PGSHIFT
+
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#endif
+
+#define ZONES_MASK      ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK      ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK       ((1UL << SECTIONS_WIDTH) - 1)
+#define ZONETABLE_MASK      ((1UL << ZONETABLE_SHIFT) - 1)
 
 static inline unsigned long page_zonenum(struct page *page)
 {
-    return (page->flags >> NODEZONE_SHIFT) & (~(~0UL << ZONES_SHIFT));
-}
-static inline unsigned long page_to_nid(struct page *page)
-{
-    return (page->flags >> (NODEZONE_SHIFT + ZONES_SHIFT));
+    return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 }
 
 struct zone;
@@ -386,13 +468,44 @@ extern struct zone *zone_table[];
 
 static inline struct zone *page_zone(struct page *page)
 {
-    return zone_table[page->flags >> NODEZONE_SHIFT];
+    return zone_table[(page->flags >> ZONETABLE_PGSHIFT) &
+            ZONETABLE_MASK];
+}
+
+static inline unsigned long page_to_nid(struct page *page)
+{
+    if (FLAGS_HAS_NODE)
+        return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+    else
+        return page_zone(page)->zone_pgdat->node_id;
+}
+static inline unsigned long page_to_section(struct page *page)
+{
+    return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+
+static inline void set_page_zone(struct page *page, unsigned long zone)
+{
+    page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
+    page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+}
+static inline void set_page_node(struct page *page, unsigned long node)
+{
+    page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
+    page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+}
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+    page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+    page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 }
 
-static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
+static inline void set_page_links(struct page *page, unsigned long zone,
+    unsigned long node, unsigned long pfn)
 {
-    page->flags &= ~(~0UL << NODEZONE_SHIFT);
-    page->flags |= nodezone_num << NODEZONE_SHIFT;
+    set_page_zone(page, zone);
+    set_page_node(page, node);
+    set_page_section(page, pfn_to_section_nr(pfn));
 }
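The width/shift/mask machinery above packs section, node and zone into the top of page->flags, leaving the low bits for the actual flag bits. The same pack/unpack arithmetic as a standalone model with fixed example widths (not kernel code; 4/6/2 bits are arbitrary choices for illustration):

    #include <assert.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * 8)
    #define SEC_W  4
    #define NODE_W 6
    #define ZONE_W 2

    /* fields are packed downward from the top, like *_PGOFF above */
    #define SEC_OFF  (BITS_PER_LONG - SEC_W)
    #define NODE_OFF (SEC_OFF - NODE_W)
    #define ZONE_OFF (NODE_OFF - ZONE_W)
    #define MASK(w)  ((1UL << (w)) - 1)

    static unsigned long set_field(unsigned long flags, unsigned long v,
                                   unsigned long off, unsigned long w)
    {
        flags &= ~(MASK(w) << off);            /* clear the field */
        return flags | ((v & MASK(w)) << off); /* then install it */
    }

    static unsigned long get_field(unsigned long flags,
                                   unsigned long off, unsigned long w)
    {
        return (flags >> off) & MASK(w);
    }

    int main(void)
    {
        unsigned long flags = 0;

        flags = set_field(flags, 9, SEC_OFF, SEC_W);   /* set_page_section() */
        flags = set_field(flags, 33, NODE_OFF, NODE_W);/* set_page_node()    */
        flags = set_field(flags, 2, ZONE_OFF, ZONE_W); /* set_page_zone()    */

        assert(get_field(flags, SEC_OFF, SEC_W) == 9);
        assert(get_field(flags, NODE_OFF, NODE_W) == 33);
        assert(get_field(flags, ZONE_OFF, ZONE_W) == 2);
        printf("low bits left for flags: %lu\n", ZONE_OFF);
        return 0;
    }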
 
 #ifndef CONFIG_DISCONTIGMEM
@@ -400,7 +513,7 @@ static inline void set_page_zone(struct page *page, unsigned long nodezone_num)
 extern struct page *mem_map;
 #endif
 
-static inline void *lowmem_page_address(struct page *page)
+static __always_inline void *lowmem_page_address(struct page *page)
 {
     return __va(page_to_pfn(page) << PAGE_SHIFT);
 }
@@ -432,24 +545,32 @@ void page_address_init(void);
 
 /*
  * On an anonymous page mapped into a user virtual memory area,
- * page->mapping points to its anon_vma, not to a struct address_space.
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it.
 *
 * Please note that, confusingly, "page_mapping" refers to the inode
 * address_space which maps the page from disk; whereas "page_mapped"
 * refers to user virtual address space into which the page is mapped.
 */
+#define PAGE_MAPPING_ANON   1
+
 extern struct address_space swapper_space;
 static inline struct address_space *page_mapping(struct page *page)
 {
-    struct address_space *mapping = NULL;
+    struct address_space *mapping = page->mapping;
 
     if (unlikely(PageSwapCache(page)))
         mapping = &swapper_space;
-    else if (likely(!PageAnon(page)))
-        mapping = page->mapping;
+    else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+        mapping = NULL;
     return mapping;
 }
 
+static inline int PageAnon(struct page *page)
+{
+    return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
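PAGE_MAPPING_ANON works because address_space and anon_vma pointers are at least word-aligned, so bit 0 of page->mapping is always free to carry the "this is really an anon_vma" tag, and masking it off recovers the pointer. A userspace model of the same low-bit tagging (not kernel code):

    #include <assert.h>
    #include <stdint.h>

    #define ANON_BIT 1UL

    struct anon_vma { int dummy; };

    static void *tag_anon(struct anon_vma *av)
    {
        return (void *)((uintptr_t)av | ANON_BIT);  /* set the tag bit  */
    }

    static int is_anon(void *mapping)               /* PageAnon()       */
    {
        return ((uintptr_t)mapping & ANON_BIT) != 0;
    }

    static struct anon_vma *untag(void *mapping)    /* recover pointer  */
    {
        return (struct anon_vma *)((uintptr_t)mapping & ~ANON_BIT);
    }

    int main(void)
    {
        struct anon_vma av;
        void *mapping = tag_anon(&av);

        assert(is_anon(mapping));
        assert(untag(mapping) == &av);
        return 0;
    }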
 /*
  * Return the pagecache index of the passed page. Regular pagecache pages
  * use ->index whereas swapcache pages use ->private
  */
@@ -457,16 +578,31 @@ static inline struct address_space *page_mapping(struct page *page)
 static inline pgoff_t page_index(struct page *page)
 {
     if (unlikely(PageSwapCache(page)))
-        return page->private;
+        return page_private(page);
     return page->index;
 }
 
+/*
+ * The atomic page->_mapcount, like _count, starts from -1:
+ * so that transitions both from it and to it can be tracked,
+ * using atomic_inc_and_test and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+    atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+    return atomic_read(&(page)->_mapcount) + 1;
+}
+
 /*
  * Return true if this page is mapped into pagetables.
  */
 static inline int page_mapped(struct page *page)
 {
-    return page->mapcount != 0;
+    return atomic_read(&(page)->_mapcount) >= 0;
 }
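The -1 base for _mapcount, explained in the comment above, lets the first map and last unmap be caught with plain inc-and-test/dec-and-test, while "mapped at all" is a simple sign check. A userspace model of the convention (not kernel code):

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
        atomic_int mapcount = -1;          /* reset_page_mapcount() */

        assert(atomic_load(&mapcount) < 0);              /* !page_mapped() */

        /* first pte maps the page: inc-and-test hits 0 exactly once */
        assert(atomic_fetch_add(&mapcount, 1) + 1 == 0);

        assert(atomic_load(&mapcount) >= 0);             /* page_mapped()   */
        assert(atomic_load(&mapcount) + 1 == 1);         /* page_mapcount() */
        return 0;
    }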
+ */ +#define VM_FAULT_WRITE 0x10 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK) extern void show_free_areas(void); -struct page *shmem_nopage(struct vm_area_struct * vma, +#ifdef CONFIG_SHMEM +struct page *shmem_nopage(struct vm_area_struct *vma, unsigned long address, int *type); int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new); struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, unsigned long addr); -struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags); -void shmem_lock(struct file * file, int lock); +int shmem_lock(struct file *file, int lock, struct user_struct *user); +#else +#define shmem_nopage filemap_nopage + +static inline int shmem_lock(struct file *file, int lock, + struct user_struct *user) +{ + return 0; +} + +static inline int shmem_set_policy(struct vm_area_struct *vma, + struct mempolicy *new) +{ + return 0; +} + +static inline struct mempolicy *shmem_get_policy(struct vm_area_struct *vma, + unsigned long addr) +{ + return NULL; +} +#endif +struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags); +extern int shmem_mmap(struct file *file, struct vm_area_struct *vma); + int shmem_zero_setup(struct vm_area_struct *); +#ifndef CONFIG_MMU +extern unsigned long shmem_get_unmapped_area(struct file *file, + unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags); +#endif + +static inline int can_do_mlock(void) +{ + if (capable(CAP_IPC_LOCK)) + return 1; + if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0) + return 1; + return 0; +} +extern int user_shm_lock(size_t, struct user_struct *); +extern void user_shm_unlock(size_t, struct user_struct *); + /* * Parameter block passed down to zap_pte_range in exceptional cases. 
 
 /*
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
@@ -506,16 +691,21 @@ struct zap_details {
     struct address_space *check_mapping;    /* Check page->mapping if set */
     pgoff_t first_index;            /* Lowest page->index to unmap */
     pgoff_t last_index;         /* Highest page->index to unmap */
-    int atomic;             /* May not schedule() */
+    spinlock_t *i_mmap_lock;        /* For unmap_mapping_range: */
+    unsigned long truncate_count;       /* Compare vm_truncate_count */
 };
 
-void zap_page_range(struct vm_area_struct *vma, unsigned long address,
+struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
         unsigned long size, struct zap_details *);
-int unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlb,
         struct vm_area_struct *start_vma, unsigned long start_addr,
         unsigned long end_addr, unsigned long *nr_accounted,
         struct zap_details *);
-void clear_page_tables(struct mmu_gather *tlb, unsigned long first, int nr);
+void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+        unsigned long end, unsigned long floor, unsigned long ceiling);
+void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
+        unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
             struct vm_area_struct *vma);
 int zeromap_page_range(struct vm_area_struct *vma, unsigned long from,
@@ -530,18 +720,39 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
 }
 
 extern int vmtruncate(struct inode * inode, loff_t offset);
-extern pmd_t *FASTCALL(__pmd_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_kernel(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
-extern pte_t *FASTCALL(pte_alloc_map(struct mm_struct *mm, pmd_t *pmd, unsigned long address));
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
 extern int install_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, struct page *page, pgprot_t prot);
 extern int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long pgoff, pgprot_t prot);
-extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
+
+#ifdef CONFIG_MMU
+extern int __handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma,
+            unsigned long address, int write_access);
+
+static inline int handle_mm_fault(struct mm_struct *mm,
+            struct vm_area_struct *vma, unsigned long address,
+            int write_access)
+{
+    return __handle_mm_fault(mm, vma, address, write_access) &
+                (~VM_FAULT_WRITE);
+}
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+            struct vm_area_struct *vma, unsigned long address,
+            int write_access)
+{
+    /* should never happen if there's no MMU */
+    BUG();
+    return VM_FAULT_SIGBUS;
+}
+#endif
+
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
 void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
         int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+void print_bad_pte(struct vm_area_struct *, pte_t, unsigned long);
 
 int __set_page_dirty_buffers(struct page *page);
 int __set_page_dirty_nobuffers(struct page *page);
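get_user_pages(), declared above, is the standard way to pin user memory for kernel-side I/O. A kernel-style sketch of the canonical call pattern against the 2.6 API as declared here (illustrative only, not buildable outside the tree; error handling trimmed):

    /* Sketch: pin 'nr' user pages for writing, mark dirty, release.
     * mmap_sem is taken for reading around the call, as required. */
    static int pin_user_buffer(unsigned long uaddr, int nr,
                               struct page **pages)
    {
        int got, i;

        down_read(&current->mm->mmap_sem);
        got = get_user_pages(current, current->mm, uaddr, nr,
                             1 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);
        if (got < 0)
            return got;

        for (i = 0; i < got; i++) {
            set_page_dirty_lock(pages[i]); /* we wrote through the pin  */
            put_page(pages[i]);            /* drop the extra reference  */
        }
        return got;
    }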
@@ -551,18 +762,22 @@ int FASTCALL(set_page_dirty(struct page *page));
 int set_page_dirty_lock(struct page *page);
 int clear_page_dirty_for_io(struct page *page);
 
+extern unsigned long do_mremap(unsigned long addr,
+                   unsigned long old_len, unsigned long new_len,
+                   unsigned long flags, unsigned long new_addr);
+
 /*
  * Prototype to add a shrinker callback for ageable caches.
  *
  * These functions are passed a count `nr_to_scan' and a gfpmask. They should
  * scan `nr_to_scan' objects, attempting to free them.
 *
- * The callback must the number of objects which remain in the cache.
+ * The callback must return the number of objects which remain in the cache.
 *
- * The callback will be passes nr_to_scan == 0 when the VM is querying the
+ * The callback will be passed nr_to_scan == 0 when the VM is querying the
 * cache size, so a fastpath for that case is appropriate.
 */
-typedef int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask);
+typedef int (*shrinker_t)(int nr_to_scan, gfp_t gfp_mask);
 
 /*
  * Add an aging callback. The int is the number of 'seeks' it takes
@@ -574,46 +789,116 @@ struct shrinker;
 extern struct shrinker *set_shrinker(int, shrinker_t);
 extern void remove_shrinker(struct shrinker *shrinker);
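The shrinker contract documented above is easy to get wrong, so here is a kernel-style sketch of a cache implementing it (illustrative only; my_cache_count() and my_cache_free() are hypothetical helpers, while DEFAULT_SEEKS, set_shrinker() and remove_shrinker() come from this header):

    static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
    {
        if (nr_to_scan == 0)             /* VM is only querying the  */
            return my_cache_count();     /* cache size: fastpath     */

        if (!(gfp_mask & __GFP_FS))      /* can't recurse into the FS */
            return -1;

        my_cache_free(nr_to_scan);       /* try to drop that many     */
        return my_cache_count();         /* objects still in cache    */
    }

    static struct shrinker *my_shrinker;

    static int __init my_init(void)
    {
        my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
        return my_shrinker ? 0 : -ENOMEM;
    }

    static void __exit my_exit(void)
    {
        remove_shrinker(my_shrinker);
    }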
+ */ +#define pte_lock_init(page) do {} while (0) +#define pte_lock_deinit(page) do {} while (0) +#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;}) +#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */ + +#define pte_offset_map_lock(mm, pmd, address, ptlp) \ +({ \ + spinlock_t *__ptl = pte_lockptr(mm, pmd); \ + pte_t *__pte = pte_offset_map(pmd, address); \ + *(ptlp) = __ptl; \ + spin_lock(__ptl); \ + __pte; \ +}) + +#define pte_unmap_unlock(pte, ptl) do { \ + spin_unlock(ptl); \ + pte_unmap(pte); \ +} while (0) + +#define pte_alloc_map(mm, pmd, address) \ + ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ + NULL: pte_offset_map(pmd, address)) + +#define pte_alloc_map_lock(mm, pmd, address, ptlp) \ + ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \ + NULL: pte_offset_map_lock(mm, pmd, address, ptlp)) + +#define pte_alloc_kernel(pmd, address) \ + ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \ + NULL: pte_offset_kernel(pmd, address)) + extern void free_area_init(unsigned long * zones_size); -extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap, +extern void free_area_init_node(int nid, pg_data_t *pgdat, unsigned long * zones_size, unsigned long zone_start_pfn, unsigned long *zholes_size); -extern void memmap_init_zone(struct page *, unsigned long, int, - unsigned long, unsigned long); +extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long); +extern void setup_per_zone_pages_min(void); extern void mem_init(void); extern void show_mem(void); extern void si_meminfo(struct sysinfo * val); extern void si_meminfo_node(struct sysinfo *val, int nid); -static inline void vma_prio_tree_init(struct vm_area_struct *vma) -{ - vma->shared.vm_set.list.next = NULL; - vma->shared.vm_set.list.prev = NULL; - vma->shared.vm_set.parent = NULL; - vma->shared.vm_set.head = NULL; -} +#ifdef CONFIG_NUMA +extern void setup_per_cpu_pageset(void); +#else +static inline void setup_per_cpu_pageset(void) {} +#endif /* prio_tree.c */ void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old); void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *); void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *); -struct vm_area_struct *vma_prio_tree_next( - struct vm_area_struct *, struct prio_tree_root *, - struct prio_tree_iter *, pgoff_t begin, pgoff_t end); +struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma, + struct prio_tree_iter *iter); + +#define vma_prio_tree_foreach(vma, iter, root, begin, end) \ + for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \ + (vma = vma_prio_tree_next(vma, iter)); ) + +static inline void vma_nonlinear_insert(struct vm_area_struct *vma, + struct list_head *list) +{ + vma->shared.vm_set.parent = NULL; + list_add_tail(&vma->shared.vm_set.list, list); +} /* mmap.c */ +extern int __vm_enough_memory(long pages, int cap_sys_admin); extern void vma_adjust(struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert); extern struct vm_area_struct *vma_merge(struct mm_struct *, @@ -623,14 +908,28 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *, extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int split_vma(struct mm_struct *, struct vm_area_struct *, unsigned long addr, int new_below); -extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *); +extern int 
 
 extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, pg_data_t *pgdat, struct page *pmap,
+extern void free_area_init_node(int nid, pg_data_t *pgdat,
     unsigned long * zones_size, unsigned long zone_start_pfn,
     unsigned long *zholes_size);
-extern void memmap_init_zone(struct page *, unsigned long, int,
-    unsigned long, unsigned long);
+extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long);
+extern void setup_per_zone_pages_min(void);
 extern void mem_init(void);
 extern void show_mem(void);
 extern void si_meminfo(struct sysinfo * val);
 extern void si_meminfo_node(struct sysinfo *val, int nid);
 
-static inline void vma_prio_tree_init(struct vm_area_struct *vma)
-{
-    vma->shared.vm_set.list.next = NULL;
-    vma->shared.vm_set.list.prev = NULL;
-    vma->shared.vm_set.parent = NULL;
-    vma->shared.vm_set.head = NULL;
-}
+#ifdef CONFIG_NUMA
+extern void setup_per_cpu_pageset(void);
+#else
+static inline void setup_per_cpu_pageset(void) {}
+#endif
 
 /* prio_tree.c */
 void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
 void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
 void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
-struct vm_area_struct *vma_prio_tree_next(
-    struct vm_area_struct *, struct prio_tree_root *,
-    struct prio_tree_iter *, pgoff_t begin, pgoff_t end);
+struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
+    struct prio_tree_iter *iter);
+
+#define vma_prio_tree_foreach(vma, iter, root, begin, end)  \
+    for (prio_tree_iter_init(iter, root, begin, end), vma = NULL;  \
+        (vma = vma_prio_tree_next(vma, iter)); )
+
+static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
+                    struct list_head *list)
+{
+    vma->shared.vm_set.parent = NULL;
+    list_add_tail(&vma->shared.vm_set.list, list);
+}
 
 /* mmap.c */
+extern int __vm_enough_memory(long pages, int cap_sys_admin);
 extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
     unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
@@ -623,14 +908,28 @@ extern struct vm_area_struct *vma_merge(struct mm_struct *,
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
     struct vm_area_struct *, unsigned long addr, int new_below);
-extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
 extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
     struct rb_node **, struct rb_node *);
+extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
     unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
+extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+
+extern unsigned long get_unmapped_area_prot(struct file *, unsigned long, unsigned long, unsigned long, unsigned long, int);
+
-extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+static inline unsigned long get_unmapped_area(struct file * file, unsigned long addr,
+        unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+    return get_unmapped_area_prot(file, addr, len, pgoff, flags, 0);
+}
+
+extern int install_special_mapping(struct mm_struct *mm,
+                   unsigned long addr, unsigned long len,
+                   unsigned long vm_flags, pgprot_t pgprot,
+                   struct page **pages);
 
 extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
     unsigned long len, unsigned long prot,
@@ -656,9 +955,13 @@ extern unsigned long do_brk(unsigned long, unsigned long);
 
 /* filemap.c */
 extern unsigned long page_unuse(struct page *);
 extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+                       loff_t lstart, loff_t lend);
 
 /* generic vm_area_ops exported for stackable file systems */
-struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
+extern struct page *filemap_nopage(struct vm_area_struct *, unsigned long, int *);
+extern int filemap_populate(struct vm_area_struct *, unsigned long,
+        unsigned long, pgprot_t, unsigned long, int);
 
 /* mm/page-writeback.c */
 int write_one_page(struct page *page, int wait);
@@ -666,21 +969,27 @@ int write_one_page(struct page *page, int wait);
 
 /* readahead.c */
 #define VM_MAX_READAHEAD    128 /* kbytes */
 #define VM_MIN_READAHEAD    16  /* kbytes (includes current page) */
+#define VM_MAX_CACHE_HIT    256 /* max pages in a row in cache before
+                     * turning readahead off */
 
 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
-            unsigned long offset, unsigned long nr_to_read);
+            pgoff_t offset, unsigned long nr_to_read);
 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-            unsigned long offset, unsigned long nr_to_read);
-void page_cache_readahead(struct address_space *mapping,
+            pgoff_t offset, unsigned long nr_to_read);
+unsigned long page_cache_readahead(struct address_space *mapping,
               struct file_ra_state *ra,
               struct file *filp,
-              unsigned long offset);
-void handle_ra_miss(struct address_space *mapping,
+              pgoff_t offset,
+              unsigned long size);
+void handle_ra_miss(struct address_space *mapping,
             struct file_ra_state *ra, pgoff_t offset);
 unsigned long max_sane_readahead(unsigned long nr);
 
 /* Do stack extension */
-extern int expand_stack(struct vm_area_struct * vma, unsigned long address);
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
 extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
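Note the exact semantics in the comment above: find_vma() returns the first vma with addr < vm_end, which may lie entirely above addr when addr falls in a gap. A kernel-style sketch of the standard caller-side check (illustrative only; the caller must hold mmap_sem):

    /* Sketch: NULL unless addr is actually inside a mapping. */
    static struct vm_area_struct *vma_containing(struct mm_struct *mm,
                                                 unsigned long addr)
    {
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (vma && vma->vm_start <= addr)
            return vma;     /* addr really falls inside this vma */
        return NULL;        /* addr is in a hole, or above all vmas */
    }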
@@ -703,26 +1012,62 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
     return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 }
 
-extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
-
-extern unsigned int nr_used_zone_pages(void);
-
-extern struct page * vmalloc_to_page(void *addr);
-extern struct page * follow_page(struct mm_struct *mm, unsigned long address,
-        int write);
-extern int remap_page_range(struct vm_area_struct *vma, unsigned long from,
-        unsigned long to, unsigned long size, pgprot_t prot);
+struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+struct page *vmalloc_to_page(void *addr);
+unsigned long vmalloc_to_pfn(void *addr);
+int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+        unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
+            unsigned int foll_flags);
+#define FOLL_WRITE  0x01    /* check pte is writable */
+#define FOLL_TOUCH  0x02    /* mark page accessed */
+#define FOLL_GET    0x04    /* do get_page on page */
+#define FOLL_ANON   0x08    /* give ZERO_PAGE if no pgtable */
+
+#ifdef CONFIG_PROC_FS
+void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+#else
+static inline void vm_stat_account(struct mm_struct *mm,
+            unsigned long flags, struct file *file, long pages)
+{
+}
+#endif /* CONFIG_PROC_FS */
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
 static inline void
 kernel_map_pages(struct page *page, int numpages, int enable)
 {
+    if (!PageHighMem(page) && !enable)
+        mutex_debug_check_no_locks_freed(page_address(page),
+                         numpages * PAGE_SIZE);
 }
 #endif
 
-#ifndef CONFIG_ARCH_GATE_AREA
 extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+#ifdef __HAVE_ARCH_GATE_AREA
+int in_gate_area_no_task(unsigned long addr);
 int in_gate_area(struct task_struct *task, unsigned long addr);
+#else
+int in_gate_area_no_task(unsigned long addr);
+#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+#endif  /* __HAVE_ARCH_GATE_AREA */
+
+/* /proc/<pid>/oom_adj set to -17 protects from the oom-killer */
+#define OOM_DISABLE -17
+
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+                    void __user *, size_t *, loff_t *);
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+            unsigned long lru_pages);
+void drop_pagecache(void);
+void drop_slab(void);
+
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
 #endif
 
 #endif /* __KERNEL__ */
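remap_pfn_range(), added in the final hunk, is the replacement for remap_page_range() and the usual way a driver maps a physical buffer straight into userspace; such ranges are managed without struct page (the VM_PFNMAP flag from the first hunk). A kernel-style sketch of the classic consumer (illustrative only; MYDEV_PHYS is a hypothetical device address):

    /* Sketch: a driver's mmap() method backed by a physical region. */
    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
        unsigned long size = vma->vm_end - vma->vm_start;

        /* count these pages as reserved_vm, per the VM_RESERVED comment */
        vma->vm_flags |= VM_RESERVED;
        if (remap_pfn_range(vma, vma->vm_start,
                            MYDEV_PHYS >> PAGE_SHIFT,  /* first pfn */
                            size, vma->vm_page_prot))
            return -EAGAIN;
        return 0;
    }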