#include <linux/list.h>
#include <linux/mmzone.h>
#include <linux/rbtree.h>
-#include <linux/vinline.h>
+#include <linux/prio_tree.h>
#include <linux/fs.h>
+struct mempolicy;
+struct anon_vma;
+
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
#endif
* per VM-area/task. A VM area is any part of the process virtual memory
* space that has a special rule for the page-fault handlers (ie a shared
* library, the executable area etc).
- *
- * This structure is exactly 64 bytes on ia32. Please think very, very hard
- * before adding anything to it.
*/
struct vm_area_struct {
struct mm_struct * vm_mm; /* The address space we belong to. */
/*
* For areas with an address space and backing store,
- * one of the address_space->i_mmap{,shared} lists,
- * for shm areas, the list of attaches, otherwise unused.
+ * linkage into the address_space->i_mmap prio tree, or
+ * linkage to the list of like vmas hanging off its node, or
+ * linkage of vma in the address_space->i_mmap_nonlinear list.
+ */
+ union {
+ struct {
+ struct list_head list;
+ void *parent; /* aligns with prio_tree_node parent */
+ struct vm_area_struct *head;
+ } vm_set;
+
+ struct prio_tree_node prio_tree_node;
+ } shared;
+
+ /*
+ * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
+ * list, after a COW of one of the file pages. A MAP_SHARED vma
+ * can only be in the i_mmap tree. An anonymous MAP_PRIVATE, stack
+ * or brk vma (with NULL file) can only be in an anon_vma list.
*/
- struct list_head shared;
+ struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
+ struct anon_vma *anon_vma; /* Serialized by page_table_lock */
/* Function pointers to deal with this struct. */
struct vm_operations_struct * vm_ops;
units, *not* PAGE_CACHE_SIZE */
struct file * vm_file; /* File we map to (can be NULL). */
void * vm_private_data; /* was vm_pte (shared mem) */
+
+#ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+#endif
};
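The vm_set / prio_tree_node union above only works if vm_set.parent really does overlay the parent pointer inside struct prio_tree_node, as the field comment claims. A purely illustrative compile-time check of that layout assumption (not part of the patch; vma_shared_layout_check is a hypothetical name, and it assumes offsetof from <linux/stddef.h> and a BUILD_BUG_ON-style assertion are available) could look like:

static inline void vma_shared_layout_check(void)	/* hypothetical helper */
{
	/* vm_set.parent must overlay prio_tree_node.parent for the union trick */
	BUILD_BUG_ON(offsetof(struct vm_area_struct, shared.vm_set.parent) !=
		     offsetof(struct vm_area_struct, shared.prio_tree_node.parent));
}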
/*
#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
-/* It makes sense to apply VM_ACCOUNT to this vma. */
-#define VM_MAYACCT(vma) (!!((vma)->vm_flags & VM_HUGETLB))
-
#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
void (*close)(struct vm_area_struct * area);
struct page * (*nopage)(struct vm_area_struct * area, unsigned long address, int *type);
int (*populate)(struct vm_area_struct * area, unsigned long address, unsigned long len, pgprot_t prot, unsigned long pgoff, int nonblock);
+#ifdef CONFIG_NUMA
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+ unsigned long addr);
+#endif
};
-/* forward declaration; pte_chain is meant to be internal to rmap.c */
-struct pte_chain;
struct mmu_gather;
struct inode;
* it to keep track of whatever it is we are using the page for at the
* moment. Note that we have no way to track which tasks are using
* a page.
- *
- * Try to keep the most commonly accessed fields in single cache lines
- * here (16 bytes or greater). This ordering should be particularly
- * beneficial on 32-bit processors.
- *
- * The first line is data used in page cache lookup, the second line
- * is used for linear searches (eg. clock algorithm scans).
- *
- * TODO: make this structure smaller, it could be as small as 32 bytes.
*/
struct page {
- page_flags_t flags; /* atomic flags, some possibly
- updated asynchronously */
- atomic_t count; /* Usage count, see below. */
- struct address_space *mapping; /* The inode (or ...) we belong to. */
- pgoff_t index; /* Our offset within mapping. */
- struct list_head lru; /* Pageout list, eg. active_list;
- protected by zone->lru_lock !! */
- union {
- struct pte_chain *chain;/* Reverse pte mapping pointer.
- * protected by PG_chainlock */
- pte_addr_t direct;
- } pte;
+ page_flags_t flags; /* Atomic flags, some possibly
+ * updated asynchronously */
+ atomic_t _count; /* Usage count, see below. */
+ unsigned int mapcount; /* Count of ptes mapped in mms,
+ * to show when page is mapped
+ * & limit reverse map searches,
+ * protected by PG_maplock.
+ */
unsigned long private; /* Mapping-private opaque data:
* usually used for buffer_heads
* if PagePrivate set; used for
* swp_entry_t if PageSwapCache
*/
+ struct address_space *mapping; /* If PG_anon clear, points to
+ * inode address_space, or NULL.
+ * If page mapped as anonymous
+ * memory, PG_anon is set, and
+ * it points to anon_vma object.
+ */
+ pgoff_t index; /* Our offset within mapping. */
+ struct list_head lru; /* Pageout list, eg. active_list
+ * protected by zone->lru_lock !
+ */
/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
*
* Also, many kernel routines increase the page count before a critical
* routine so they can be sure the page doesn't go away from under them.
+ *
+ * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
+ * can use atomic_add_negative(-1, &page->_count) to detect when the page
+ * becomes free and so that we can also use atomic_inc_and_test to atomically
+ * detect when we just tried to grab a ref on a page which some other CPU has
+ * already deemed to be freeable.
+ *
+ * NO code should make assumptions about this internal detail! Use the provided
+ * macros which retain the old rules: page_count(page) == 0 is a free page.
+ */
+
+/*
+ * Drop a ref, return true if the logical refcount fell to zero (the page has
+ * no users)
*/
#define put_page_testzero(p) \
({ \
BUG_ON(page_count(p) == 0); \
- atomic_dec_and_test(&(p)->count); \
+ atomic_add_negative(-1, &(p)->_count); \
})
-#define set_page_count(p,v) atomic_set(&(p)->count, v)
-#define __put_page(p) atomic_dec(&(p)->count)
+/*
+ * Grab a ref, return true if the page previously had a logical refcount of
+ * zero, i.e. returns true if we just grabbed an already-deemed-to-be-free page
+ */
+#define get_page_testone(p) atomic_inc_and_test(&(p)->_count)
+
+#define set_page_count(p,v) 	atomic_set(&(p)->_count, (v) - 1)
+#define __put_page(p) atomic_dec(&(p)->_count)
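To make the bias concrete, here is a minimal walkthrough (illustrative only, not part of the patch; example_count_walkthrough is a hypothetical name, assuming a non-compound page owned by the caller) of how the macros above move the raw _count versus the logical page_count():

static inline int example_count_walkthrough(struct page *page)
{
	set_page_count(page, 1);	/* _count = 0,  page_count() == 1 */
	get_page_testone(page);		/* _count = 1,  returns 0: was not free */
	put_page_testzero(page);	/* _count = 0,  returns 0: still in use */
	return put_page_testzero(page);	/* _count = -1, returns 1: no users left */
}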
extern void FASTCALL(__page_cache_release(struct page *));
{
if (PageCompound(p))
p = (struct page *)p->private;
- return atomic_read(&(p)->count);
+ return atomic_read(&(p)->_count) + 1;
}
static inline void get_page(struct page *page)
{
if (unlikely(PageCompound(page)))
page = (struct page *)page->private;
- atomic_inc(&page->count);
+ atomic_inc(&page->_count);
}
void put_page(struct page *page);
#else /* CONFIG_HUGETLB_PAGE */
-#define page_count(p) atomic_read(&(p)->count)
+#define page_count(p) (atomic_read(&(p)->_count) + 1)
static inline void get_page(struct page *page)
{
- atomic_inc(&page->count);
+ atomic_inc(&page->_count);
}
static inline void put_page(struct page *page)
* zeroes, and text pages of executables and shared libraries have
* only one copy in memory, at most, normally.
*
- * For the non-reserved pages, page->count denotes a reference count.
- * page->count == 0 means the page is free.
- * page->count == 1 means the page is used for exactly one purpose
+ * For the non-reserved pages, page_count(page) denotes a reference count.
+ * page_count() == 0 means the page is free.
+ * page_count() == 1 means the page is used for exactly one purpose
* (e.g. a private data page of one process).
*
* A page may be used for kmalloc() or anyone else who does a
- * __get_free_page(). In this case the page->count is at least 1, and
+ * __get_free_page(). In this case the page_count() is at least 1, and
* all other fields are unused but should be 0 or NULL. The
* management of this page is the responsibility of the one who uses
* it.
* page's address_space. Usually, this is the address of a circular
* list of the page's disk buffers.
*
- * For pages belonging to inodes, the page->count is the number of
+ * For pages belonging to inodes, the page_count() is the number of
* attaches, plus 1 if `private' contains something, plus one for
* the page cache itself.
*
* All pages belonging to an inode are in these doubly linked lists:
* mapping->clean_pages, mapping->dirty_pages and mapping->locked_pages;
* using the page->list list_head. These fields are also used for
- * freelist managemet (when page->count==0).
+ * freelist management (when page_count()==0).
*
* There is also a per-mapping radix tree mapping index to the page
* in memory if present. The tree is rooted at mapping->root.
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
+extern struct address_space swapper_space;
static inline struct address_space *page_mapping(struct page *page)
{
- return PageAnon(page)? NULL: page->mapping;
+ struct address_space *mapping = NULL;
+
+ if (unlikely(PageSwapCache(page)))
+ mapping = &swapper_space;
+ else if (likely(!PageAnon(page)))
+ mapping = page->mapping;
+ return mapping;
+}
+
+/*
+ * Return the pagecache index of the passed page. Regular pagecache pages
+ * use ->index whereas swapcache pages use ->private
+ */
+static inline pgoff_t page_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return page->private;
+ return page->index;
}
/*
- * Return true if this page is mapped into pagetables. Subtle: test pte.direct
- * rather than pte.chain. Because sometimes pte.direct is 64-bit, and .chain
- * is only 32-bit.
+ * Return true if this page is mapped into pagetables.
*/
static inline int page_mapped(struct page *page)
{
- return page->pte.direct != 0;
+ return page->mapcount != 0;
}
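Taken together, the helpers above let reverse-mapping code decide how to reach a page's ptes without any pte chains. A rough, purely illustrative dispatch (example_rmap_path is a hypothetical name; the real logic lives in mm/rmap.c):

/* 0: nothing mapped, 1: walk the anon_vma list, 2: walk the i_mmap prio tree */
static inline int example_rmap_path(struct page *page)
{
	if (!page_mapped(page))
		return 0;		/* no ptes reference this page */
	if (PageAnon(page))
		return 1;		/* page->mapping points at an anon_vma */
	return page_mapping(page) ? 2 : 0;
}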
/*
struct page *shmem_nopage(struct vm_area_struct * vma,
unsigned long address, int *type);
+int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new);
+struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
+ unsigned long addr);
struct file *shmem_file_setup(char * name, loff_t size, unsigned long flags);
void shmem_lock(struct file * file, int lock);
int shmem_zero_setup(struct vm_area_struct *);
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ int atomic; /* May not schedule() */
};
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
extern int handle_mm_fault(struct mm_struct *mm,struct vm_area_struct *vma, unsigned long address, int write_access);
extern int make_pages_present(unsigned long addr, unsigned long end);
extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
-void put_dirty_page(struct task_struct *tsk, struct page *page,
- unsigned long address, pgprot_t prot);
+void install_arg_page(struct vm_area_struct *, struct page *, unsigned long);
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
extern void si_meminfo(struct sysinfo * val);
extern void si_meminfo_node(struct sysinfo *val, int nid);
+static inline void vma_prio_tree_init(struct vm_area_struct *vma)
+{
+ vma->shared.vm_set.list.next = NULL;
+ vma->shared.vm_set.list.prev = NULL;
+ vma->shared.vm_set.parent = NULL;
+ vma->shared.vm_set.head = NULL;
+}
+
+/* prio_tree.c */
+void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
+void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
+void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
+struct vm_area_struct *vma_prio_tree_next(
+ struct vm_area_struct *, struct prio_tree_root *,
+ struct prio_tree_iter *, pgoff_t begin, pgoff_t end);
+
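A minimal sketch of how a caller might walk the i_mmap prio tree with vma_prio_tree_next (illustrative only; example_walk_overlaps is a hypothetical name, and locking of the mapping is deliberately omitted):

static void example_walk_overlaps(struct prio_tree_root *root,
				  pgoff_t begin, pgoff_t end)
{
	struct vm_area_struct *vma = NULL;
	struct prio_tree_iter iter;

	/* each pass yields a vma whose mapped file range overlaps [begin, end] */
	while ((vma = vma_prio_tree_next(vma, root, &iter, begin, end)) != NULL) {
		/* act on vma here, e.g. unmap its overlapping pages */
	}
}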
/* mmap.c */
+extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+extern struct vm_area_struct *vma_merge(struct mm_struct *,
+ struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+ unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
+ struct mempolicy *);
+extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+extern int split_vma(struct mm_struct *,
+ struct vm_area_struct *, unsigned long addr, int new_below);
extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
struct rb_node **, struct rb_node *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
- unsigned long addr, unsigned long len, unsigned long pgoff);
+ unsigned long addr, unsigned long len, pgoff_t pgoff);
extern void exit_mmap(struct mm_struct *);
extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
extern unsigned long do_brk(unsigned long, unsigned long);
-static inline void
-__vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
- struct vm_area_struct *prev)
-{
- prev->vm_next = vma->vm_next;
- rb_erase(&vma->vm_rb, &mm->mm_rb);
- if (mm->mmap_cache == vma)
- mm->mmap_cache = prev;
-}
-
-static inline int
-can_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags)
-{
-#ifdef CONFIG_MMU
- if (!vma->vm_file && vma->vm_flags == vm_flags)
- return 1;
-#endif
- return 0;
-}
-
/* filemap.c */
extern unsigned long page_unuse(struct page *);
extern void truncate_inode_pages(struct address_space *, loff_t);
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
struct vm_area_struct **pprev);
-extern int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
- unsigned long addr, int new_below);
/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
NULL if none. Assume start_addr < end_addr. */
return vma;
}
+static inline unsigned long vma_pages(struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
extern struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr);
extern unsigned int nr_used_zone_pages(void);