#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
#define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
#define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
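
/*
 * Hedged sketch, not part of the original header: how a writeback-sync
 * path might report an error recorded in mapping->flags.  The helper
 * name is hypothetical; <linux/errno.h> is pulled in for the error
 * codes used here and in the later sketches.
 */
#include <linux/errno.h>

static inline int pagemap_example_report_error(struct address_space *mapping)
{
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
	return 0;
}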
static inline int mapping_gfp_mask(struct address_space * mapping)
{
	return mapping->flags & __GFP_BITS_MASK;
}
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
{
	m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
}
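
/*
 * Hedged sketch, not part of the original header: a filesystem that must
 * not recurse into itself during reclaim could restrict its pagecache
 * allocations before the mapping goes live.  Hypothetical helper name.
 */
static inline void pagemap_example_set_nofs(struct address_space *mapping)
{
	mapping_set_gfp_mask(mapping, GFP_NOFS);	/* non-atomic: setup only */
}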
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
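
/*
 * Hedged sketch, not part of the original header: PAGE_CACHE_ALIGN rounds
 * up to the next page cache boundary, so the number of pagecache pages
 * covering a nonempty byte range falls out of the macros above.
 * Hypothetical helper name.
 */
static inline unsigned long pagemap_example_nr_pages(unsigned long addr,
						     unsigned long len)
{
	/* round the end up, the start down, and count whole pages */
	return (PAGE_CACHE_ALIGN(addr + len) - (addr & PAGE_CACHE_MASK))
			>> PAGE_CACHE_SHIFT;
}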
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);
static inline struct page *page_cache_alloc(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x), 0);
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
}
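
/*
 * Hedged sketch, not part of the original header: allocate a pagecache
 * page with the mapping's preferred allocation mode and drop it again.
 */
static inline int pagemap_example_alloc(struct address_space *x)
{
	struct page *page = page_cache_alloc(x);

	if (!page)
		return -ENOMEM;
	page_cache_release(page);	/* drop the reference alloc gave us */
	return 0;
}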
typedef int filler_t(void *, struct page *);
extern struct page * find_get_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_lock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_trylock_page(struct address_space *mapping,
				unsigned long index);
extern struct page * find_or_create_page(struct address_space *mapping,
				unsigned long index, unsigned int gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
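
/*
 * Hedged sketch, not part of the original header: the usual lookup
 * pattern.  A successful find_get_page() returns the page with an
 * elevated reference count which the caller must drop.
 */
static inline int pagemap_example_is_cached(struct address_space *mapping,
				unsigned long index)
{
	struct page *page = find_get_page(mapping, index);

	if (!page)
		return 0;		/* not in the cache */
	/* the page is referenced here, but not locked */
	page_cache_release(page);
	return 1;
}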
/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping, unsigned long index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				unsigned long index);
extern struct page * read_cache_page(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);
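
/*
 * Hedged sketch, not part of the original header: reading one page
 * through read_cache_page().  'filler' stands for a filesystem-supplied
 * filler_t, typically wrapping its readpage routine; <linux/err.h> is
 * assumed to provide IS_ERR for the error-pointer return convention.
 */
#include <linux/err.h>

static inline struct page *pagemap_example_read(struct address_space *mapping,
				unsigned long index, filler_t *filler,
				void *data)
{
	struct page *page = read_cache_page(mapping, index, filler, data);

	if (IS_ERR(page))
		return NULL;		/* allocation or filler failure */
	return page;			/* referenced; release when done */
}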
int add_to_page_cache(struct page *page, struct address_space *mapping,
		unsigned long index, int gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		unsigned long index, int gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);
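
/*
 * Hedged sketch, not part of the original header: how a new page enters
 * the cache.  On success add_to_page_cache_lru() leaves the page locked
 * and on the LRU; on failure we must drop our own reference.
 */
static inline struct page *pagemap_example_insert(struct address_space *mapping,
				unsigned long index)
{
	struct page *page = page_cache_alloc(mapping);

	if (page && add_to_page_cache_lru(page, mapping, index,
				mapping_gfp_mask(mapping))) {
		page_cache_release(page);
		page = NULL;
	}
	return page;	/* locked and referenced, or NULL */
}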
extern atomic_t nr_pagecache;

#ifdef CONFIG_SMP

#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
DECLARE_PER_CPU(long, nr_pagecache_local);
/*
 * pagecache_acct implements approximate accounting for pagecache.
 * vm_enough_memory() does not need high accuracy. Writers will keep
 * an offset in their per-cpu arena and will spill that into the
 * global count whenever the absolute value of the local count
 * exceeds the counter's threshold.
 *
 * MUST be protected from preemption.
 * Current protection is mapping->page_lock.
 */
static inline void pagecache_acct(int count)
{
	long *local;

	local = &__get_cpu_var(nr_pagecache_local);
	*local += count;
	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
		atomic_add(*local, &nr_pagecache);
		*local = 0;
	}
}
#else

static inline void pagecache_acct(int count)
{
	atomic_add(count, &nr_pagecache);
}
#endif
static inline unsigned long get_page_cache_size(void)
{
	return atomic_read(&nr_pagecache);
}
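
/*
 * Hedged sketch, not part of the original header: under SMP each CPU may
 * hold up to PAGECACHE_ACCT_THRESHOLD pages of unspilled slack, so treat
 * the figure below as an estimate (and note it can overflow on 32-bit
 * for very large caches).  Hypothetical helper name.
 */
static inline unsigned long pagemap_example_cache_bytes(void)
{
	return get_page_cache_size() << PAGE_CACHE_SHIFT;	/* pages -> bytes */
}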
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
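
/*
 * Hedged sketch, not part of the original header: with the identity
 * PAGE_CACHE_SHIFT == PAGE_SHIFT above, an address two pages into a vma
 * maps to pagecache index vma->vm_pgoff + 2.
 */
static inline pgoff_t pagemap_example_index(struct vm_area_struct *vma)
{
	return linear_page_index(vma, vma->vm_start + 2 * PAGE_SIZE);
}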
extern void FASTCALL(__lock_page(struct page *page));
extern void FASTCALL(unlock_page(struct page *page));

static inline void lock_page(struct page *page)
{
	if (TestSetPageLocked(page))
		__lock_page(page);
}
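
/*
 * Hedged sketch, not part of the original header: the lock/unlock
 * pairing.  lock_page() may sleep in __lock_page(), so this must not
 * be called from atomic context.
 */
static inline void pagemap_example_lock(struct page *page)
{
	lock_page(page);	/* fast path: one test_and_set_bit */
	/* the page is exclusively locked here */
	unlock_page(page);	/* wakes sleepers in wait_on_page_bit */
}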
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void FASTCALL(wait_on_page_bit(struct page *page, int bit_nr));
/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}
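
/*
 * Hedged sketch, not part of the original header: honouring the rule in
 * the comment above by pinning the page before waiting on it.
 */
static inline void pagemap_example_wait(struct page *page)
{
	page_cache_get(page);		/* so the page cannot go away */
	wait_on_page_locked(page);
	page_cache_release(page);
}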
/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
extern void end_page_writeback(struct page *page);
/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
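
/*
 * Hedged sketch, not part of the original header: the classic use is to
 * pre-fault a user buffer before taking locks that the fault path would
 * also need, as in a generic write path.  Hypothetical helper name.
 */
static inline int pagemap_example_prefault(char __user *buf, int bytes)
{
	if (fault_in_pages_writeable(buf, bytes))
		return -EFAULT;	/* buffer not writable at all */
	/* a fault can still happen later; callers handle short copies */
	return 0;
}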
static inline void fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	ret = __get_user(c, (char *)uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			__get_user(c, (char *)end);
	}
}
#endif /* _LINUX_PAGEMAP_H */