diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index e5acf3596..4defabecd 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -8,6 +8,7 @@
 #include <linux/fs.h>
 #include <linux/list.h>
 #include <linux/highmem.h>
 #include <linux/compiler.h>
 #include <asm/uaccess.h>
+#include <linux/gfp.h>
 
@@ -18,18 +19,19 @@
 #define AS_EIO		(__GFP_BITS_SHIFT + 0)	/* IO error on async write */
 #define AS_ENOSPC	(__GFP_BITS_SHIFT + 1)	/* ENOSPC on async write */
 
-static inline int mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
-	return mapping->flags & __GFP_BITS_MASK;
+	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
 }
 
 /*
  * This is non-atomic.  Only to be used before the mapping is activated.
  * Probably needs a barrier...
  */
-static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
+static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 {
-	m->flags = (m->flags & ~__GFP_BITS_MASK) | mask;
+	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
+				(__force unsigned long)mask;
 }
 
 /*
@@ -49,14 +51,23 @@ static inline void mapping_set_gfp_mask(struct address_space *m, int mask)
 #define page_cache_release(page)	put_page(page)
 void release_pages(struct page **pages, int nr, int cold);
 
+#ifdef CONFIG_NUMA
+extern struct page *__page_cache_alloc(gfp_t gfp);
+#else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+	return alloc_pages(gfp, 0);
+}
+#endif
+
 static inline struct page *page_cache_alloc(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x), 0);
+	return __page_cache_alloc(mapping_gfp_mask(x));
 }
 
 static inline struct page *page_cache_alloc_cold(struct address_space *x)
 {
-	return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0);
+	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
 }
 
 typedef int filler_t(void *, struct page *);
@@ -65,12 +76,14 @@ extern struct page * find_get_page(struct address_space *mapping,
 				unsigned long index);
 extern struct page * find_lock_page(struct address_space *mapping,
 				unsigned long index);
-extern struct page * find_trylock_page(struct address_space *mapping,
-				unsigned long index);
+extern __deprecated_for_modules struct page * find_trylock_page(
+			struct address_space *mapping, unsigned long index);
 extern struct page * find_or_create_page(struct address_space *mapping,
-				unsigned long index, unsigned int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 			unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+			       unsigned int nr_pages, struct page **pages);
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 			int tag, unsigned int nr_pages, struct page **pages);
 
@@ -90,53 +103,26 @@ extern struct page * read_cache_page(struct address_space *mapping,
 extern int read_cache_pages(struct address_space *mapping,
 		struct list_head *pages, filler_t *filler, void *data);
 
+static inline struct page *read_mapping_page(struct address_space *mapping,
+					     unsigned long index, void *data)
+{
+	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+	return read_cache_page(mapping, index, filler, data);
+}
+
 int add_to_page_cache(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
-				unsigned long index, int gfp_mask);
+				unsigned long index, gfp_t gfp_mask);
 extern void remove_from_page_cache(struct page *page);
 extern void __remove_from_page_cache(struct page *page);
 
-extern atomic_t nr_pagecache;
-
-#ifdef CONFIG_SMP
-
-#define PAGECACHE_ACCT_THRESHOLD	max(16, NR_CPUS * 2)
-DECLARE_PER_CPU(long, nr_pagecache_local);
-
 /*
- * pagecache_acct implements approximate accounting for pagecache.
- * vm_enough_memory() do not need high accuracy. Writers will keep
- * an offset in their per-cpu arena and will spill that into the
- * global count whenever the absolute value of the local count
- * exceeds the counter's threshold.
- *
- * MUST be protected from preemption.
- * current protection is mapping->page_lock.
+ * Return byte-offset into filesystem object for page.
  */
-static inline void pagecache_acct(int count)
+static inline loff_t page_offset(struct page *page)
 {
-	long *local;
-
-	local = &__get_cpu_var(nr_pagecache_local);
-	*local += count;
-	if (*local > PAGECACHE_ACCT_THRESHOLD || *local < -PAGECACHE_ACCT_THRESHOLD) {
-		atomic_add(*local, &nr_pagecache);
-		*local = 0;
-	}
-}
-
-#else
-
-static inline void pagecache_acct(int count)
-{
-	atomic_add(count, &nr_pagecache);
-}
-#endif
-
-static inline unsigned long get_page_cache_size(void)
-{
-	return atomic_read(&nr_pagecache);
+	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
 }
 
 static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
@@ -148,13 +134,29 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
 }
 
 extern void FASTCALL(__lock_page(struct page *page));
+extern void FASTCALL(__lock_page_nosync(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
 static inline void lock_page(struct page *page)
 {
+	might_sleep();
 	if (TestSetPageLocked(page))
 		__lock_page(page);
 }
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+	might_sleep();
+	if (TestSetPageLocked(page))
+		__lock_page_nosync(page);
+}
 
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
@@ -186,6 +188,12 @@ static inline void wait_on_page_writeback(struct page *page)
 
 extern void end_page_writeback(struct page *page);
 
+/*
+ * permit installation of a state change monitor in the queue for a page
+ */
+extern void install_page_waitqueue_monitor(struct page *page,
+						wait_queue_t *monitor);
+
 /*
  * Fault a userspace page into pagetables.  Return non-zero on a fault.
  *
@@ -220,13 +228,13 @@ static inline void fault_in_pages_readable(const char __user *uaddr, int size)
 	volatile char c;
 	int ret;
 
-	ret = __get_user(c, (char *)uaddr);
+	ret = __get_user(c, uaddr);
 	if (ret == 0) {
 		const char __user *end = uaddr + size - 1;
 
 		if (((unsigned long)uaddr & PAGE_MASK) !=
 				((unsigned long)end & PAGE_MASK))
-			__get_user(c, (char *)end);
+			__get_user(c, end);
 	}
 }
 
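The hunks above retype the per-mapping allocation mask as gfp_t, add the
NUMA-aware __page_cache_alloc(), and introduce the read_mapping_page() and
page_offset() helpers. The sketch below shows how a caller might combine the
new helpers; it is illustrative only and not part of the patch, and
example_read_block() is a hypothetical function invented for this example.

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical caller exercising the helpers added by this patch. */
static int example_read_block(struct address_space *mapping,
			      unsigned long index)
{
	struct page *page;
	loff_t pos;

	/*
	 * read_mapping_page() wraps read_cache_page(), using the
	 * mapping's own ->readpage method as the filler.
	 */
	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);

	/* page_offset() is the page's byte offset within the file. */
	pos = page_offset(page);
	printk(KERN_DEBUG "page %lu starts at offset %lld\n",
			index, (long long)pos);

	/*
	 * lock_page() can sleep; the might_sleep() annotation added by
	 * this patch makes misuse from atomic context easier to catch.
	 */
	lock_page(page);
	/* ... examine or modify the page contents here ... */
	unlock_page(page);

	page_cache_release(page);
	return 0;
}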