X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Ffilemap.c;h=96f354e183ce2f1e241daad9e7999f7ada71e0f8;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=a3aae8bec9ae552d36aacb4bc097c6d84f4b328e;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/mm/filemap.c b/mm/filemap.c
index a3aae8bec..96f354e18 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -9,11 +9,11 @@
  * most "normal" filesystems (but you don't /have/ to use this:
  * the NFS filesystem used to do this differently, for example)
  */
+#include
 #include
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -38,6 +38,7 @@
  */
 #include  /* for generic_osync_inode */
+#include
 #include

 static ssize_t
@@ -119,7 +120,7 @@ void __remove_from_page_cache(struct page *page)
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	__dec_zone_page_state(page, NR_FILE_PAGES);
+	pagecache_acct(-1);
 }

 void remove_from_page_cache(struct page *page)
@@ -170,17 +171,15 @@ static int sync_page(void *word)
 }

 /**
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
+ * filemap_fdatawrite_range - start writeback against all of a mapping's
+ * dirty pages that lie within the byte offsets <start, end>
  * @mapping:	address space structure to write
  * @start:	offset in bytes where the range starts
  * @end:	offset in bytes where the range ends (inclusive)
  * @sync_mode:	enable synchronous operation
  *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets <start, end> inclusive.
- *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
  * opposed to a regular memory cleansing writeback.  The difference between
  * these two operations is that if a dirty page/buffer is encountered, it must
  * be waited upon, and not just skipped over.
  */
@@ -191,8 +190,8 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
 		.nr_to_write = mapping->nrpages * 2,
-		.range_start = start,
-		.range_end = end,
+		.start = start,
+		.end = end,
 	};

 	if (!mapping_cap_writeback_dirty(mapping))
@@ -205,7 +204,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 	int sync_mode)
 {
-	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
+	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
 }

 int filemap_fdatawrite(struct address_space *mapping)
@@ -220,10 +219,7 @@ static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }

-/**
- * filemap_flush - mostly a non-blocking flush
- * @mapping:	target address_space
- *
+/*
  * This is a mostly non-blocking flush.  Not suitable for data-integrity
  * purposes - I/O may not be started against all dirty pages.
  */
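Two independent reverts meet in the hunks above: per-zone page-cache accounting (__dec_zone_page_state(page, NR_FILE_PAGES)) goes back to the older global pagecache_acct() counter, and struct writeback_control loses .range_start/.range_end in favour of the older .start/.end pair, where 0/0 means "the whole file" (hence LLONG_MAX becoming 0 in __filemap_fdatawrite()). As a sketch of how a caller drives ranged writeback through that structure, using the pre-2.6.18 field names from the '+' side of the hunk; the helper itself is hypothetical, not part of the patch:

#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>

/* Hypothetical helper: synchronously write back a single page. */
static int start_page_writeback(struct address_space *mapping, pgoff_t index)
{
	struct writeback_control wbc = {
		.sync_mode   = WB_SYNC_ALL,	/* data integrity, not cleansing */
		.nr_to_write = 1,
		/* older field names, as on the '+' side above */
		.start       = (loff_t)index << PAGE_CACHE_SHIFT,
		.end         = ((loff_t)(index + 1) << PAGE_CACHE_SHIFT) - 1,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;	/* e.g. ramfs: dirty pages are never written back */

	return do_writepages(mapping, &wbc);
}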
@@ -233,12 +229,7 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);

-/**
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping:	target address_space
- * @start:	beginning page index
- * @end:	ending page index
- *
+/*
  * Wait for writeback to complete against pages indexed by start->end
  * inclusive
  */
@@ -285,13 +276,7 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 	return ret;
 }

-/**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode:	target inode
- * @mapping:	target address_space
- * @pos:	beginning offset in pages to write
- * @count:	number of bytes to write
- *
+/*
  * Write and wait upon all the pages in the passed range.  This is a "data
  * integrity" operation.  It waits upon in-flight writeout before starting and
  * waiting upon new writeout.  If there was an IO error, return it.
@@ -320,13 +305,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 }
 EXPORT_SYMBOL(sync_page_range);

-/**
- * sync_page_range_nolock
- * @inode:	target inode
- * @mapping:	target address_space
- * @pos:	beginning offset in pages to write
- * @count:	number of bytes to write
- *
+/*
  * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
@@ -350,11 +329,10 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range_nolock);

 /**
- * filemap_fdatawait - wait for all under-writeback pages to complete
- * @mapping: address space structure to wait for
+ * filemap_fdatawait - walk the list of under-writeback pages of the given
+ * address space and wait for all of them.
  *
- * Walk the list of under-writeback pages of the given address space
- * and wait for all of them.
+ * @mapping: address space structure to wait for
  */
 int filemap_fdatawait(struct address_space *mapping)
 {
@@ -390,12 +368,7 @@ int filemap_write_and_wait(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_write_and_wait);

-/**
- * filemap_write_and_wait_range - write out & wait on a file range
- * @mapping:	the address_space for the pages
- * @lstart:	offset in bytes where the range starts
- * @lend:	offset in bytes where the range ends (inclusive)
- *
+/*
  * Write out and wait upon file offsets lstart->lend, inclusive.
  *
  * Note that `lend' is inclusive (describes the last byte to be written) so
@@ -421,14 +394,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 	return err;
 }

-/**
- * add_to_page_cache - add newly allocated pagecache pages
- * @page:	page to add
- * @mapping:	the page's address_space
- * @offset:	page index
- * @gfp_mask:	page allocation mode
- *
+/*
+ * This function is used to add newly allocated pagecache pages:
  * the page is new, so we can just run SetPageLocked() against it.
  * The other page state flags were set by rmqueue().
  *
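The comments being rewritten above all describe the same write-then-wait discipline: for data integrity, first start writeback over the range, then wait on every page in it. A compact sketch of what filemap_write_and_wait() amounts to; the wrapper name is invented, but the -EIO special case mirrors the real function:

#include <linux/fs.h>

/* Hypothetical wrapper: flush a whole mapping and wait for completion. */
static int flush_mapping(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);	/* start I/O on dirty pages */
		/*
		 * Even on error the pages may have been partially written
		 * (e.g. -ENOSPC), so wait anyway, except for -EIO, where
		 * waiting would only report the same error again.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);	/* wait on writeback */
			if (!err)
				err = err2;
		}
	}
	return err;
}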
@@ -448,13 +415,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping,
 			page->mapping = mapping;
 			page->index = offset;
 			mapping->nrpages++;
-			__inc_zone_page_state(page, NR_FILE_PAGES);
+			pagecache_acct(1);
 		}
 		write_unlock_irq(&mapping->tree_lock);
 		radix_tree_preload_end();
 	}
 	return error;
 }
+
 EXPORT_SYMBOL(add_to_page_cache);

 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
@@ -520,20 +488,9 @@ void fastcall wait_on_page_bit(struct page *page, int bit_nr)
 }
 EXPORT_SYMBOL(wait_on_page_bit);

-void install_page_waitqueue_monitor(struct page *page, wait_queue_t *monitor)
-{
-	wait_queue_head_t *q = page_waitqueue(page);
-	unsigned long flags;
-
-	spin_lock_irqsave(&q->lock, flags);
-	__add_wait_queue(q, monitor);
-	spin_unlock_irqrestore(&q->lock, flags);
-}
-
-EXPORT_SYMBOL_GPL(install_page_waitqueue_monitor);
-
 /**
- * unlock_page - unlock a locked page
+ * unlock_page() - unlock a locked page
+ *
  * @page: the page
  *
  * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
@@ -556,9 +513,8 @@ void fastcall unlock_page(struct page *page)
 }
 EXPORT_SYMBOL(unlock_page);

-/**
- * end_page_writeback - end writeback against a page
- * @page: the page
+/*
+ * End writeback against a page.
  */
 void end_page_writeback(struct page *page)
 {
@@ -571,11 +527,10 @@ void end_page_writeback(struct page *page)
 }
 EXPORT_SYMBOL(end_page_writeback);

-/**
- * __lock_page - get a lock on the page, assuming we need to sleep to get it
- * @page: the page to lock
+/*
+ * Get a lock on the page, assuming we need to sleep to get it.
  *
- * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
  * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
@@ -590,28 +545,7 @@ void fastcall __lock_page(struct page *page)
 EXPORT_SYMBOL(__lock_page);

 /*
- * Note completion of filesystem specific page synchronisation
- *
- * This is used to allow a page to be written to a filesystem cache in the
- * background without holding up the completion of readpage
- */
-void fastcall end_page_fs_misc(struct page *page)
-{
-	smp_mb__before_clear_bit();
-	if (!TestClearPageFsMisc(page))
-		BUG();
-	smp_mb__after_clear_bit();
-	__wake_up_bit(page_waitqueue(page), &page->flags, PG_fs_misc);
-}
-
-EXPORT_SYMBOL(end_page_fs_misc);
-
-/**
- * find_get_page - find and get a page reference
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * A rather lightweight function, finding and getting a reference to a
+ * a rather lightweight function, finding and getting a reference to a
  * hashed page atomically.
  */
 struct page * find_get_page(struct address_space *mapping, unsigned long offset)
@@ -625,14 +559,11 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
+
 EXPORT_SYMBOL(find_get_page);

-/**
- * find_trylock_page - find and lock a page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Same as find_get_page(), but trylock it instead of incrementing the count.
+/*
+ * Same as above, but trylock it instead of incrementing the count.
  */
 struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
 {
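A note on the lookup family above: find_get_page() returns with an extra reference held, while find_trylock_page() returns the page locked but takes no reference, so each imposes a different undo obligation on the caller. A hypothetical non-blocking probe, built only from functions whose signatures appear in this diff:

#include <linux/pagemap.h>

/* Hypothetical: peek at a cached page without ever sleeping. */
static int page_cached_and_dirty(struct address_space *mapping,
				 unsigned long index)
{
	struct page *page;
	int dirty = 0;

	page = find_trylock_page(mapping, index);	/* NULL if absent or already locked */
	if (page) {
		dirty = PageDirty(page);
		/* locked but not referenced, so unlock rather than release */
		unlock_page(page);
	}
	return dirty;
}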
@@ -645,10 +576,12 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
+
 EXPORT_SYMBOL(find_trylock_page);

 /**
  * find_lock_page - locate, pin and lock a pagecache page
+ *
  * @mapping: the address_space to search
  * @offset: the page index
  *
@@ -684,10 +617,12 @@ repeat:
 	read_unlock_irq(&mapping->tree_lock);
 	return page;
 }
+
 EXPORT_SYMBOL(find_lock_page);

 /**
  * find_or_create_page - locate or add a pagecache page
+ *
  * @mapping: the page's address_space
  * @index: the page's index into the mapping
  * @gfp_mask: page allocation mode
@@ -728,6 +663,7 @@ repeat:
 		page_cache_release(cached_page);
 	return page;
 }
+
 EXPORT_SYMBOL(find_or_create_page);

 /**
@@ -793,16 +729,9 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 	return i;
 }

-/**
- * find_get_pages_tag - find and return pages that match @tag
- * @mapping: the address_space to search
- * @index: the starting page index
- * @tag: the tag index
- * @nr_pages: the maximum number of pages
- * @pages: where the resulting pages are placed
- *
+/*
  * Like find_get_pages, except we only return pages which are tagged with
- * @tag.   We update @index to index the next page for the traversal.
+ * `tag'.   We update *index to index the next page for the traversal.
  */
 unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 			int tag, unsigned int nr_pages, struct page **pages)
@@ -821,11 +750,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 	return ret;
 }

-/**
- * grab_cache_page_nowait - returns locked page at given index in given cache
- * @mapping:	target address_space
- * @index:	the page index
- *
+/*
  * Same as grab_cache_page, but do not wait if the page is unavailable.
  * This is intended for speculative data generators, where the data can
  * be regenerated if the page couldn't be grabbed.  This routine should
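The `tag' traversal described above is what per-file writeback is built on: radix-tree tags let callers jump straight to dirty (or under-writeback) pages instead of scanning every index. A hypothetical walk using the signature from this hunk; the batch size is arbitrary and the actual writeout is elided:

#include <linux/fs.h>
#include <linux/pagemap.h>

#define WALK_BATCH 16

/* Hypothetical: visit every dirty page of a mapping, in index order. */
static void for_each_dirty_page(struct address_space *mapping)
{
	struct page *pages[WALK_BATCH];
	pgoff_t next = 0;
	unsigned nr, i;

	while ((nr = find_get_pages_tag(mapping, &next, PAGECACHE_TAG_DIRTY,
					WALK_BATCH, pages)) != 0) {
		for (i = 0; i < nr; i++) {
			/* ... inspect or write out pages[i] here ... */
			page_cache_release(pages[i]);	/* drop the gotten reference */
		}
		/* 'next' has been advanced past the last page returned */
	}
}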
@@ -854,49 +779,19 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
 	}
 	return page;
 }
+
 EXPORT_SYMBOL(grab_cache_page_nowait);

 /*
- * CD/DVDs are error prone. When a medium error occurs, the driver may fail
- * a _large_ part of the i/o request. Imagine the worst scenario:
- *
- * ---R__________________________________________B__________
- *    ^ reading here                              ^ bad block(assume 4k)
- *
- * read(R) => miss => readahead(R...B) => media error => frustrating retries
- * => failing the whole request => read(R) => read(R+1) =>
- * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>
- * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>
- * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......
- *
- * It is going insane. Fix it by quickly scaling down the readahead size.
- */
-static void shrink_readahead_size_eio(struct file *filp,
-					struct file_ra_state *ra)
-{
-	if (!ra->ra_pages)
-		return;
-
-	ra->ra_pages /= 4;
-}
-
-/**
- * do_generic_mapping_read - generic file read routine
- * @mapping:	address_space to be read
- * @_ra:	file's readahead state
- * @filp:	the file to read
- * @ppos:	current file position
- * @desc:	read_descriptor
- * @actor:	read method
- *
  * This is a generic file read routine, and uses the
- * mapping->a_ops->readpage() function for the actual low-level stuff.
+ * mapping->a_ops->readpage() function for the actual low-level
+ * stuff.
  *
  * This is really ugly.  But the goto's actually try to clarify some
  * of the logic when it comes to error handling etc.
  *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
+ * Note the struct file* is only passed for the use of readpage.  It may be
+ * NULL.
  */
 void do_generic_mapping_read(struct address_space *mapping,
 			     struct file_ra_state *_ra,
@@ -1048,7 +943,6 @@ readpage:
 		}
 		unlock_page(page);
 		error = -EIO;
-		shrink_readahead_size_eio(filp, &ra);
 		goto readpage_error;
 	}
 	unlock_page(page);
@@ -1121,6 +1015,7 @@ out:
 	if (filp)
 		file_accessed(filp);
 }
+
 EXPORT_SYMBOL(do_generic_mapping_read);

 int file_read_actor(read_descriptor_t *desc, struct page *page,
@@ -1161,13 +1056,7 @@ success:
 	return size;
 }

-/**
- * __generic_file_aio_read - generic filesystem read routine
- * @iocb:	kernel I/O control block
- * @iov:	io vector request
- * @nr_segs:	number of segments in the iovec
- * @ppos:	current file position
- *
+/*
  * This is the "read()" routine for all filesystems
  * that can use the page cache directly.
  */
@@ -1246,6 +1135,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 out:
 	return retval;
 }
+
 EXPORT_SYMBOL(__generic_file_aio_read);

 ssize_t
@@ -1256,6 +1146,7 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
 	BUG_ON(iocb->ki_pos != pos);
 	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
 }
+
 EXPORT_SYMBOL(generic_file_aio_read);

 ssize_t
@@ -1271,6 +1162,7 @@ generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppo
 		ret = wait_on_sync_kiocb(&kiocb);
 	return ret;
 }
+
 EXPORT_SYMBOL(generic_file_read);

 int file_send_actor(read_descriptor_t * desc, struct page *page,
 		unsigned long offset, unsigned long size)
@@ -1311,6 +1203,7 @@ ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
 		return desc.written;
 	return desc.error;
 }
+
 EXPORT_SYMBOL(generic_file_sendfile);

 static ssize_t
@@ -1346,15 +1239,11 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
 }

 #ifdef CONFIG_MMU
-static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
-/**
- * page_cache_read - adds requested page to the page cache if not already there
- * @file:	file to read
- * @offset:	page index
- *
+/*
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
+static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
 static int fastcall page_cache_read(struct file * file, unsigned long offset)
 {
 	struct address_space *mapping = file->f_mapping;
@@ -1381,12 +1270,7 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset)

 #define MMAP_LOTSAMISS  (100)

-/**
- * filemap_nopage - read in file data for page fault handling
- * @area:	the applicable vm_area
- * @address:	target address to read in
- * @type:	returned with VM_FAULT_{MINOR,MAJOR} if not %NULL
- *
+/*
  * filemap_nopage() is invoked via the vma operations vector for a
  * mapped memory region to read in file data during a page fault.
  *
@@ -1453,7 +1337,7 @@ retry_find:
 		 */
 		if (!did_readaround) {
 			majmin = VM_FAULT_MAJOR;
-			count_vm_event(PGMAJFAULT);
+			inc_page_state(pgmajfault);
 		}
 		did_readaround = 1;
 		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
@@ -1524,7 +1408,7 @@ no_cached_page:
 page_not_uptodate:
 	if (!did_readaround) {
 		majmin = VM_FAULT_MAJOR;
-		count_vm_event(PGMAJFAULT);
+		inc_page_state(pgmajfault);
 	}
 	lock_page(page);
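The read path touched above (do_generic_mapping_read(), file_read_actor(), file_send_actor()) shares one contract: the caller loads a read_descriptor_t with the requested byte count, and the actor is invoked once per page-cache chunk, consuming data and updating desc->written/desc->count, with short progress reported through desc->error. A toy actor that merely accounts for the bytes, shaped after file_read_actor()'s bookkeeping but entirely hypothetical:

#include <linux/fs.h>

/* Hypothetical actor: discard the data, just account for it. */
static int count_bytes_actor(read_descriptor_t *desc, struct page *page,
			     unsigned long offset, unsigned long size)
{
	unsigned long count = desc->count;

	if (size > count)
		size = count;		/* never consume more than was asked for */

	desc->count = count - size;
	desc->written += size;
	return size;			/* bytes consumed from this page */
}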
@@ -1586,10 +1470,10 @@ page_not_uptodate:
 	 * Things didn't work out.  Return zero to tell the
 	 * mm layer so, possibly freeing the page cache page first.
 	 */
-	shrink_readahead_size_eio(file, ra);
 	page_cache_release(page);
 	return NULL;
 }
+
 EXPORT_SYMBOL(filemap_nopage);

 static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
@@ -1843,13 +1727,7 @@ repeat:
 	return page;
 }

-/**
- * read_cache_page - read into page cache, fill it if needed
- * @mapping:	the page's address_space
- * @index:	the page index
- * @filler:	function to perform the read
- * @data:	destination for read data
- *
+/*
  * Read into the page cache.  If a page already exists,
  * and PageUptodate() is not set, try to fill the page.
  */
@@ -1887,6 +1765,7 @@ retry:
 out:
 	return page;
 }
+
 EXPORT_SYMBOL(read_cache_page);

 /*
@@ -1957,7 +1836,7 @@ int remove_suid(struct dentry *dentry)
 EXPORT_SYMBOL(remove_suid);

 size_t
-__filemap_copy_from_user_iovec_inatomic(char *vaddr,
+__filemap_copy_from_user_iovec(char *vaddr,
 			const struct iovec *iov, size_t base, size_t bytes)
 {
 	size_t copied = 0, left = 0;
@@ -1967,14 +1846,18 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
 		int copy = min(bytes, iov->iov_len - base);

 		base = 0;
-		left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+		left = __copy_from_user_inatomic(vaddr, buf, copy);
 		copied += copy;
 		bytes -= copy;
 		vaddr += copy;
 		iov++;

-		if (unlikely(left))
+		if (unlikely(left)) {
+			/* zero the rest of the target like __copy_from_user */
+			if (bytes)
+				memset(vaddr, 0, bytes);
 			break;
+		}
 	}
 	return copied - left;
 }
@@ -1982,7 +1865,7 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr,
 /*
  * Performs necessary checks before doing a write
  *
- * Can adjust writing position or amount of bytes to write.
+ * Can adjust writing position or amount of bytes to write.
  * Returns appropriate error code that caller should return or
  * zero in case that write should be allowed.
  */
@@ -2106,7 +1989,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 {
 	struct file *file = iocb->ki_filp;
 	struct address_space * mapping = file->f_mapping;
-	const struct address_space_operations *a_ops = mapping->a_ops;
+	struct address_space_operations *a_ops = mapping->a_ops;
 	struct inode *inode = mapping->host;
 	long status = 0;
 	struct page *page;
@@ -2258,98 +2141,12 @@ zero_length_segment:
 }
 EXPORT_SYMBOL(generic_file_buffered_write);

-/*
- * This writes the data from the source page to the specified page offset in
- * the nominated file
- * - the source page does not need to have any association with the file or the
- *   page offset
- */
-int
-generic_file_buffered_write_one_kernel_page(struct address_space *mapping,
-					    pgoff_t index,
-					    struct page *src)
-{
-	const struct address_space_operations *a_ops = mapping->a_ops;
-	struct pagevec lru_pvec;
-	struct page *page, *cached_page = NULL;
-	long status = 0;
-
-	pagevec_init(&lru_pvec, 0);
-
-#if 0
-	if (mapping->tree_lock.magic != RWLOCK_MAGIC)
-		printk("RWLOCK magic incorrect: %x != %x\n",
-		       mapping->tree_lock.magic, RWLOCK_MAGIC);
-#endif
-
-	page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec);
-	if (!page) {
-		BUG_ON(cached_page);
-		return -ENOMEM;
-	}
-
-	status = a_ops->prepare_write(NULL, page, 0, PAGE_CACHE_SIZE);
-	if (unlikely(status)) {
-		loff_t isize = i_size_read(mapping->host);
-
-		if (status != AOP_TRUNCATED_PAGE)
-			unlock_page(page);
-		page_cache_release(page);
-		if (status == AOP_TRUNCATED_PAGE)
-			goto sync;
-
-		/* prepare_write() may have instantiated a few blocks outside
-		 * i_size.  Trim these off again.
-		 */
-		if ((1ULL << (index + 1)) > isize)
-			vmtruncate(mapping->host, isize);
-		goto sync;
-	}
-
-	copy_highpage(page, src);
-	flush_dcache_page(page);
-
-	status = a_ops->commit_write(NULL, page, 0, PAGE_CACHE_SIZE);
-	if (status == AOP_TRUNCATED_PAGE) {
-		page_cache_release(page);
-		goto sync;
-	}
-
-	if (status > 0)
-		status = 0;
-
-	unlock_page(page);
-	mark_page_accessed(page);
-	page_cache_release(page);
-	if (status < 0)
-		return status;
-
-	balance_dirty_pages_ratelimited(mapping);
-	cond_resched();
-
-sync:
-	if (cached_page)
-		page_cache_release(cached_page);
-
-	/* the caller must handle O_SYNC themselves, but we handle S_SYNC and
-	 * MS_SYNCHRONOUS here */
-	if (unlikely(IS_SYNC(mapping->host)) && !a_ops->writepage)
-		status = generic_osync_inode(mapping->host, mapping,
-					     OSYNC_METADATA | OSYNC_DATA);
-
-	/* the caller must handle O_DIRECT for themselves */
-
-	pagevec_lru_add(&lru_pvec);
-	return status;
-}
-EXPORT_SYMBOL(generic_file_buffered_write_one_kernel_page);
-
 static ssize_t
 __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t *ppos)
 {
 	struct file *file = iocb->ki_filp;
-	const struct address_space * mapping = file->f_mapping;
+	struct address_space * mapping = file->f_mapping;
 	size_t ocount;		/* original count */
 	size_t count;		/* after file limit checks */
 	struct inode *inode = mapping->host;
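Both generic_file_buffered_write() and the deleted generic_file_buffered_write_one_kernel_page() revolve around the same two-phase address_space_operations contract: prepare_write() readies the byte range, the caller copies data into the (locked) page, and commit_write() marks the result uptodate and dirty. Stripped of AOP_TRUNCATED_PAGE handling, i_size trimming and syncing, the cycle reduces to roughly the following hypothetical helper:

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: copy 'len' bytes into the start of a locked pagecache page. */
static int fill_page_prepare_commit(struct address_space *mapping,
				    struct page *page,
				    const void *src, unsigned len)
{
	struct address_space_operations *a_ops = mapping->a_ops;
	void *kaddr;
	int status;

	status = a_ops->prepare_write(NULL, page, 0, len);
	if (status)
		return status;	/* real callers must handle AOP_TRUNCATED_PAGE */

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, src, len);
	kunmap_atomic(kaddr, KM_USER0);
	flush_dcache_page(page);

	status = a_ops->commit_write(NULL, page, 0, len);
	return status > 0 ? 0 : status;	/* >0 is a short commit, treated as done */
}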