X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Ffilemap.c;fp=mm%2Ffilemap.c;h=44da3d47699485004994c6af302c7406d606c569;hb=64ba3f394c830ec48a1c31b53dcae312c56f1604;hp=a3aae8bec9ae552d36aacb4bc097c6d84f4b328e;hpb=be1e6109ac94a859551f8e1774eb9a8469fe055c;p=linux-2.6.git

diff --git a/mm/filemap.c b/mm/filemap.c
index a3aae8bec..44da3d476 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -9,11 +9,11 @@
  * most "normal" filesystems (but you don't /have/ to use this:
  * the NFS filesystem used to do this differently, for example)
  */
+#include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -29,15 +29,13 @@
 #include 
 #include 
 #include 
-#include 
 #include "filemap.h"
-#include "internal.h"
-
 /*
  * FIXME: remove all knowledge of the buffer layer from the core VM
  */
 #include  /* for generic_osync_inode */
+#include 
 #include 
 static ssize_t
@@ -119,7 +117,7 @@ void __remove_from_page_cache(struct page *page)
 	radix_tree_delete(&mapping->page_tree, page->index);
 	page->mapping = NULL;
 	mapping->nrpages--;
-	__dec_zone_page_state(page, NR_FILE_PAGES);
+	pagecache_acct(-1);
 }
 void remove_from_page_cache(struct page *page)
@@ -170,29 +168,27 @@ static int sync_page(void *word)
 }
 /**
- * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
+ * filemap_fdatawrite_range - start writeback against all of a mapping's
+ * dirty pages that lie within the byte offsets
  * @mapping: address space structure to write
  * @start: offset in bytes where the range starts
- * @end: offset in bytes where the range ends (inclusive)
+ * @end: offset in bytes where the range ends
  * @sync_mode: enable synchronous operation
  *
- * Start writeback against all of a mapping's dirty pages that lie
- * within the byte offsets inclusive.
- *
  * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
- * opposed to a regular memory cleansing writeback. The difference between
+ * opposed to a regular memory * cleansing writeback. The difference between
  * these two operations is that if a dirty page/buffer is encountered, it must
  * be waited upon, and not just skipped over.
  */
-int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
-	loff_t end, int sync_mode)
+static int __filemap_fdatawrite_range(struct address_space *mapping,
+	loff_t start, loff_t end, int sync_mode)
 {
 	int ret;
 	struct writeback_control wbc = {
 		.sync_mode = sync_mode,
 		.nr_to_write = mapping->nrpages * 2,
-		.range_start = start,
-		.range_end = end,
+		.start = start,
+		.end = end,
 	};
 	if (!mapping_cap_writeback_dirty(mapping))
@@ -205,7 +201,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 static inline int __filemap_fdatawrite(struct address_space *mapping,
 	int sync_mode)
 {
-	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);
+	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
 }
 int filemap_fdatawrite(struct address_space *mapping)
@@ -214,16 +210,13 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
-	loff_t end)
+static int filemap_fdatawrite_range(struct address_space *mapping,
+	loff_t start, loff_t end)
 {
 	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
-/**
- * filemap_flush - mostly a non-blocking flush
- * @mapping: target address_space
- *
+/*
  * This is a mostly non-blocking flush. Not suitable for data-integrity
  * purposes - I/O may not be started against all dirty pages.
  */
@@ -233,16 +226,11 @@ int filemap_flush(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_flush);
-/**
- * wait_on_page_writeback_range - wait for writeback to complete
- * @mapping: target address_space
- * @start: beginning page index
- * @end: ending page index
- *
+/*
  * Wait for writeback to complete against pages indexed by start->end
  * inclusive
  */
-int wait_on_page_writeback_range(struct address_space *mapping,
+static int wait_on_page_writeback_range(struct address_space *mapping,
 				pgoff_t start, pgoff_t end)
 {
 	struct pagevec pvec;
@@ -285,13 +273,7 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 	return ret;
 }
-/**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
+/*
  * Write and wait upon all the pages in the passed range. This is a "data
  * integrity" operation. It waits upon in-flight writeout before starting and
  * waiting upon new writeout. If there was an IO error, return it.
@@ -320,13 +302,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 }
 EXPORT_SYMBOL(sync_page_range);
-/**
- * sync_page_range_nolock
- * @inode: target inode
- * @mapping: target address_space
- * @pos: beginning offset in pages to write
- * @count: number of bytes to write
- *
+/*
  * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
  * as it forces O_SYNC writers to different parts of the same file
  * to be serialised right until io completion.
@@ -350,11 +326,10 @@ int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range_nolock);
 /**
- * filemap_fdatawait - wait for all under-writeback pages to complete
- * @mapping: address space structure to wait for
+ * filemap_fdatawait - walk the list of under-writeback pages of the given
+ * address space and wait for all of them.
  *
- * Walk the list of under-writeback pages of the given address space
- * and wait for all of them.
+ * @mapping: address space structure to wait for
  */
 int filemap_fdatawait(struct address_space *mapping)
 {
@@ -390,17 +365,6 @@ int filemap_write_and_wait(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_write_and_wait);
-/**
- * filemap_write_and_wait_range - write out & wait on a file range
- * @mapping: the address_space for the pages
- * @lstart: offset in bytes where the range starts
- * @lend: offset in bytes where the range ends (inclusive)
- *
- * Write out and wait upon file offsets lstart->lend, inclusive.
- *
- * Note that `lend' is inclusive (describes the last byte to be written) so
- * that this function can be used to write to the very end-of-file (end = -1).
- */
 int filemap_write_and_wait_range(struct address_space *mapping,
 				 loff_t lstart, loff_t lend)
 {
@@ -421,14 +385,8 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 	return err;
 }
-/**
- * add_to_page_cache - add newly allocated pagecache pages
- * @page: page to add
- * @mapping: the page's address_space
- * @offset: page index
- * @gfp_mask: page allocation mode
- *
- * This function is used to add newly allocated pagecache pages;
+/*
+ * This function is used to add newly allocated pagecache pages:
  * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
* @@ -448,13 +406,14 @@ int add_to_page_cache(struct page *page, struct address_space *mapping, page->mapping = mapping; page->index = offset; mapping->nrpages++; - __inc_zone_page_state(page, NR_FILE_PAGES); + pagecache_acct(1); } write_unlock_irq(&mapping->tree_lock); radix_tree_preload_end(); } return error; } + EXPORT_SYMBOL(add_to_page_cache); int add_to_page_cache_lru(struct page *page, struct address_space *mapping, @@ -466,28 +425,6 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, return ret; } -#ifdef CONFIG_NUMA -struct page *page_cache_alloc(struct address_space *x) -{ - if (cpuset_do_page_mem_spread()) { - int n = cpuset_mem_spread_node(); - return alloc_pages_node(n, mapping_gfp_mask(x), 0); - } - return alloc_pages(mapping_gfp_mask(x), 0); -} -EXPORT_SYMBOL(page_cache_alloc); - -struct page *page_cache_alloc_cold(struct address_space *x) -{ - if (cpuset_do_page_mem_spread()) { - int n = cpuset_mem_spread_node(); - return alloc_pages_node(n, mapping_gfp_mask(x)|__GFP_COLD, 0); - } - return alloc_pages(mapping_gfp_mask(x)|__GFP_COLD, 0); -} -EXPORT_SYMBOL(page_cache_alloc_cold); -#endif - /* * In order to wait for pages to become available there must be * waitqueues associated with pages. By using a hash table of @@ -520,20 +457,9 @@ void fastcall wait_on_page_bit(struct page *page, int bit_nr) } EXPORT_SYMBOL(wait_on_page_bit); -void install_page_waitqueue_monitor(struct page *page, wait_queue_t *monitor) -{ - wait_queue_head_t *q = page_waitqueue(page); - unsigned long flags; - - spin_lock_irqsave(&q->lock, flags); - __add_wait_queue(q, monitor); - spin_unlock_irqrestore(&q->lock, flags); -} - -EXPORT_SYMBOL_GPL(install_page_waitqueue_monitor); - /** - * unlock_page - unlock a locked page + * unlock_page() - unlock a locked page + * * @page: the page * * Unlocks the page and wakes up sleepers in ___wait_on_page_locked(). @@ -556,9 +482,8 @@ void fastcall unlock_page(struct page *page) } EXPORT_SYMBOL(unlock_page); -/** - * end_page_writeback - end writeback against a page - * @page: the page +/* + * End writeback against a page. */ void end_page_writeback(struct page *page) { @@ -571,11 +496,10 @@ void end_page_writeback(struct page *page) } EXPORT_SYMBOL(end_page_writeback); -/** - * __lock_page - get a lock on the page, assuming we need to sleep to get it - * @page: the page to lock +/* + * Get a lock on the page, assuming we need to sleep to get it. * - * Ugly. Running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some + * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary. If some * random driver's requestfn sets TASK_RUNNING, we could busywait. However * chances are that on the second loop, the block layer's plug list is empty, * so sync_page() will then return in state TASK_UNINTERRUPTIBLE. 
@@ -590,28 +514,7 @@ void fastcall __lock_page(struct page *page) EXPORT_SYMBOL(__lock_page); /* - * Note completion of filesystem specific page synchronisation - * - * This is used to allow a page to be written to a filesystem cache in the - * background without holding up the completion of readpage - */ -void fastcall end_page_fs_misc(struct page *page) -{ - smp_mb__before_clear_bit(); - if (!TestClearPageFsMisc(page)) - BUG(); - smp_mb__after_clear_bit(); - __wake_up_bit(page_waitqueue(page), &page->flags, PG_fs_misc); -} - -EXPORT_SYMBOL(end_page_fs_misc); - -/** - * find_get_page - find and get a page reference - * @mapping: the address_space to search - * @offset: the page index - * - * A rather lightweight function, finding and getting a reference to a + * a rather lightweight function, finding and getting a reference to a * hashed page atomically. */ struct page * find_get_page(struct address_space *mapping, unsigned long offset) @@ -625,14 +528,11 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset) read_unlock_irq(&mapping->tree_lock); return page; } + EXPORT_SYMBOL(find_get_page); -/** - * find_trylock_page - find and lock a page - * @mapping: the address_space to search - * @offset: the page index - * - * Same as find_get_page(), but trylock it instead of incrementing the count. +/* + * Same as above, but trylock it instead of incrementing the count. */ struct page *find_trylock_page(struct address_space *mapping, unsigned long offset) { @@ -645,10 +545,12 @@ struct page *find_trylock_page(struct address_space *mapping, unsigned long offs read_unlock_irq(&mapping->tree_lock); return page; } + EXPORT_SYMBOL(find_trylock_page); /** * find_lock_page - locate, pin and lock a pagecache page + * * @mapping: the address_space to search * @offset: the page index * @@ -684,10 +586,12 @@ repeat: read_unlock_irq(&mapping->tree_lock); return page; } + EXPORT_SYMBOL(find_lock_page); /** * find_or_create_page - locate or add a pagecache page + * * @mapping: the page's address_space * @index: the page's index into the mapping * @gfp_mask: page allocation mode @@ -728,6 +632,7 @@ repeat: page_cache_release(cached_page); return page; } + EXPORT_SYMBOL(find_or_create_page); /** @@ -761,48 +666,9 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start, return ret; } -/** - * find_get_pages_contig - gang contiguous pagecache lookup - * @mapping: The address_space to search - * @index: The starting page index - * @nr_pages: The maximum number of pages - * @pages: Where the resulting pages are placed - * - * find_get_pages_contig() works exactly like find_get_pages(), except - * that the returned number of pages are guaranteed to be contiguous. - * - * find_get_pages_contig() returns the number of pages which were found. 
- */ -unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, - unsigned int nr_pages, struct page **pages) -{ - unsigned int i; - unsigned int ret; - - read_lock_irq(&mapping->tree_lock); - ret = radix_tree_gang_lookup(&mapping->page_tree, - (void **)pages, index, nr_pages); - for (i = 0; i < ret; i++) { - if (pages[i]->mapping == NULL || pages[i]->index != index) - break; - - page_cache_get(pages[i]); - index++; - } - read_unlock_irq(&mapping->tree_lock); - return i; -} - -/** - * find_get_pages_tag - find and return pages that match @tag - * @mapping: the address_space to search - * @index: the starting page index - * @tag: the tag index - * @nr_pages: the maximum number of pages - * @pages: where the resulting pages are placed - * +/* * Like find_get_pages, except we only return pages which are tagged with - * @tag. We update @index to index the next page for the traversal. + * `tag'. We update *index to index the next page for the traversal. */ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, int tag, unsigned int nr_pages, struct page **pages) @@ -821,11 +687,7 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index, return ret; } -/** - * grab_cache_page_nowait - returns locked page at given index in given cache - * @mapping: target address_space - * @index: the page index - * +/* * Same as grab_cache_page, but do not wait if the page is unavailable. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should @@ -854,57 +716,26 @@ grab_cache_page_nowait(struct address_space *mapping, unsigned long index) } return page; } + EXPORT_SYMBOL(grab_cache_page_nowait); /* - * CD/DVDs are error prone. When a medium error occurs, the driver may fail - * a _large_ part of the i/o request. Imagine the worst scenario: - * - * ---R__________________________________________B__________ - * ^ reading here ^ bad block(assume 4k) - * - * read(R) => miss => readahead(R...B) => media error => frustrating retries - * => failing the whole request => read(R) => read(R+1) => - * readahead(R+1...B+1) => bang => read(R+2) => read(R+3) => - * readahead(R+3...B+2) => bang => read(R+3) => read(R+4) => - * readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ...... - * - * It is going insane. Fix it by quickly scaling down the readahead size. - */ -static void shrink_readahead_size_eio(struct file *filp, - struct file_ra_state *ra) -{ - if (!ra->ra_pages) - return; - - ra->ra_pages /= 4; -} - -/** - * do_generic_mapping_read - generic file read routine - * @mapping: address_space to be read - * @_ra: file's readahead state - * @filp: the file to read - * @ppos: current file position - * @desc: read_descriptor - * @actor: read method - * * This is a generic file read routine, and uses the - * mapping->a_ops->readpage() function for the actual low-level stuff. + * mapping->a_ops->readpage() function for the actual low-level + * stuff. * * This is really ugly. But the goto's actually try to clarify some * of the logic when it comes to error handling etc. * - * Note the struct file* is only passed for the use of readpage. - * It may be NULL. + * Note the struct file* is only passed for the use of readpage. It may be + * NULL. 
*/ void do_generic_mapping_read(struct address_space *mapping, struct file_ra_state *_ra, struct file *filp, loff_t *ppos, read_descriptor_t *desc, - read_actor_t actor, - int nonblock) + read_actor_t actor) { struct inode *inode = mapping->host; unsigned long index; @@ -954,21 +785,11 @@ void do_generic_mapping_read(struct address_space *mapping, find_page: page = find_get_page(mapping, index); if (unlikely(page == NULL)) { - if (nonblock) { - desc->error = -EWOULDBLOCKIO; - break; - } handle_ra_miss(mapping, &ra, index); goto no_cached_page; } - if (!PageUptodate(page)) { - if (nonblock) { - page_cache_release(page); - desc->error = -EWOULDBLOCKIO; - break; - } + if (!PageUptodate(page)) goto page_not_up_to_date; - } page_ok: /* If users can be writing to this page using arbitrary @@ -1048,7 +869,6 @@ readpage: } unlock_page(page); error = -EIO; - shrink_readahead_size_eio(filp, &ra); goto readpage_error; } unlock_page(page); @@ -1121,6 +941,7 @@ out: if (filp) file_accessed(filp); } + EXPORT_SYMBOL(do_generic_mapping_read); int file_read_actor(read_descriptor_t *desc, struct page *page, @@ -1161,13 +982,7 @@ success: return size; } -/** - * __generic_file_aio_read - generic filesystem read routine - * @iocb: kernel I/O control block - * @iov: io vector request - * @nr_segs: number of segments in the iovec - * @ppos: current file position - * +/* * This is the "read()" routine for all filesystems * that can use the page cache directly. */ @@ -1235,7 +1050,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, if (desc.count == 0) continue; desc.error = 0; - do_generic_file_read(filp,ppos,&desc,file_read_actor,0); + do_generic_file_read(filp,ppos,&desc,file_read_actor); retval += desc.written; if (desc.error) { retval = retval ?: desc.error; @@ -1246,6 +1061,7 @@ __generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, out: return retval; } + EXPORT_SYMBOL(__generic_file_aio_read); ssize_t @@ -1256,6 +1072,7 @@ generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t BUG_ON(iocb->ki_pos != pos); return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos); } + EXPORT_SYMBOL(generic_file_aio_read); ssize_t @@ -1271,6 +1088,7 @@ generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppo ret = wait_on_sync_kiocb(&kiocb); return ret; } + EXPORT_SYMBOL(generic_file_read); int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size) @@ -1306,11 +1124,12 @@ ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos, desc.arg.data = target; desc.error = 0; - do_generic_file_read(in_file, ppos, &desc, actor, 0); + do_generic_file_read(in_file, ppos, &desc, actor); if (desc.written) return desc.written; return desc.error; } + EXPORT_SYMBOL(generic_file_sendfile); static ssize_t @@ -1346,15 +1165,11 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count) } #ifdef CONFIG_MMU -static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); -/** - * page_cache_read - adds requested page to the page cache if not already there - * @file: file to read - * @offset: page index - * +/* * This adds the requested page to the page cache if it isn't already there, * and schedules an I/O to read in its contents from disk. 
*/ +static int FASTCALL(page_cache_read(struct file * file, unsigned long offset)); static int fastcall page_cache_read(struct file * file, unsigned long offset) { struct address_space *mapping = file->f_mapping; @@ -1381,12 +1196,7 @@ static int fastcall page_cache_read(struct file * file, unsigned long offset) #define MMAP_LOTSAMISS (100) -/** - * filemap_nopage - read in file data for page fault handling - * @area: the applicable vm_area - * @address: target address to read in - * @type: returned with VM_FAULT_{MINOR,MAJOR} if not %NULL - * +/* * filemap_nopage() is invoked via the vma operations vector for a * mapped memory region to read in file data during a page fault. * @@ -1453,7 +1263,7 @@ retry_find: */ if (!did_readaround) { majmin = VM_FAULT_MAJOR; - count_vm_event(PGMAJFAULT); + inc_page_state(pgmajfault); } did_readaround = 1; ra_pages = max_sane_readahead(file->f_ra.ra_pages); @@ -1524,7 +1334,7 @@ no_cached_page: page_not_uptodate: if (!did_readaround) { majmin = VM_FAULT_MAJOR; - count_vm_event(PGMAJFAULT); + inc_page_state(pgmajfault); } lock_page(page); @@ -1586,10 +1396,10 @@ page_not_uptodate: * Things didn't work out. Return zero to tell the * mm layer so, possibly freeing the page cache page first. */ - shrink_readahead_size_eio(file, ra); page_cache_release(page); return NULL; } + EXPORT_SYMBOL(filemap_nopage); static struct page * filemap_getpage(struct file *file, unsigned long pgoff, @@ -1843,13 +1653,7 @@ repeat: return page; } -/** - * read_cache_page - read into page cache, fill it if needed - * @mapping: the page's address_space - * @index: the page index - * @filler: function to perform the read - * @data: destination for read data - * +/* * Read into the page cache. If a page already exists, * and PageUptodate() is not set, try to fill the page. */ @@ -1887,6 +1691,7 @@ retry: out: return page; } + EXPORT_SYMBOL(read_cache_page); /* @@ -1957,7 +1762,7 @@ int remove_suid(struct dentry *dentry) EXPORT_SYMBOL(remove_suid); size_t -__filemap_copy_from_user_iovec_inatomic(char *vaddr, +__filemap_copy_from_user_iovec(char *vaddr, const struct iovec *iov, size_t base, size_t bytes) { size_t copied = 0, left = 0; @@ -1967,14 +1772,18 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr, int copy = min(bytes, iov->iov_len - base); base = 0; - left = __copy_from_user_inatomic_nocache(vaddr, buf, copy); + left = __copy_from_user_inatomic(vaddr, buf, copy); copied += copy; bytes -= copy; vaddr += copy; iov++; - if (unlikely(left)) + if (unlikely(left)) { + /* zero the rest of the target like __copy_from_user */ + if (bytes) + memset(vaddr, 0, bytes); break; + } } return copied - left; } @@ -1982,7 +1791,7 @@ __filemap_copy_from_user_iovec_inatomic(char *vaddr, /* * Performs necessary checks before doing a write * - * Can adjust writing position or amount of bytes to write. + * Can adjust writing position aor amount of bytes to write. * Returns appropriate error code that caller should return or * zero in case that write should be allowed. 
*/ @@ -2106,7 +1915,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, { struct file *file = iocb->ki_filp; struct address_space * mapping = file->f_mapping; - const struct address_space_operations *a_ops = mapping->a_ops; + struct address_space_operations *a_ops = mapping->a_ops; struct inode *inode = mapping->host; long status = 0; struct page *page; @@ -2132,21 +1941,14 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, do { unsigned long index; unsigned long offset; + unsigned long maxlen; size_t copied; offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ index = pos >> PAGE_CACHE_SHIFT; bytes = PAGE_CACHE_SIZE - offset; - - /* Limit the size of the copy to the caller's write size */ - bytes = min(bytes, count); - - /* - * Limit the size of the copy to that of the current segment, - * because fault_in_pages_readable() doesn't know how to walk - * segments. - */ - bytes = min(bytes, cur_iov->iov_len - iov_base); + if (bytes > count) + bytes = count; /* * Bring in the user page that we will copy from _first_. @@ -2154,7 +1956,10 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, * same page as we're writing to, without it being marked * up-to-date. */ - fault_in_pages_readable(buf, bytes); + maxlen = cur_iov->iov_len - iov_base; + if (maxlen > bytes) + maxlen = bytes; + fault_in_pages_readable(buf, maxlen); page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); if (!page) { @@ -2162,12 +1967,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, break; } - if (unlikely(bytes == 0)) { - status = 0; - copied = 0; - goto zero_length_segment; - } - status = a_ops->prepare_write(file, page, offset, offset+bytes); if (unlikely(status)) { loff_t isize = i_size_read(inode); @@ -2197,8 +1996,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, page_cache_release(page); continue; } -zero_length_segment: - if (likely(copied >= 0)) { + if (likely(copied > 0)) { if (!status) status = copied; @@ -2258,98 +2056,12 @@ zero_length_segment: } EXPORT_SYMBOL(generic_file_buffered_write); -/* - * This writes the data from the source page to the specified page offset in - * the nominated file - * - the source page does not need to have any association with the file or the - * page offset - */ -int -generic_file_buffered_write_one_kernel_page(struct address_space *mapping, - pgoff_t index, - struct page *src) -{ - const struct address_space_operations *a_ops = mapping->a_ops; - struct pagevec lru_pvec; - struct page *page, *cached_page = NULL; - long status = 0; - - pagevec_init(&lru_pvec, 0); - -#if 0 - if (mapping->tree_lock.magic != RWLOCK_MAGIC) - printk("RWLOCK magic incorrect: %x != %x\n", - mapping->tree_lock.magic, RWLOCK_MAGIC); -#endif - - page = __grab_cache_page(mapping, index, &cached_page, &lru_pvec); - if (!page) { - BUG_ON(cached_page); - return -ENOMEM; - } - - status = a_ops->prepare_write(NULL, page, 0, PAGE_CACHE_SIZE); - if (unlikely(status)) { - loff_t isize = i_size_read(mapping->host); - - if (status != AOP_TRUNCATED_PAGE) - unlock_page(page); - page_cache_release(page); - if (status == AOP_TRUNCATED_PAGE) - goto sync; - - /* prepare_write() may have instantiated a few blocks outside - * i_size. Trim these off again. 
- */ - if ((1ULL << (index + 1)) > isize) - vmtruncate(mapping->host, isize); - goto sync; - } - - copy_highpage(page, src); - flush_dcache_page(page); - - status = a_ops->commit_write(NULL, page, 0, PAGE_CACHE_SIZE); - if (status == AOP_TRUNCATED_PAGE) { - page_cache_release(page); - goto sync; - } - - if (status > 0) - status = 0; - - unlock_page(page); - mark_page_accessed(page); - page_cache_release(page); - if (status < 0) - return status; - - balance_dirty_pages_ratelimited(mapping); - cond_resched(); - -sync: - if (cached_page) - page_cache_release(cached_page); - - /* the caller must handle O_SYNC themselves, but we handle S_SYNC and - * MS_SYNCHRONOUS here */ - if (unlikely(IS_SYNC(mapping->host)) && !a_ops->writepage) - status = generic_osync_inode(mapping->host, mapping, - OSYNC_METADATA | OSYNC_DATA); - - /* the caller must handle O_DIRECT for themselves */ - - pagevec_lru_add(&lru_pvec); - return status; -} -EXPORT_SYMBOL(generic_file_buffered_write_one_kernel_page); - static ssize_t __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t *ppos) { struct file *file = iocb->ki_filp; - const struct address_space * mapping = file->f_mapping; + struct address_space * mapping = file->f_mapping; size_t ocount; /* original count */ size_t count; /* after file limit checks */ struct inode *inode = mapping->host;