X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=fs%2Fntfs%2Ffile.c;h=36e1e136bb0c65bbdf21f0b8b19602c14964dfa1;hb=9464c7cf61b9433057924c36e6e02f303a00e768;hp=2e42c2dcae12310d123c13642809219c5cb51126;hpb=41689045f6a3cbe0550e1d34e9cc20d2e8c432ba;p=linux-2.6.git

diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 2e42c2dca..36e1e136b 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -231,7 +231,8 @@ do_non_resident_extend:
		 * Read the page. If the page is not present, this will zero
		 * the uninitialized regions for us.
		 */
-		page = read_mapping_page(mapping, index, NULL);
+		page = read_cache_page(mapping, index,
+				(filler_t*)mapping->a_ops->readpage, NULL);
 		if (IS_ERR(page)) {
 			err = PTR_ERR(page);
 			goto init_err_out;
@@ -1358,7 +1359,7 @@ err_out:
 	goto out;
 }
 
-static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
+static size_t __ntfs_copy_from_user_iovec(char *vaddr,
 		const struct iovec *iov, size_t iov_ofs, size_t bytes)
 {
 	size_t total = 0;
@@ -1376,6 +1377,10 @@ static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
 		bytes -= len;
 		vaddr += len;
 		if (unlikely(left)) {
+			/*
+			 * Zero the rest of the target like __copy_from_user().
+			 */
+			memset(vaddr, 0, bytes);
 			total -= left;
 			break;
 		}
@@ -1416,13 +1421,11 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
- * when atomic and when not atomic. This is ok because
- * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
- * and it is ok to call this when non-atomic.
- * Infact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep() and the former
- * should not zero the tail of the buffer on error. And on many
+ * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
+ * when not atomic. This is ok because __ntfs_copy_from_user_iovec() calls
+ * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
+ * fact, the only difference between __copy_from_user_inatomic() and
+ * __copy_from_user() is that the latter calls might_sleep(). And on many
  * architectures __copy_from_user_inatomic() is just defined to
  * __copy_from_user() so it makes no difference at all on those architectures.
  */
@@ -1439,18 +1442,14 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		if (len > bytes)
 			len = bytes;
 		kaddr = kmap_atomic(*pages, KM_USER0);
-		copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+		copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
 				*iov, *iov_ofs, len);
 		kunmap_atomic(kaddr, KM_USER0);
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			kaddr = kmap(*pages);
-			copied = __ntfs_copy_from_user_iovec_inatomic(kaddr + ofs,
+			copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
 					*iov, *iov_ofs, len);
-			/*
-			 * Zero the rest of the target like __copy_from_user().
-			 */
-			memset(kaddr + ofs + copied, 0, len - copied);
 			kunmap(*pages);
 			if (unlikely(copied != len))
 				goto err_out;
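
The comment block in the last two hunks describes a general fast-path/slow-path pattern: attempt the copy under an atomic kmap, where sleeping is forbidden and the copy may come up short if the user pages are not faulted in, then redo it under a regular kmap with a sleeping copy. Below is a minimal sketch of that pattern, assuming the 2.6-era highmem and uaccess APIs already used in this diff; the helper name fill_page_from_user() is invented for illustration and is not part of fs/ntfs/file.c.

#include <linux/compiler.h>	/* unlikely() */
#include <linux/highmem.h>	/* kmap_atomic(), kmap() */
#include <linux/mm.h>		/* struct page */
#include <asm/uaccess.h>	/* __copy_from_user*() */

/* Illustrative only: copy @len bytes of user data into @page at @ofs and
 * return how many bytes were actually copied. */
static size_t fill_page_from_user(struct page *page, unsigned ofs,
		const char __user *buf, size_t len)
{
	char *kaddr;
	size_t left;

	/* Fast path: atomic mapping, must not sleep, may fail on a fault. */
	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
	kunmap_atomic(kaddr, KM_USER0);
	if (unlikely(left)) {
		/* Slow path: regular mapping, __copy_from_user() may sleep
		 * to fault the user pages in. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + ofs, buf, len);
		kunmap(page);
	}
	return len - left;
}

The return convention mirrors the __copy_from_user*() helpers themselves: the caller learns how many bytes actually made it into the page and can decide whether to retry or bail out, as ntfs_copy_from_user_iovec() does with its copied != len checks.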
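
The memset() that moves between the two sides of this diff exists because of the tail-zeroing difference the old comment mentions: per that comment, __copy_from_user() zeroes the uncopied tail of the destination on a fault, while __copy_from_user_inatomic() does not, so whichever side calls the inatomic variant must zero the tail itself if callers expect a fully initialized buffer. A hedged sketch of that contract follows, with the helper name copy_user_or_zero_tail() invented for illustration (same headers as the sketch above, plus linux/string.h for memset()).

/* Illustrative only: copy @len bytes from user space into @dst, zeroing
 * whatever could not be copied so the destination is always fully
 * initialized, matching the __copy_from_user() behaviour described in the
 * comment above. Returns the number of bytes genuinely copied. */
static size_t copy_user_or_zero_tail(char *dst, const char __user *src,
		size_t len)
{
	size_t left = __copy_from_user_inatomic(dst, src, len);

	if (unlikely(left))
		memset(dst + (len - left), 0, left);
	return len - left;
}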