diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index fba39c9b7..d98daf59e 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -24,7 +24,11 @@
 #include <linux/buffer_head.h>
 #include <linux/blkdev.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 
+#include "attrib.h"
+#include "inode.h"
+#include "debug.h"
 #include "ntfs.h"
 
 /**
@@ -58,12 +62,12 @@ static u8 *ntfs_compression_buffer = NULL;
 /**
  * ntfs_cb_lock - spinlock which protects ntfs_compression_buffer
  */
-static spinlock_t ntfs_cb_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ntfs_cb_lock);
 
 /**
  * allocate_compression_buffers - allocate the decompression buffers
  *
- * Caller has to hold the ntfs_lock semaphore.
+ * Caller has to hold the ntfs_lock mutex.
  *
  * Return 0 on success or -ENOMEM if the allocations failed.
  */
@@ -80,7 +84,7 @@ int allocate_compression_buffers(void)
 /**
  * free_compression_buffers - free the decompression buffers
  *
- * Caller has to hold the ntfs_lock semaphore.
+ * Caller has to hold the ntfs_lock mutex.
  */
 void free_compression_buffers(void)
 {
@@ -92,13 +96,14 @@ void free_compression_buffers(void)
 /**
  * zero_partial_compressed_page - zero out of bounds compressed page region
  */
-static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
+static void zero_partial_compressed_page(struct page *page,
+		const s64 initialized_size)
 {
 	u8 *kp = page_address(page);
 	unsigned int kp_ofs;
 
 	ntfs_debug("Zeroing page region outside initialized size.");
-	if (((s64)page->index << PAGE_CACHE_SHIFT) >= ni->initialized_size) {
+	if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) {
 		/*
 		 * FIXME: Using clear_page() will become wrong when we get
 		 * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem.
@@ -106,7 +111,7 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 		clear_page(kp);
 		return;
 	}
-	kp_ofs = ni->initialized_size & ~PAGE_CACHE_MASK;
+	kp_ofs = initialized_size & ~PAGE_CACHE_MASK;
 	memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs);
 	return;
 }
@@ -114,12 +119,12 @@ static void zero_partial_compressed_page(ntfs_inode *ni, struct page *page)
 /**
  * handle_bounds_compressed_page - test for&handle out of bounds compressed page
  */
-static inline void handle_bounds_compressed_page(ntfs_inode *ni,
-		struct page *page)
+static inline void handle_bounds_compressed_page(struct page *page,
+		const loff_t i_size, const s64 initialized_size)
 {
-	if ((page->index >= (ni->initialized_size >> PAGE_CACHE_SHIFT)) &&
-			(ni->initialized_size < VFS_I(ni)->i_size))
-		zero_partial_compressed_page(ni, page);
+	if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) &&
+			(initialized_size < i_size))
+		zero_partial_compressed_page(page, initialized_size);
 	return;
 }
 
@@ -134,6 +139,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
  * @xpage_done:	set to 1 if xpage was completed successfully (IN/OUT)
  * @cb_start:	compression block to decompress (IN)
  * @cb_size:	size of compression block @cb_start in bytes (IN)
+ * @i_size:	file size when we started the read (IN)
+ * @initialized_size: initialized file size when we started the read (IN)
  *
  * The caller must have disabled preemption. ntfs_decompress() reenables it when
  * the critical section is finished.
@@ -161,7 +168,8 @@ static inline void handle_bounds_compressed_page(ntfs_inode *ni,
 static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 		int *dest_ofs, const int dest_max_index, const int dest_max_ofs,
 		const int xpage, char *xpage_done, u8 *const cb_start,
-		const u32 cb_size)
+		const u32 cb_size, const loff_t i_size,
+		const s64 initialized_size)
 {
 	/*
 	 * Pointers into the compressed data, i.e. the compression block (cb),
@@ -195,11 +203,17 @@ static int ntfs_decompress(struct page *dest_pages[], int *dest_index,
 
 	ntfs_debug("Entering, cb_size = 0x%x.", cb_size);
 do_next_sb:
-	ntfs_debug("Beginning sub-block at offset = 0x%x in the cb.",
+	ntfs_debug("Beginning sub-block at offset = 0x%zx in the cb.",
 			cb - cb_start);
-
-	/* Have we reached the end of the compression block? */
-	if (cb == cb_end || !le16_to_cpup((u16*)cb)) {
+	/*
+	 * Have we reached the end of the compression block or the end of the
+	 * decompressed data? The latter can happen for example if the current
+	 * position in the compression block is one byte before its end so the
+	 * first two checks do not detect it.
+	 */
+	if (cb == cb_end || !le16_to_cpup((le16*)cb) ||
+			(*dest_index == dest_max_index &&
+			*dest_ofs == dest_max_ofs)) {
 		int i;
 
 		ntfs_debug("Completed. Returning success (0).");
@@ -209,9 +223,6 @@ return_error:
 		spin_unlock(&ntfs_cb_lock);
 		/* Second stage: finalize completed pages. */
 		if (nr_completed_pages > 0) {
-			struct page *page = dest_pages[completed_pages[0]];
-			ntfs_inode *ni = NTFS_I(page->mapping->host);
-
 			for (i = 0; i < nr_completed_pages; i++) {
 				int di = completed_pages[i];
@@ -220,7 +231,8 @@ return_error:
 				 * If we are outside the initialized size, zero
 				 * the out of bounds page range.
 				 */
-				handle_bounds_compressed_page(ni, dp);
+				handle_bounds_compressed_page(dp, i_size,
+						initialized_size);
 				flush_dcache_page(dp);
 				kunmap(dp);
 				SetPageUptodate(dp);
@@ -249,7 +261,7 @@ return_error:
 
 	/* Setup the current sub-block source pointers and validate range. */
 	cb_sb_start = cb;
-	cb_sb_end = cb_sb_start + (le16_to_cpup((u16*)cb) & NTFS_SB_SIZE_MASK)
+	cb_sb_end = cb_sb_start + (le16_to_cpup((le16*)cb) & NTFS_SB_SIZE_MASK)
 			+ 3;
 	if (cb_sb_end > cb_end)
 		goto return_overflow;
@@ -271,7 +283,7 @@ return_error:
 	dp_addr = (u8*)page_address(dp) + do_sb_start;
 
 	/* Now, we are ready to process the current sub-block (sb). */
-	if (!(le16_to_cpup((u16*)cb) & NTFS_SB_IS_COMPRESSED)) {
+	if (!(le16_to_cpup((le16*)cb) & NTFS_SB_IS_COMPRESSED)) {
 		ntfs_debug("Found uncompressed sub-block.");
 
 		/* This sb is not compressed, just copy it into destination. */
@@ -376,7 +388,7 @@ do_next_tag:
 	lg++;
 
 	/* Get the phrase token into i. */
-	pt = le16_to_cpup((u16*)cb);
+	pt = le16_to_cpup((le16*)cb);
 
 	/*
 	 * Calculate starting position of the byte sequence in
@@ -427,7 +439,7 @@ do_next_tag:
 	goto do_next_tag;
 
 return_overflow:
-	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.\n");
+	ntfs_error(NULL, "Failed. Returning -EOVERFLOW.");
 	goto return_error;
 }
 
@@ -468,12 +480,14 @@ return_overflow:
  */
 int ntfs_read_compressed_block(struct page *page)
 {
+	loff_t i_size;
+	s64 initialized_size;
 	struct address_space *mapping = page->mapping;
 	ntfs_inode *ni = NTFS_I(mapping->host);
 	ntfs_volume *vol = ni->vol;
 	struct super_block *sb = vol->sb;
-	run_list_element *rl;
-	unsigned long block_size = sb->s_blocksize;
+	runlist_element *rl;
+	unsigned long flags, block_size = sb->s_blocksize;
 	unsigned char block_size_bits = sb->s_blocksize_bits;
 	u8 *cb, *cb_pos, *cb_end;
 	struct buffer_head **bhs;
@@ -525,7 +539,6 @@ int ntfs_read_compressed_block(struct page *page)
 	if (unlikely(!pages || !bhs)) {
 		kfree(bhs);
 		kfree(pages);
-		SetPageError(page);
 		unlock_page(page);
 		ntfs_error(vol->sb, "Failed to allocate internal buffers.");
 		return -ENOMEM;
@@ -542,8 +555,12 @@ int ntfs_read_compressed_block(struct page *page)
 	 * The remaining pages need to be allocated and inserted into the page
 	 * cache, alignment guarantees keep all the below much simpler. (-8
 	 */
-	max_page = ((VFS_I(ni)->i_size + PAGE_CACHE_SIZE - 1) >>
-			PAGE_CACHE_SHIFT) - offset;
+	read_lock_irqsave(&ni->size_lock, flags);
+	i_size = i_size_read(VFS_I(ni));
+	initialized_size = ni->initialized_size;
+	read_unlock_irqrestore(&ni->size_lock, flags);
+	max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
+			offset;
 	if (nr_pages < max_page)
 		max_page = nr_pages;
 	for (i = 0; i < max_page; i++, offset++) {
@@ -569,7 +586,7 @@ int ntfs_read_compressed_block(struct page *page)
 	}
 
 	/*
-	 * We have the run list, and all the destination pages we need to fill.
+	 * We have the runlist, and all the destination pages we need to fill.
 	 * Now read the first compression block.
 	 */
 	cur_page = 0;
@@ -583,20 +600,20 @@ do_next_cb:
 	rl = NULL;
 	for (vcn = start_vcn, start_vcn += cb_clusters; vcn < start_vcn;
 			vcn++) {
-		BOOL is_retry = FALSE;
+		bool is_retry = false;
 
 		if (!rl) {
lock_retry_remap:
-			down_read(&ni->run_list.lock);
-			rl = ni->run_list.rl;
+			down_read(&ni->runlist.lock);
+			rl = ni->runlist.rl;
 		}
 		if (likely(rl != NULL)) {
 			/* Seek to element containing target vcn. */
 			while (rl->length && rl[1].vcn <= vcn)
 				rl++;
-			lcn = vcn_to_lcn(rl, vcn);
+			lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
 		} else
-			lcn = (LCN)LCN_RL_NOT_MAPPED;
+			lcn = LCN_RL_NOT_MAPPED;
 		ntfs_debug("Reading vcn = 0x%llx, lcn = 0x%llx.",
 				(unsigned long long)vcn,
 				(unsigned long long)lcn);
@@ -609,13 +626,13 @@ lock_retry_remap:
 				break;
 			if (is_retry || lcn != LCN_RL_NOT_MAPPED)
 				goto rl_err;
-			is_retry = TRUE;
+			is_retry = true;
 			/*
-			 * Attempt to map run list, dropping lock for the
+			 * Attempt to map runlist, dropping lock for the
 			 * duration.
 			 */
-			up_read(&ni->run_list.lock);
-			if (!map_run_list(ni, vcn))
+			up_read(&ni->runlist.lock);
+			if (!ntfs_map_runlist(ni, vcn))
 				goto lock_retry_remap;
 			goto map_rl_err;
 		}
@@ -632,7 +649,7 @@ lock_retry_remap:
 
 	/* Release the lock if we took it. */
 	if (rl)
-		up_read(&ni->run_list.lock);
+		up_read(&ni->runlist.lock);
 
 	/* Setup and initiate io on all buffer heads. */
 	for (i = 0; i < nr_bhs; i++) {
@@ -814,7 +831,8 @@ lock_retry_remap:
 				 * If we are outside the initialized size, zero
 				 * the out of bounds page range.
 				 */
-				handle_bounds_compressed_page(ni, page);
+				handle_bounds_compressed_page(page, i_size,
+						initialized_size);
 				flush_dcache_page(page);
 				kunmap(page);
 				SetPageUptodate(page);
@@ -837,7 +855,8 @@ lock_retry_remap:
 		ntfs_debug("Found compressed compression block.");
 		err = ntfs_decompress(pages, &cur_page, &cur_ofs,
 				cb_max_page, cb_max_ofs, xpage, &xpage_done,
-				cb_pos, cb_size - (cb_pos - cb));
+				cb_pos, cb_size - (cb_pos - cb), i_size,
+				initialized_size);
 		/*
 		 * We can sleep from now on, lock already dropped by
 		 * ntfs_decompress().
@@ -845,15 +864,12 @@ lock_retry_remap:
 		if (err) {
 			ntfs_error(vol->sb, "ntfs_decompress() failed in inode "
 					"0x%lx with error code %i. Skipping "
-					"this compression block.\n",
+					"this compression block.",
 					ni->mft_no, -err);
 			/* Release the unfinished pages. */
 			for (; prev_cur_page < cur_page; prev_cur_page++) {
 				page = pages[prev_cur_page];
 				if (page) {
-					if (prev_cur_page == xpage &&
-							!xpage_done)
-						SetPageError(page);
 					flush_dcache_page(page);
 					kunmap(page);
 					unlock_page(page);
@@ -882,9 +898,8 @@ lock_retry_remap:
 		if (page) {
 			ntfs_error(vol->sb, "Still have pages left! "
 					"Terminating them with extreme "
-					"prejudice.");
-			if (cur_page == xpage && !xpage_done)
-				SetPageError(page);
+					"prejudice. Inode 0x%lx, page index "
+					"0x%lx.", ni->mft_no, page->index);
 			flush_dcache_page(page);
 			kunmap(page);
 			unlock_page(page);
@@ -913,18 +928,18 @@ read_err:
 	goto err_out;
 
 map_rl_err:
-	ntfs_error(vol->sb, "map_run_list() failed. Cannot read compression "
-			"block.");
+	ntfs_error(vol->sb, "ntfs_map_runlist() failed. Cannot read "
+			"compression block.");
 	goto err_out;
 
 rl_err:
-	up_read(&ni->run_list.lock);
-	ntfs_error(vol->sb, "vcn_to_lcn() failed. Cannot read compression "
-			"block.");
+	up_read(&ni->runlist.lock);
+	ntfs_error(vol->sb, "ntfs_rl_vcn_to_lcn() failed. Cannot read "
+			"compression block.");
 	goto err_out;
 
 getblk_err:
-	up_read(&ni->run_list.lock);
+	up_read(&ni->runlist.lock);
 	ntfs_error(vol->sb, "getblk() failed. Cannot read compression block.");
 
 err_out:
@@ -932,8 +947,6 @@ err_out:
 	for (i = cur_page; i < max_page; i++) {
 		page = pages[i];
 		if (page) {
-			if (i == xpage && !xpage_done)
-				SetPageError(page);
 			flush_dcache_page(page);
 			kunmap(page);
 			unlock_page(page);
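
Note on the le16 casts in the ntfs_decompress() hunks: each NTFS compression block (cb) is a sequence of sub-blocks, and every sub-block begins with a little-endian u16 header whose low 12 bits (NTFS_SB_SIZE_MASK) hold the stored size minus one and whose top bit (NTFS_SB_IS_COMPRESSED) marks compressed storage; a zero header terminates the cb, which is what the do_next_sb checks test. This also explains the "+ 3" when computing cb_sb_end: two header bytes plus the masked size plus one. The stand-alone user-space sketch below (not kernel code; parse_sb_header() and the sample bytes are invented for illustration) mirrors that header handling:

	#include <stdint.h>
	#include <stdio.h>

	/* Constants as in fs/ntfs/compress.c (ntfs_compression_constants). */
	enum {
		NTFS_SB_SIZE_MASK     = 0x0fff, /* stored size - 1, low 12 bits */
		NTFS_SB_SIZE          = 0x1000, /* decompressed sub-block: 4096 */
		NTFS_SB_IS_COMPRESSED = 0x8000, /* sub-block stored compressed */
	};

	/*
	 * Parse one sub-block header the way the do_next_sb loop does.
	 * Returns 0 at the end of the compression block (buffer exhausted
	 * or a zero header word).
	 */
	static int parse_sb_header(const uint8_t *cb, const uint8_t *cb_end,
			size_t *sb_len, int *is_compressed)
	{
		uint16_t hdr;

		if (cb_end - cb < 2)
			return 0;		/* ran off the end of the cb */
		hdr = (uint16_t)cb[0] | ((uint16_t)cb[1] << 8); /* le16_to_cpup() */
		if (!hdr)
			return 0;		/* zero header terminates the cb */
		*sb_len = (size_t)(hdr & NTFS_SB_SIZE_MASK) + 1;
		*is_compressed = !!(hdr & NTFS_SB_IS_COMPRESSED);
		/* cb_sb_end = cb + 2 header bytes + *sb_len, i.e. the "+ 3". */
		return 1;
	}

	int main(void)
	{
		const uint8_t cb[] = { 0x23, 0x81 };	/* le16 0x8123, fabricated */
		size_t len;
		int comp;

		if (parse_sb_header(cb, cb + sizeof(cb), &len, &comp))
			printf("sub-block: %zu data bytes, %scompressed\n",
					len, comp ? "" : "un");
		return 0;
	}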
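The i_size/initialized_size parameters threaded through the hunks above let the read path zero page regions beyond the initialized size from values snapshotted once under ni->size_lock, instead of re-reading ni->initialized_size while it may be changing under us. A minimal user-space sketch of the offset arithmetic in zero_partial_compressed_page()/handle_bounds_compressed_page(), assuming a 4 KiB PAGE_CACHE_SIZE (bytes_to_zero() is an invented helper):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_CACHE_SHIFT 12	/* assume 4 KiB pages */
	#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
	#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

	/*
	 * For the page at @index, how many trailing bytes lie beyond
	 * @initialized_size and must be zeroed?  Mirrors the two kernel
	 * helpers: skip pages below the initialized size, clear whole
	 * pages past it, and clear partial pages from kp_ofs onwards.
	 */
	static size_t bytes_to_zero(unsigned long index, int64_t initialized_size,
			int64_t i_size)
	{
		int64_t page_start = (int64_t)index << PAGE_CACHE_SHIFT;

		if (index < (unsigned long)(initialized_size >> PAGE_CACHE_SHIFT) ||
				initialized_size >= i_size)
			return 0;		/* fully initialized page */
		if (page_start >= initialized_size)
			return (size_t)PAGE_CACHE_SIZE;	/* clear_page() case */
		/* kp_ofs = initialized_size & ~PAGE_CACHE_MASK; zero the rest. */
		return (size_t)(PAGE_CACHE_SIZE -
				(initialized_size & ~PAGE_CACHE_MASK));
	}

	int main(void)
	{
		/* Fabricated example: initialized to 0x1a00 of a 0x3000 file. */
		printf("page 0: zero %zu bytes\n", bytes_to_zero(0, 0x1a00, 0x3000));
		printf("page 1: zero %zu bytes\n", bytes_to_zero(1, 0x1a00, 0x3000));
		printf("page 2: zero %zu bytes\n", bytes_to_zero(2, 0x1a00, 0x3000));
		return 0;	/* expect 0, 1536 and 4096 bytes */
	}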
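Finally, the lock_retry_remap logic renamed above follows a common kernel idiom: look the VCN up under the runlist read lock; if it comes back LCN_RL_NOT_MAPPED, drop the lock, call ntfs_map_runlist() (which needs the lock exclusively), and retry exactly once, with is_retry preventing an endless loop. A rough user-space sketch of the same idiom using a pthread rwlock (the table, helper names and values are invented stand-ins, not NTFS code):

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define LCN_RL_NOT_MAPPED (-2L)	/* as in the NTFS runlist code */

	static pthread_rwlock_t rl_lock = PTHREAD_RWLOCK_INITIALIZER;
	static long rl_table[16];	/* toy VCN -> LCN map; 0 = unmapped */

	static long vcn_to_lcn_locked(long vcn)	/* caller holds rl_lock */
	{
		long lcn = rl_table[vcn];	/* vcn assumed < 16 here */
		return lcn ? lcn : LCN_RL_NOT_MAPPED;
	}

	static int map_runlist(long vcn)	/* takes rl_lock exclusively */
	{
		pthread_rwlock_wrlock(&rl_lock);
		rl_table[vcn] = 100 + vcn;	/* pretend to read mapping pairs */
		pthread_rwlock_unlock(&rl_lock);
		return 0;		/* 0 on success, like ntfs_map_runlist() */
	}

	static long lookup_lcn(long vcn)
	{
		bool is_retry = false;
		long lcn;

	retry:
		pthread_rwlock_rdlock(&rl_lock);
		lcn = vcn_to_lcn_locked(vcn);
		pthread_rwlock_unlock(&rl_lock);
		if (lcn == LCN_RL_NOT_MAPPED && !is_retry) {
			is_retry = true;	/* one remap attempt only */
			if (!map_runlist(vcn))
				goto retry;
		}
		return lcn;
	}

	int main(void)
	{
		printf("lcn = %ld\n", lookup_lcn(5));	/* miss, remap, retry: 105 */
		return 0;
	}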