#include <linux/kernel.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
+#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h> /* grr. try_to_release_page,
- block_invalidatepage */
+ do_invalidatepage */
-static int do_invalidatepage(struct page *page, unsigned long offset)
+/**
+ * do_invalidatepage - invalidate part or all of a page
+ * @page: the page which is affected
+ * @offset: the byte offset within the page of the truncation point
+ *
+ * do_invalidatepage() is called when all or part of the page has become
+ * invalidated by a truncate operation.
+ *
+ * do_invalidatepage() does not have to release all buffers, but it must
+ * ensure that no dirty buffer is left outside @offset and that no I/O
+ * is underway against any of the blocks which are outside the truncation
+ * point, because the caller is about to free (and possibly reuse) those
+ * blocks on-disk.
+ */
+void do_invalidatepage(struct page *page, unsigned long offset)
{
- int (*invalidatepage)(struct page *, unsigned long);
+ void (*invalidatepage)(struct page *, unsigned long);
invalidatepage = page->mapping->a_ops->invalidatepage;
- if (invalidatepage == NULL)
+#ifdef CONFIG_BLOCK
+ if (!invalidatepage)
invalidatepage = block_invalidatepage;
- return (*invalidatepage)(page, offset);
+#endif
+ if (invalidatepage)
+ (*invalidatepage)(page, offset);
}
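
For context, a filesystem opts into this dispatch through its address_space_operations. A minimal sketch of an ->invalidatepage hook matching the new void signature, for a hypothetical examplefs (block-backed filesystems can usually point the hook straight at block_invalidatepage):

static void examplefs_invalidatepage(struct page *page, unsigned long offset)
{
	/* Drop buffer_heads beyond the truncation point; offset == 0
	 * means the whole page is being invalidated. */
	if (page_has_buffers(page))
		block_invalidatepage(page, offset);
}

static const struct address_space_operations examplefs_aops = {
	.invalidatepage	= examplefs_invalidatepage,
	/* ... */
};
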
static inline void truncate_partial_page(struct page *page, unsigned partial)
do_invalidatepage(page, partial);
}
+/*
+ * This cancels just the dirty bit on the kernel page itself; it
+ * does NOT actually remove dirty bits on any mmap's that may be
+ * around. It also leaves the page tagged dirty, so any sync
+ * activity will still find it on the dirty lists, and in particular,
+ * clear_page_dirty_for_io() will still look at the dirty bits in
+ * the VM.
+ *
+ * This should *normally* only ever be done when a page is truncated
+ * and is not actually mapped anywhere at all. However, fs/buffer.c
+ * does this when it notices that somebody has cleaned out all the
+ * buffers on a page without actually doing it through the VM. Can
+ * you say "ext3 is horribly ugly"? Thought you could.
+ */
+void cancel_dirty_page(struct page *page, unsigned int account_size)
+{
+ if (TestClearPageDirty(page)) {
+ struct address_space *mapping = page->mapping;
+ if (mapping && mapping_cap_account_dirty(mapping)) {
+ dec_zone_page_state(page, NR_FILE_DIRTY);
+ if (account_size)
+ task_io_account_cancelled_write(account_size);
+ }
+ }
+}
+EXPORT_SYMBOL(cancel_dirty_page);
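
A usage sketch (hypothetical caller, not part of this patch): the account_size argument is the number of bytes of pending write being thrown away, so that the owning task's write accounting can be credited back; pass 0 to leave accounting untouched:

static void example_discard_page(struct page *page)
{
	/* Page is locked and about to be removed from the page cache:
	 * cancel the whole page's worth of dirty data so the task's
	 * I/O accounting stays consistent. */
	cancel_dirty_page(page, PAGE_CACHE_SIZE);
}
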
+
/*
* If truncate cannot remove the fs-private metadata from the page, the page
* becomes anonymous. It will be left on the LRU and may even be mapped into
if (page->mapping != mapping)
return;
+ cancel_dirty_page(page, PAGE_CACHE_SIZE);
+
if (PagePrivate(page))
do_invalidatepage(page, 0);
- clear_page_dirty(page);
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
remove_from_page_cache(page);
/*
* This is for invalidate_inode_pages(). That function can be called at
* any time, and is not supposed to throw away dirty pages. But pages can
- * be marked dirty at any time too. So we re-check the dirtiness inside
- * ->tree_lock. That provides exclusion against the __set_page_dirty
- * functions.
+ * be marked dirty at any time too, so use remove_mapping(), which
+ * safely discards clean, unused pages.
*
* Returns non-zero if the page was successfully invalidated.
*/
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
+ int ret;
+
if (page->mapping != mapping)
return 0;
if (PagePrivate(page) && !try_to_release_page(page, 0))
return 0;
- write_lock_irq(&mapping->tree_lock);
- if (PageDirty(page)) {
- write_unlock_irq(&mapping->tree_lock);
- return 0;
- }
+ ret = remove_mapping(mapping, page);
- BUG_ON(PagePrivate(page));
- __remove_from_page_cache(page);
- write_unlock_irq(&mapping->tree_lock);
- ClearPageUptodate(page);
- page_cache_release(page); /* pagecache ref */
- return 1;
+ return ret;
}
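
remove_mapping() (mm/vmscan.c) provides the exclusion the old open-coded version got from ->tree_lock: under the lock it re-checks that nobody else holds a reference and that the page has not been redirtied in the meantime. A paraphrased sketch, not the verbatim source (swap-cache handling omitted):

static int example_remove_mapping(struct address_space *mapping,
				  struct page *page)
{
	write_lock_irq(&mapping->tree_lock);
	/* one ref for the caller, one for the page cache */
	if (page_count(page) != 2 || PageDirty(page)) {
		write_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	page_cache_release(page);	/* drop the pagecache ref */
	return 1;
}
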
/**
- * truncate_inode_pages - truncate *all* the pages from an offset
+ * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
+ * @lend: offset to which to truncate
*
- * Truncate the page cache at a set offset, removing the pages that are beyond
- * that offset (and zeroing out partial pages).
+ * Truncate the page cache, removing the pages that are between
+ * the specified offsets (and zeroing out the partial page if lstart
+ * is not page-aligned).
*
* Truncate takes two passes - the first pass is nonblocking. It will not
* block on page locks and it will not block on writeback. The second pass
* We pass down the cache-hot hint to the page freeing code. Even if the
* mapping is large, it is probably the case that the final pages are the most
* recently touched, and freeing happens in ascending file offset order.
- *
- * Called under (and serialised by) inode->i_sem.
*/
-void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+void truncate_inode_pages_range(struct address_space *mapping,
+ loff_t lstart, loff_t lend)
{
const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
+ pgoff_t end;
const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t next;
if (mapping->nrpages == 0)
return;
+ BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
+ end = (lend >> PAGE_CACHE_SHIFT);
+
pagevec_init(&pvec, 0);
next = start;
- while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+ while (next <= end &&
+ pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index = page->index;
+ if (page_index > end) {
+ next = page_index;
+ break;
+ }
+
if (page_index > next)
next = page_index;
next++;
next = start;
continue;
}
+ if (pvec.pages[0]->index > end) {
+ pagevec_release(&pvec);
+ break;
+ }
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
+ if (page->index > end)
+ break;
lock_page(page);
wait_on_page_writeback(page);
if (page->index > next)
pagevec_release(&pvec);
}
}
+EXPORT_SYMBOL(truncate_inode_pages_range);
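
Note the convention the BUG_ON above enforces: lend is inclusive and must address the last byte of a page. A hypothetical hole-punch style caller, assuming holebegin and holelen are both page-aligned:

static void example_truncate_hole(struct inode *inode,
				  loff_t holebegin, loff_t holelen)
{
	/* lend is the last byte of the final page in the hole, so
	 * (lend & (PAGE_CACHE_SIZE - 1)) == PAGE_CACHE_SIZE - 1 */
	truncate_inode_pages_range(inode->i_mapping, holebegin,
				   holebegin + holelen - 1);
}
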
+/**
+ * truncate_inode_pages - truncate *all* the pages from an offset
+ * @mapping: mapping to truncate
+ * @lstart: offset from which to truncate
+ *
+ * Called under (and serialised by) inode->i_mutex.
+ */
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+{
+ truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
+}
EXPORT_SYMBOL(truncate_inode_pages);
/**
pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
+ pgoff_t index;
+ int lock_failed;
- if (TestSetPageLocked(page)) {
- next++;
- continue;
- }
- if (page->index > next)
- next = page->index;
+ lock_failed = TestSetPageLocked(page);
+
+ /*
+ * We really shouldn't be looking at the ->index of an
+ * unlocked page. But we're not allowed to lock these
+ * pages. So we rely upon nobody altering the ->index
+ * of this (pinned-by-us) page.
+ */
+ index = page->index;
+ if (index > next)
+ next = index;
next++;
+ if (lock_failed)
+ continue;
+
if (PageDirty(page) || PageWriteback(page))
goto unlock;
if (page_mapped(page))
break;
}
pagevec_release(&pvec);
- cond_resched();
}
return ret;
}
{
return invalidate_mapping_pages(mapping, 0, ~0UL);
}
-
EXPORT_SYMBOL(invalidate_inode_pages);
+/*
+ * This is like invalidate_complete_page(), except it ignores the page's
+ * refcount. We do this because invalidate_inode_pages2() needs stronger
+ * invalidation guarantees, and cannot afford to leave pages behind because
+ * shrink_list() has a temp ref on them, or because they're transiently sitting
+ * in the lru_cache_add() pagevecs.
+ */
+static int
+invalidate_complete_page2(struct address_space *mapping, struct page *page)
+{
+ if (page->mapping != mapping)
+ return 0;
+
+ if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+ return 0;
+
+ write_lock_irq(&mapping->tree_lock);
+ if (PageDirty(page))
+ goto failed;
+
+ BUG_ON(PagePrivate(page));
+ __remove_from_page_cache(page);
+ write_unlock_irq(&mapping->tree_lock);
+ ClearPageUptodate(page);
+ page_cache_release(page); /* pagecache ref */
+ return 1;
+failed:
+ write_unlock_irq(&mapping->tree_lock);
+ return 0;
+}
+
+static int do_launder_page(struct address_space *mapping, struct page *page)
+{
+ if (!PageDirty(page))
+ return 0;
+ if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
+ return 0;
+ return mapping->a_ops->launder_page(page);
+}
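
An ->launder_page implementation is called with the page locked and dirty; it should write the data back synchronously, leave the page locked, and return 0 or a negative errno (which propagates out of invalidate_inode_pages2_range()). A sketch for a hypothetical examplefs, where examplefs_sync_page() stands in for the filesystem's own synchronous writeback path; ->writepage is not suitable here because it unlocks the page:

static int examplefs_launder_page(struct page *page)
{
	/* Flush the dirty page so that the following
	 * invalidate_complete_page2() sees it clean; the page must
	 * still be locked on return. */
	return examplefs_sync_page(page);	/* hypothetical helper */
}
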
+
/**
* invalidate_inode_pages2_range - remove range of pages from an address_space
* @mapping: the address_space
for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
pgoff_t page_index;
- int was_dirty;
lock_page(page);
if (page->mapping != mapping) {
* Zap the rest of the file in one hit.
*/
unmap_mapping_range(mapping,
- page_index << PAGE_CACHE_SHIFT,
- (end - page_index + 1)
+ (loff_t)page_index<<PAGE_CACHE_SHIFT,
+ (loff_t)(end - page_index + 1)
<< PAGE_CACHE_SHIFT,
0);
did_range_unmap = 1;
* Just zap this page
*/
unmap_mapping_range(mapping,
- page_index << PAGE_CACHE_SHIFT,
+ (loff_t)page_index<<PAGE_CACHE_SHIFT,
PAGE_CACHE_SIZE, 0);
}
}
- was_dirty = test_clear_page_dirty(page);
- if (!invalidate_complete_page(mapping, page)) {
- if (was_dirty)
- set_page_dirty(page);
+ ret = do_launder_page(mapping, page);
+ if (ret == 0 && !invalidate_complete_page2(mapping, page))
ret = -EIO;
- }
unlock_page(page);
}
pagevec_release(&pvec);