*/
/*
 * NOTE(review): presumably the /proc/sys/vm/laptop_mode sysctl knob —
 * confirm against the sysctl table; only the definition and export are
 * visible in this hunk.
 */
int laptop_mode;
+EXPORT_SYMBOL(laptop_mode);
+
/* End of sysctl-exported parameters */
* For address_spaces which do not use buffers. Just tag the page as dirty in
* its radix tree.
*
- * __set_page_dirty_nobuffers() may return -ENOSPC. But if it does, the page
- * is still safe, as long as it actually manages to find some blocks at
- * writeback time.
- *
* This is also used when a single buffer is being dirtied: we want to set the
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ *
+ * Most callers have locked the page, which pins the address_space in memory.
+ * But zap_pte_range() does not lock the page, however in that case the
+ * mapping is pinned by the vma's ->vm_file reference.
+ *
+ * We take care to handle the case where the page was truncated from the
* mapping by re-checking page_mapping() inside tree_lock.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
int ret = 0;
if (!TestSetPageDirty(page)) {
- struct address_space *mapping = page->mapping;
+ struct address_space *mapping = page_mapping(page);
if (mapping) {
spin_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- BUG_ON(page->mapping != mapping);
+ mapping = page_mapping(page);
+ if (page_mapping(page)) { /* Race with truncate? */
+ BUG_ON(page_mapping(page) != mapping);
if (!mapping->backing_dev_info->memory_backed)
inc_page_state(nr_dirty);
radix_tree_tag_set(&mapping->page_tree,
- page->index, PAGECACHE_TAG_DIRTY);
+ page_index(page), PAGECACHE_TAG_DIRTY);
}
spin_unlock_irq(&mapping->tree_lock);
- if (!PageSwapCache(page))
+ if (mapping->host) {
+ /* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host,
I_DIRTY_PAGES);
+ }
}
}
return ret;
/*
 * Dirty a page.  Pages with a mapping are delegated to the
 * address_space's ->set_page_dirty() operation when one is provided,
 * falling back to __set_page_dirty_buffers().  Mapping-less pages just
 * get PG_dirty set (checked first to avoid a redundant atomic op).
 *
 * NOTE(review): this span contained interleaved old/new diff lines and
 * did not compile; resolved here to the post-patch form.
 *
 * Returns whatever the ->set_page_dirty() op returns, or 0.
 */
int fastcall set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;

		if (spd)
			return (*spd)(page);
		return __set_page_dirty_buffers(page);
	}
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);
/*
 * NOTE(review): diff-hunk interior — the enclosing function begins
 * before this view.  The change replaces page->index with
 * page_index(page) when clearing the dirty tag under tree_lock.
 */
if (mapping) {
spin_lock_irqsave(&mapping->tree_lock, flags);
if (TestClearPageDirty(page)) {
- radix_tree_tag_clear(&mapping->page_tree, page->index,
+ radix_tree_tag_clear(&mapping->page_tree,
+ page_index(page),
PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (!mapping->backing_dev_info->memory_backed)
/*
 * NOTE(review): diff-hunk interior of a second dirty-clearing function
 * (header not visible).  Same page->index -> page_index(page) change.
 */
spin_lock_irqsave(&mapping->tree_lock, flags);
if (TestClearPageDirty(page)) {
- radix_tree_tag_clear(&mapping->page_tree, page->index,
+ radix_tree_tag_clear(&mapping->page_tree,
+ page_index(page),
PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
return 1;
/*
 * NOTE(review): diff-hunk interior — clears the writeback radix-tree
 * tag when TestClearPageWriteback() succeeds; page->index becomes
 * page_index(page).  Enclosing function is outside this view.
 */
spin_lock_irqsave(&mapping->tree_lock, flags);
ret = TestClearPageWriteback(page);
if (ret)
- radix_tree_tag_clear(&mapping->page_tree, page->index,
+ radix_tree_tag_clear(&mapping->page_tree,
+ page_index(page),
PAGECACHE_TAG_WRITEBACK);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
/*
 * NOTE(review): diff-hunk interior — sets the writeback tag (and clears
 * the dirty tag for non-dirty pages) under tree_lock; page->index
 * becomes page_index(page).  Enclosing function is outside this view.
 */
} else {
spin_lock_irqsave(&mapping->tree_lock, flags);
ret = TestSetPageWriteback(page);
if (!ret)
- radix_tree_tag_set(&mapping->page_tree, page->index,
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page),
PAGECACHE_TAG_WRITEBACK);
if (!PageDirty(page))
- radix_tree_tag_clear(&mapping->page_tree, page->index,
+ radix_tree_tag_clear(&mapping->page_tree,
+ page_index(page),
PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
} else {