Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.4-vs2.0.2-rc26.diff
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 042ccad..75d7f48 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -46,7 +46,7 @@
 static long ratelimit_pages = 32;
 
 static long total_pages;       /* The total number of pages in the machine. */
-static int dirty_exceeded;     /* Dirty mem may be over limit */
+static int dirty_exceeded __cacheline_aligned_in_smp;  /* Dirty mem may be over limit */
 
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
@@ -72,15 +72,14 @@ int dirty_background_ratio = 10;
 int vm_dirty_ratio = 40;
 
 /*
- * The interval between `kupdate'-style writebacks, in centiseconds
- * (hundredths of a second)
+ * The interval between `kupdate'-style writebacks, in jiffies
  */
-int dirty_writeback_centisecs = 5 * 100;
+int dirty_writeback_interval = 5 * HZ;
 
 /*
- * The longest number of centiseconds for which data is allowed to remain dirty
+ * The longest number of jiffies for which data is allowed to remain dirty
  */
-int dirty_expire_centisecs = 30 * 100;
+int dirty_expire_interval = 30 * HZ;
 
 /*
  * Flag that makes the machine dump writes/reads and block dirtyings.
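
Note that this conversion leaves the sysctl ABI alone: userspace still reads and
writes /proc/sys/vm/dirty_writeback_centisecs in USER_HZ ticks (centiseconds on
most architectures), and proc_dointvec_userhz_jiffies (see the handler further
down) translates at the boundary; only the in-kernel representation becomes
jiffies. A minimal userspace sketch of the same scaling, assuming HZ=250 purely
for illustration:

	#include <stdio.h>

	#define HZ 250	/* assumption for the example; real kernels vary by config */

	static long centisecs_to_jiffies(long cs)
	{
		return (cs * HZ) / 100;	/* the scaling the old code performed inline */
	}

	int main(void)
	{
		printf("%ld\n", centisecs_to_jiffies(500));	/* 1250 == 5 * HZ,  dirty_writeback_interval */
		printf("%ld\n", centisecs_to_jiffies(3000));	/* 7500 == 30 * HZ, dirty_expire_interval */
		return 0;
	}
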
@@ -88,15 +87,34 @@ int dirty_expire_centisecs = 30 * 100;
 int block_dump;
 
 /*
- * Flag that puts the machine in "laptop mode".
+ * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
+ * a full sync is triggered after this time elapses without any disk activity.
  */
 int laptop_mode;
 
+EXPORT_SYMBOL(laptop_mode);
+
 /* End of sysctl-exported parameters */
 
 
 static void background_writeout(unsigned long _min_pages);
 
+struct writeback_state
+{
+       unsigned long nr_dirty;
+       unsigned long nr_unstable;
+       unsigned long nr_mapped;
+       unsigned long nr_writeback;
+};
+
+static void get_writeback_state(struct writeback_state *wbs)
+{
+       wbs->nr_dirty = read_page_state(nr_dirty);
+       wbs->nr_unstable = read_page_state(nr_unstable);
+       wbs->nr_mapped = read_page_state(nr_mapped);
+       wbs->nr_writeback = read_page_state(nr_writeback);
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -115,18 +133,30 @@ static void background_writeout(unsigned long _min_pages);
  * clamping level.
  */
 static void
-get_dirty_limits(struct page_state *ps, long *pbackground, long *pdirty)
+get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
+               struct address_space *mapping)
 {
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
+       unsigned long available_memory = total_pages;
        struct task_struct *tsk;
 
-       get_page_state(ps);
+       get_writeback_state(wbs);
 
-       unmapped_ratio = 100 - (ps->nr_mapped * 100) / total_pages;
+#ifdef CONFIG_HIGHMEM
+       /*
+        * If this mapping can only allocate from low memory,
+        * we exclude high memory from our count.
+        */
+       if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
+               available_memory -= totalhigh_pages;
+#endif
+
+
+       unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
 
        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
@@ -139,8 +169,8 @@ get_dirty_limits(struct page_state *ps, long *pbackground, long *pdirty)
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;
 
-       background = (background_ratio * total_pages) / 100;
-       dirty = (dirty_ratio * total_pages) / 100;
+       background = (background_ratio * available_memory) / 100;
+       dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
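
To make the clamping above concrete, here is the same arithmetic as a standalone
userspace sketch with made-up numbers (1,000,000 total pages, 300,000 of them
mapped, the default ratios from this file, and no highmem exclusion, so
available_memory == total_pages):

	#include <stdio.h>

	int main(void)
	{
		long total_pages = 1000000, nr_mapped = 300000;
		int dirty_ratio = 40;		/* vm_dirty_ratio default */
		int background_ratio = 10;	/* dirty_background_ratio default */

		int unmapped_ratio = 100 - (nr_mapped * 100) / total_pages;	/* 70 */

		if (dirty_ratio > unmapped_ratio / 2)	/* 40 > 35: clamp */
			dirty_ratio = unmapped_ratio / 2;
		if (background_ratio >= dirty_ratio)
			background_ratio = dirty_ratio / 2;

		long background = (background_ratio * total_pages) / 100;	/* 100000 */
		long dirty = (dirty_ratio * total_pages) / 100;			/* 350000 */

		/* a PF_LESS_THROTTLE or real-time task would now get +25% headroom */
		printf("background=%ld dirty=%ld\n", background, dirty);
		return 0;
	}
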
@@ -159,7 +189,7 @@ get_dirty_limits(struct page_state *ps, long *pbackground, long *pdirty)
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-       struct page_state ps;
+       struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
@@ -176,12 +206,14 @@ static void balance_dirty_pages(struct address_space *mapping)
                        .nr_to_write    = write_chunk,
                };
 
-               get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
-               nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
-               if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
+               get_dirty_limits(&wbs, &background_thresh,
+                                       &dirty_thresh, mapping);
+               nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
+               if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;
 
-               dirty_exceeded = 1;
+               if (!dirty_exceeded)
+                       dirty_exceeded = 1;
 
                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
@@ -191,10 +223,10 @@ static void balance_dirty_pages(struct address_space *mapping)
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
-                       get_dirty_limits(&ps, &background_thresh,
-                                       &dirty_thresh);
-                       nr_reclaimable = ps.nr_dirty + ps.nr_unstable;
-                       if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
+                       get_dirty_limits(&wbs, &background_thresh,
+                                       &dirty_thresh, mapping);
+                       nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
+                       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
@@ -203,7 +235,7 @@ static void balance_dirty_pages(struct address_space *mapping)
                blk_congestion_wait(WRITE, HZ/10);
        }
 
-       if (nr_reclaimable + ps.nr_writeback <= dirty_thresh)
+       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;
 
        if (writeback_in_progress(bdi))
@@ -223,22 +255,25 @@ static void balance_dirty_pages(struct address_space *mapping)
 }
 
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
- * @mapping - address_space which was dirtied
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
+ * @mapping: address_space which was dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
  * dirty state and will initiate writeback if needed.
  *
- * On really big machines, get_page_state is expensive, so try to avoid calling
- * it too often (ratelimiting).  But once we're over the dirty memory limit we
- * decrease the ratelimiting by a lot, to prevent individual processes from
- * overshooting the limit by (ratelimit_pages) each.
+ * On really big machines, get_writeback_state is expensive, so try to avoid
+ * calling it too often (ratelimiting).  But once we're over the dirty memory
+ * limit we decrease the ratelimiting by a lot, to prevent individual processes
+ * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+                                       unsigned long nr_pages_dirtied)
 {
-       static DEFINE_PER_CPU(int, ratelimits) = 0;
-       long ratelimit;
+       static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+       unsigned long ratelimit;
+       unsigned long *p;
 
        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
@@ -248,15 +283,40 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
-       if (get_cpu_var(ratelimits)++ >= ratelimit) {
-               __get_cpu_var(ratelimits) = 0;
-               put_cpu_var(ratelimits);
+       preempt_disable();
+       p = &__get_cpu_var(ratelimits);
+       *p += nr_pages_dirtied;
+       if (unlikely(*p >= ratelimit)) {
+               *p = 0;
+               preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
-       put_cpu_var(ratelimits);
+       preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
+
+void throttle_vm_writeout(void)
+{
+       struct writeback_state wbs;
+       long background_thresh;
+       long dirty_thresh;
+
+       for ( ; ; ) {
+               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+
+               /*
+                * Boost the allowable dirty threshold a bit for page
+                * allocators so they don't get DoS'ed by heavy writers
+                */
+               dirty_thresh += dirty_thresh / 10;      /* wheeee... */
+
+               if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
+                       break;
+               blk_congestion_wait(WRITE, HZ/10);
+       }
+}
+
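
The ratelimiting scheme above batches work per CPU: each CPU accumulates
nr_pages_dirtied in a per-CPU counter under preempt_disable(), and only enters
the expensive balance_dirty_pages() path once the batch crosses ratelimit_pages
(or a much smaller batch once dirty_exceeded is set). A userspace analogue,
where a plain static counter stands in for the per-CPU variable and the small
over-limit batch size is an assumption, not the kernel's exact value:

	#include <stdio.h>

	static unsigned long ratelimit_pages = 32;

	static void balance_dirty_pages_slowpath(void)
	{
		printf("slow path: re-evaluate dirty limits\n");
	}

	static void dirtied(unsigned long nr_pages_dirtied, int dirty_exceeded)
	{
		static unsigned long batch;		/* per-CPU in the kernel */
		unsigned long limit = ratelimit_pages;

		if (dirty_exceeded)
			limit = 8;	/* assumed small batch: throttle much harder */

		batch += nr_pages_dirtied;
		if (batch >= limit) {
			batch = 0;
			balance_dirty_pages_slowpath();
		}
	}

	int main(void)
	{
		for (int i = 0; i < 96; i++)
			dirtied(1, 0);	/* slow path fires once per 32 pages: 3 times */
		return 0;
	}
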
 
 /*
  * writeback at least _min_pages, and keep writing until the amount of dirty
@@ -274,12 +334,12 @@ static void background_writeout(unsigned long _min_pages)
        };
 
        for ( ; ; ) {
-               struct page_state ps;
+               struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;
 
-               get_dirty_limits(&ps, &background_thresh, &dirty_thresh);
-               if (ps.nr_dirty + ps.nr_unstable < background_thresh
+               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+               if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
@@ -301,13 +361,13 @@ static void background_writeout(unsigned long _min_pages)
  * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
  * -1 if all pdflush threads were busy.
  */
-int wakeup_bdflush(long nr_pages)
+int wakeup_pdflush(long nr_pages)
 {
        if (nr_pages == 0) {
-               struct page_state ps;
+               struct writeback_state wbs;
 
-               get_page_state(&ps);
-               nr_pages = ps.nr_dirty + ps.nr_unstable;
+               get_writeback_state(&wbs);
+               nr_pages = wbs.nr_dirty + wbs.nr_unstable;
        }
        return pdflush_operation(background_writeout, nr_pages);
 }
@@ -315,10 +375,8 @@ int wakeup_bdflush(long nr_pages)
 static void wb_timer_fn(unsigned long unused);
 static void laptop_timer_fn(unsigned long unused);
 
-static struct timer_list wb_timer =
-                       TIMER_INITIALIZER(wb_timer_fn, 0, 0);
-static struct timer_list laptop_mode_wb_timer =
-                       TIMER_INITIALIZER(laptop_timer_fn, 0, 0);
+static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
+static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
 
 /*
  * Periodic writeback of "old" data.
@@ -328,8 +386,8 @@ static struct timer_list laptop_mode_wb_timer =
  * just walks the superblock inode list, writing back any inodes which are
  * older than a specific point in time.
  *
- * Try to run once per dirty_writeback_centisecs.  But if a writeback event
- * takes longer than a dirty_writeback_centisecs interval, then leave a
+ * Try to run once per dirty_writeback_interval.  But if a writeback event
+ * takes longer than dirty_writeback_interval, then leave a
  * one-second gap.
  *
  * older_than_this takes precedence over nr_to_write.  So we'll only write back
@@ -341,7 +399,7 @@ static void wb_kupdate(unsigned long arg)
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
-       struct page_state ps;
+       struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
@@ -353,11 +411,11 @@ static void wb_kupdate(unsigned long arg)
 
        sync_supers();
 
-       get_page_state(&ps);
-       oldest_jif = jiffies - (dirty_expire_centisecs * HZ) / 100;
+       get_writeback_state(&wbs);
+       oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
-       next_jif = start_jif + (dirty_writeback_centisecs * HZ) / 100;
-       nr_to_write = ps.nr_dirty + ps.nr_unstable +
+       next_jif = start_jif + dirty_writeback_interval;
+       nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
@@ -373,7 +431,7 @@ static void wb_kupdate(unsigned long arg)
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
-       if (dirty_writeback_centisecs)
+       if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
 }
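
The rearming logic at the end of wb_kupdate() guarantees a breather when a pass
overruns: if the next scheduled run is already less than a second away (or in
the past), it is pushed out to jiffies + HZ. A sketch of that arithmetic with
illustrative numbers, again assuming HZ=250:

	#include <stdio.h>

	#define HZ 250	/* assumed for the example */

	int main(void)
	{
		unsigned long jiffies = 100000;			/* "now", made up */
		unsigned long next_jif = jiffies + 5 * HZ;	/* dirty_writeback_interval */

		jiffies += 7 * HZ;	/* pretend the writeback pass took 7 seconds */

		if (next_jif < jiffies + HZ)	/* stand-in for time_before(), which also copes with wraparound */
			next_jif = jiffies + HZ;

		printf("rearm in %lu jiffies (one second)\n", next_jif - jiffies);
		return 0;
	}
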
 
@@ -381,13 +439,13 @@ static void wb_kupdate(unsigned long arg)
  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
  */
 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
-               struct file *file, void __user *buffer, size_t *length)
+               struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length);
-       if (dirty_writeback_centisecs) {
+       proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
+       if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
-                       jiffies + (dirty_writeback_centisecs * HZ) / 100);
-       } else {
+                       jiffies + dirty_writeback_interval);
+       } else {
                del_timer(&wb_timer);
        }
        return 0;
@@ -416,7 +474,7 @@ static void laptop_timer_fn(unsigned long unused)
  */
 void laptop_io_completion(void)
 {
-       mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode * HZ);
+       mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
 }
 
 /*
@@ -432,8 +490,8 @@ void laptop_sync_completion(void)
 /*
  * If ratelimit_pages is too high then we can get into dirty-data overload
  * if a large number of processes all perform writes at the same time.
- * If it is too low then SMP machines will call the (expensive) get_page_state
- * too often.
+ * If it is too low then SMP machines will call the (expensive)
+ * get_writeback_state too often.
  *
  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
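
The 1/32 figure follows from worst-case accounting: each CPU can dirty up to
ratelimit_pages pages between checks, so the aggregate overshoot is bounded by
the number of CPUs times ratelimit_pages, and set_ratelimit() sizes
ratelimit_pages accordingly. Back-of-envelope with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		long dirty_thresh = 100000;	/* pages; illustrative */
		long num_cpus = 4;

		/* size the batch so all CPUs together overshoot by <= 1/32 */
		long ratelimit_pages = dirty_thresh / 32 / num_cpus;	/* 781 */

		printf("ratelimit_pages=%ld, worst overshoot=%ld pages (%.1f%%)\n",
		       ratelimit_pages, ratelimit_pages * num_cpus,
		       100.0 * ratelimit_pages * num_cpus / dirty_thresh);
		return 0;
	}
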
@@ -486,26 +544,37 @@ void __init page_writeback_init(void)
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;
+
+               if (dirty_background_ratio <= 0)
+                       dirty_background_ratio = 1;
+               if (vm_dirty_ratio <= 0)
+                       vm_dirty_ratio = 1;
        }
-       mod_timer(&wb_timer, jiffies + (dirty_writeback_centisecs * HZ) / 100);
+       mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
 }
 
 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
+       int ret;
+
        if (wbc->nr_to_write <= 0)
                return 0;
+       wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
-               return mapping->a_ops->writepages(mapping, wbc);
-       return generic_writepages(mapping, wbc);
+               ret = mapping->a_ops->writepages(mapping, wbc);
+       else
+               ret = generic_writepages(mapping, wbc);
+       wbc->for_writepages = 0;
+       return ret;
 }
 
 /**
  * write_one_page - write out a single page and optionally wait on I/O
  *
- * @page - the page to write
- * @wait - if true, wait on writeout
+ * @page: the page to write
+ * @wait: if true, wait on writeout
  *
  * The page must be locked by the caller and will be unlocked upon return.
  *
@@ -545,37 +614,43 @@ EXPORT_SYMBOL(write_one_page);
  * For address_spaces which do not use buffers.  Just tag the page as dirty in
  * its radix tree.
  *
- * __set_page_dirty_nobuffers() may return -ENOSPC.  But if it does, the page
- * is still safe, as long as it actually manages to find some blocks at
- * writeback time.
- *
  * This is also used when a single buffer is being dirtied: we want to set the
  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
+ *
+ * Most callers have locked the page, which pins the address_space in memory.
+ * But zap_pte_range() does not lock the page, however in that case the
+ * mapping is pinned by the vma's ->vm_file reference.
+ *
+ * We take care to handle the case where the page was truncated from the
+ * mapping by re-checking page_mapping() inside tree_lock.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
-       int ret = 0;
-
        if (!TestSetPageDirty(page)) {
-               struct address_space *mapping = page->mapping;
+               struct address_space *mapping = page_mapping(page);
+               struct address_space *mapping2;
 
                if (mapping) {
-                       spin_lock_irq(&mapping->tree_lock);
-                       if (page->mapping) {    /* Race with truncate? */
-                               BUG_ON(page->mapping != mapping);
-                               if (!mapping->backing_dev_info->memory_backed)
+                       write_lock_irq(&mapping->tree_lock);
+                       mapping2 = page_mapping(page);
+                       if (mapping2) { /* Race with truncate? */
+                               BUG_ON(mapping2 != mapping);
+                               if (mapping_cap_account_dirty(mapping))
                                        inc_page_state(nr_dirty);
                                radix_tree_tag_set(&mapping->page_tree,
-                                       page->index, PAGECACHE_TAG_DIRTY);
+                                       page_index(page), PAGECACHE_TAG_DIRTY);
                        }
-                       spin_unlock_irq(&mapping->tree_lock);
-                       if (!PageSwapCache(page))
+                       write_unlock_irq(&mapping->tree_lock);
+                       if (mapping->host) {
+                               /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
+                       }
                }
+               return 1;
        }
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
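
The truncate race the new comment describes is handled with a common pattern:
read the mapping optimistically, then re-check it after taking tree_lock, and
only tag the radix tree if the page is still attached. A minimal pthreads
rendition of that pattern (all types and names below are illustrative
stand-ins, not kernel API):

	#include <pthread.h>
	#include <stdio.h>

	struct mapping_like {
		pthread_mutex_t tree_lock;	/* stands in for mapping->tree_lock */
	};

	struct page_like {
		struct mapping_like *mapping;	/* cleared by a concurrent "truncate" */
	};

	static int tag_dirty(struct page_like *page)
	{
		struct mapping_like *mapping = page->mapping;	/* optimistic read */

		if (!mapping)
			return 0;
		pthread_mutex_lock(&mapping->tree_lock);
		if (page->mapping == mapping) {		/* re-check under the lock */
			/* safe: update tags here, as the kernel does in the radix tree */
			pthread_mutex_unlock(&mapping->tree_lock);
			return 1;
		}
		pthread_mutex_unlock(&mapping->tree_lock);	/* raced with truncate */
		return 0;
	}

	int main(void)
	{
		struct mapping_like m = { PTHREAD_MUTEX_INITIALIZER };
		struct page_like p = { &m };

		printf("%d\n", tag_dirty(&p));	/* 1: page still attached */
		return 0;
	}
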
 
@@ -598,14 +673,18 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
 int fastcall set_page_dirty(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
-       int (*spd)(struct page *);
 
-       if (!mapping) {
-               SetPageDirty(page);
-               return 0;
+       if (likely(mapping)) {
+               int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
+               if (spd)
+                       return (*spd)(page);
+               return __set_page_dirty_buffers(page);
        }
-       spd = mapping->a_ops->set_page_dirty;
-       return spd? (*spd)(page): __set_page_dirty_buffers(page);
+       if (!PageDirty(page)) {
+               if (!TestSetPageDirty(page))
+                       return 1;
+       }
+       return 0;
 }
 EXPORT_SYMBOL(set_page_dirty);
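
The rewritten set_page_dirty() is the classic a_ops dispatch in miniature: use
the address_space's own method when one is provided, otherwise fall back to the
buffers implementation. The same shape in a self-contained userspace sketch
(names are illustrative stand-ins):

	#include <stdio.h>

	struct page;	/* opaque, as far as this sketch cares */

	struct a_ops_like {
		int (*set_page_dirty)(struct page *);	/* may be NULL */
	};

	static int default_set_dirty(struct page *page)
	{
		(void)page;
		puts("__set_page_dirty_buffers fallback");
		return 1;
	}

	static int dispatch_set_dirty(struct a_ops_like *a_ops, struct page *page)
	{
		int (*spd)(struct page *) = a_ops->set_page_dirty;

		if (spd)
			return (*spd)(page);
		return default_set_dirty(page);
	}

	int main(void)
	{
		struct a_ops_like ops = { NULL };	/* no method: use the fallback */

		return !dispatch_set_dirty(&ops, NULL);
	}
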
 
@@ -640,16 +719,17 @@ int test_clear_page_dirty(struct page *page)
        unsigned long flags;
 
        if (mapping) {
-               spin_lock_irqsave(&mapping->tree_lock, flags);
+               write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
-                       radix_tree_tag_clear(&mapping->page_tree, page->index,
+                       radix_tree_tag_clear(&mapping->page_tree,
+                                               page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-                       spin_unlock_irqrestore(&mapping->tree_lock, flags);
-                       if (!mapping->backing_dev_info->memory_backed)
+                       write_unlock_irqrestore(&mapping->tree_lock, flags);
+                       if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
@@ -676,7 +756,7 @@ int clear_page_dirty_for_io(struct page *page)
 
        if (mapping) {
                if (TestClearPageDirty(page)) {
-                       if (!mapping->backing_dev_info->memory_backed)
+                       if (mapping_cap_account_dirty(mapping))
                                dec_page_state(nr_dirty);
                        return 1;
                }
@@ -686,29 +766,6 @@ int clear_page_dirty_for_io(struct page *page)
 }
 EXPORT_SYMBOL(clear_page_dirty_for_io);
 
-/*
- * Clear a page's dirty flag while ignoring dirty memory accounting
- */
-int __clear_page_dirty(struct page *page)
-{
-       struct address_space *mapping = page_mapping(page);
-
-       if (mapping) {
-               unsigned long flags;
-
-               spin_lock_irqsave(&mapping->tree_lock, flags);
-               if (TestClearPageDirty(page)) {
-                       radix_tree_tag_clear(&mapping->page_tree, page->index,
-                                               PAGECACHE_TAG_DIRTY);
-                       spin_unlock_irqrestore(&mapping->tree_lock, flags);
-                       return 1;
-               }
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               return 0;
-       }
-       return TestClearPageDirty(page);
-}
-
 int test_clear_page_writeback(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
@@ -717,12 +774,13 @@ int test_clear_page_writeback(struct page *page)
        if (mapping) {
                unsigned long flags;
 
-               spin_lock_irqsave(&mapping->tree_lock, flags);
+               write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
-                       radix_tree_tag_clear(&mapping->page_tree, page->index,
+                       radix_tree_tag_clear(&mapping->page_tree,
+                                               page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
@@ -737,15 +795,17 @@ int test_set_page_writeback(struct page *page)
        if (mapping) {
                unsigned long flags;
 
-               spin_lock_irqsave(&mapping->tree_lock, flags);
+               write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
-                       radix_tree_tag_set(&mapping->page_tree, page->index,
+                       radix_tree_tag_set(&mapping->page_tree,
+                                               page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
-                       radix_tree_tag_clear(&mapping->page_tree, page->index,
+                       radix_tree_tag_clear(&mapping->page_tree,
+                                               page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
@@ -763,9 +823,9 @@ int mapping_tagged(struct address_space *mapping, int tag)
        unsigned long flags;
        int ret;
 
-       spin_lock_irqsave(&mapping->tree_lock, flags);
+       read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
-       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+       read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
 }
 EXPORT_SYMBOL(mapping_tagged);
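
Taken together, the locking changes in this file convert tree_lock from a
spinlock into a reader/writer lock: the tag set/clear paths take the write
side, while mapping_tagged() becomes a pure reader and may now run concurrently
with other readers. The shape of that split as a userspace sketch with POSIX
rwlocks (names are illustrative stand-ins, not kernel API):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int dirty_tagged;		/* stand-in for radix-tree tag state */

	static void set_dirty_tag(void)		/* writer, like radix_tree_tag_set */
	{
		pthread_rwlock_wrlock(&tree_lock);
		dirty_tagged = 1;
		pthread_rwlock_unlock(&tree_lock);
	}

	static int mapping_tagged_sketch(void)	/* reader: many can run at once */
	{
		int ret;

		pthread_rwlock_rdlock(&tree_lock);
		ret = dirty_tagged;
		pthread_rwlock_unlock(&tree_lock);
		return ret;
	}

	int main(void)
	{
		set_dirty_tag();
		printf("%d\n", mapping_tagged_sketch());	/* 1 */
		return 0;
	}
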