fedora core 6 1.2949 + vserver 2.2.0
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 37951ba..5b4528a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -35,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -370,24 +372,49 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-
+               inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
 
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (!mapping)
-               return 0;               /* truncate got there first */
+       BUG_ON(!PageLocked(page));
+       BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
-
        /*
-        * The non-racy check for busy page.  It is critical to check
-        * PageDirty _after_ making sure that the page is freeable and
-        * not in use by anybody.       (pagecache + us == 2)
+        * The non racy check for a busy page.
+        *
+        * Must be careful with the order of the tests. When someone has
+        * a ref to the page, it may be possible that they dirty it then
+        * drop the reference. So if PageDirty is tested before page_count
+        * here, then the following race may occur:
+        *
+        * get_user_pages(&page);
+        * [user mapping goes away]
+        * write_to(page);
+        *                              !PageDirty(page)    [good]
+        * SetPageDirty(page);
+        * put_page(page);
+        *                              !page_count(page)   [good, discard it]
+        *
+        * [oops, our write_to data is lost]
+        *
+        * Reversing the order of the tests ensures such a situation cannot
+        * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+        * load is not satisfied before that of page->_count.
+        *
+        * Note that if SetPageDirty is always performed via set_page_dirty,
+        * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
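
For readers following the ordering argument in the new comment, here is a minimal sketch of the full check sequence it defends. Only the page_count() test is visible in the hunk above; the barrier and dirty test that the comment describes as following it are shown for context and are not part of this change:

	/*
	 * Illustrative ordering only: sample the refcount first, then the
	 * dirty bit, with a read barrier in between so the page->flags load
	 * cannot be satisfied before the page->_count load.
	 */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();
	if (unlikely(PageDirty(page)))
		goto cannot_free;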
@@ -442,7 +469,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (TestSetPageLocked(page))
                        goto keep;
 
-               BUG_ON(PageActive(page));
+               VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
@@ -549,7 +576,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto free_it;
                }
 
-               if (!remove_mapping(mapping, page))
+               if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;
 
 free_it:
@@ -566,7 +593,7 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
@@ -605,7 +632,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               BUG_ON(!PageLRU(page));
+               VM_BUG_ON(!PageLRU(page));
 
                list_del(&page->lru);
                target = src;
@@ -665,7 +692,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
                } else
                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-               __count_vm_events(PGACTIVATE, nr_freed);
+               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
                if (nr_taken == 0)
                        goto done;
@@ -676,7 +703,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
-                       BUG_ON(PageLRU(page));
+                       VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
@@ -822,9 +849,9 @@ force_reclaim_mapped:
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
@@ -852,9 +879,9 @@ force_reclaim_mapped:
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
@@ -922,7 +949,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                }
        }
 
-       throttle_vm_writeout();
+       throttle_vm_writeout(sc->gfp_mask);
 
        atomic_dec(&zone->reclaim_in_progress);
        return nr_reclaimed;
@@ -957,7 +984,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (!populated_zone(zone))
                        continue;
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
                note_zone_scanning_priority(zone, priority);
@@ -1007,7 +1034,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
                lru_pages += zone->nr_active + zone->nr_inactive;
@@ -1044,12 +1071,11 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
                /* Take a nap, wait for some writeback to complete */
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
-       if (!sc.all_unreclaimable || nr_reclaimed)
+       if (!sc.all_unreclaimable)
                ret = 1;
-
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1063,7 +1089,7 @@ out:
        for (i = 0; zones[i] != 0; i++) {
                struct zone *zone = zones[i];
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
                zone->prev_priority = priority;
@@ -1147,11 +1173,12 @@ loop_again:
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
-                               goto scan;
+                               break;
                        }
                }
-               goto out;
-scan:
+               if (i < 0)
+                       goto out;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
@@ -1210,7 +1237,7 @@ scan:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1234,6 +1261,9 @@ out:
        }
        if (!all_zones_ok) {
                cond_resched();
+
+               try_to_freeze();
+
                goto loop_again;
        }
 
@@ -1324,7 +1354,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
-       if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
@@ -1339,8 +1369,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-                                     int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+                                     int pass, struct scan_control *sc)
 {
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
@@ -1376,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
        return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+       struct zone *zone;
+       unsigned long ret = 0;
+
+       for_each_zone(zone)
+               ret += zone->nr_active + zone->nr_inactive;
+       return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
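
The count_lru_pages() helper added above replaces the open-coded for_each_zone sums that the hunks below delete. As those hunks show, shrink_all_memory() now recomputes the LRU total at every shrink_slab() call, so the slab-to-LRU pressure ratio used inside shrink_slab() tracks pages freed in the meantime instead of a value cached once per pass. The resulting call pattern (illustrative, mirroring the hunks below):

	reclaim_state.reclaimed_slab = 0;
	shrink_slab(sc.nr_scanned, sc.gfp_mask, count_lru_pages());
	ret += reclaim_state.reclaimed_slab;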
@@ -1390,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
-       struct zone *zone;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 0,
@@ -1401,11 +1440,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = 0;
-       for_each_zone(zone)
-               lru_pages += zone->nr_active + zone->nr_inactive;
-
-       nr_slab = global_page_state(NR_SLAB);
+       lru_pages = count_lru_pages();
+       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
@@ -1431,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for (pass = 0; pass < 5; pass++) {
                int prio;
 
-               /* Needed for shrinking slab caches later on */
-               if (!lru_pages)
-                       for_each_zone(zone) {
-                               lru_pages += zone->nr_active;
-                               lru_pages += zone->nr_inactive;
-                       }
-
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2) {
                        sc.may_swap = 1;
@@ -1453,28 +1482,28 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+                       shrink_slab(sc.nr_scanned, sc.gfp_mask,
+                                       count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                        if (ret >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               blk_congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(WRITE, HZ / 10);
                }
-
-               lru_pages = 0;
        }
 
        /*
         * If ret = 0, we could not shrink LRUs, but there may be something
         * in slab caches
         */
-       if (!ret)
+       if (!ret) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+                       shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+       }
 
 out:
        current->reclaim_state = NULL;
@@ -1483,7 +1512,6 @@ out:
 }
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1504,7 +1532,6 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
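
The two hunks above only drop the CONFIG_HOTPLUG_CPU guard around cpu_callback(), so little of the function itself is visible. For reference, the rebinding logic the comment describes looks roughly like the following in kernels of this era (a sketch for orientation; the helpers used are assumed from the surrounding source, not introduced by this diff):

	static int __devinit cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
	{
		pg_data_t *pgdat;
		cpumask_t mask;

		if (action == CPU_ONLINE) {
			for_each_online_pgdat(pgdat) {
				mask = node_to_cpumask(pgdat->node_id);
				if (any_online_cpu(mask) != NR_CPUS)
					/* One of our CPUs is back: restore the node binding */
					set_cpus_allowed(pgdat->kswapd, mask);
			}
		}
		return NOTIFY_OK;
	}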
@@ -1593,6 +1620,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
        };
+       unsigned long slab_reclaimable;
 
        disable_swap_token();
        cond_resched();
@@ -1607,7 +1635,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
        if (zone_page_state(zone, NR_FILE_PAGES) -
                zone_page_state(zone, NR_FILE_MAPPED) >
-               zone->min_unmapped_ratio) {
+               zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
@@ -1620,7 +1648,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                } while (priority >= 0 && nr_reclaimed < nr_pages);
        }
 
-       if (zone_page_state(zone, NR_SLAB) > zone->min_slab_pages) {
+       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (slab_reclaimable > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
                 * many pages were freed in this zone. So we take the current
@@ -1631,12 +1660,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * Note that shrink_slab will free memory on all zones and may
                 * take a long time.
                 */
-               unsigned long limit = zone_page_state(zone,
-                               NR_SLAB) - nr_pages;
-
                while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
-                       zone_page_state(zone, NR_SLAB) > limit)
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
+                               slab_reclaimable - nr_pages)
                        ;
+
+               /*
+                * Update nr_reclaimed by the number of slab pages we
+                * reclaimed from this zone.
+                */
+               nr_reclaimed += slab_reclaimable -
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
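
To make the new accounting concrete, a worked example with invented numbers:

	/*
	 * Illustration only: slab_reclaimable is sampled as 1000 pages on
	 * entry and nr_pages is 32.  shrink_slab() is then called until
	 * NR_SLAB_RECLAIMABLE falls to 1000 - 32 = 968 or below, say 960.
	 * The delta, 1000 - 960 = 40 pages, is credited to nr_reclaimed,
	 * which the old code never did.
	 */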
@@ -1660,8 +1694,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * unmapped file backed pages.
         */
        if (zone_page_state(zone, NR_FILE_PAGES) -
-           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio
-           && zone_page_state(zone, NR_SLAB)
+           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
+           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
                        <= zone->min_slab_pages)
                return 0;
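
The gate above keeps zone_reclaim() from running when there is nothing it could usefully free; an illustration with invented numbers:

	/*
	 * Invented numbers: NR_FILE_PAGES = 5000, NR_FILE_MAPPED = 4900,
	 * min_unmapped_pages = 200.  Unmapped page cache is only 100 pages,
	 * so if NR_SLAB_RECLAIMABLE is also at or below min_slab_pages,
	 * zone_reclaim() returns 0 and the allocator falls back to other
	 * zones instead.
	 */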
 
@@ -1682,7 +1716,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
-       node_id = zone->zone_pgdat->node_id;
+       node_id = zone_to_nid(zone);
        mask = node_to_cpumask(node_id);
        if (!cpus_empty(mask) && node_id != numa_node_id())
                return 0;