vserver 2.0 rc7

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e7ed798..2395eeb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -30,6 +30,7 @@
 #include <linux/rmap.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
+#include <linux/cpuset.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 
@@ -72,6 +73,12 @@ struct scan_control {
        unsigned int gfp_mask;
 
        int may_writepage;
+
+       /* This context's SWAP_CLUSTER_MAX. If freeing memory for
+        * suspend, we effectively ignore SWAP_CLUSTER_MAX.
+        * In this context, it doesn't matter that we scan the
+        * whole list at once. */
+       int swap_cluster_max;
 };
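For reference, ordinary direct reclaim keeps working in SWAP_CLUSTER_MAX-sized batches; a later hunk initializes the new field in try_to_free_pages(), roughly along these lines (a minimal sketch of that initialization, not additional patch content):

	struct scan_control sc;

	sc.gfp_mask = gfp_mask;
	sc.may_writepage = 0;
	/* normal reclaim batches in SWAP_CLUSTER_MAX-sized chunks; the
	 * software-suspend path instead sets this to its full page
	 * target, which is how SWAP_CLUSTER_MAX gets "effectively
	 * ignored" there */
	sc.swap_cluster_max = SWAP_CLUSTER_MAX;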
 
 /*
@@ -137,7 +144,7 @@ struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
                shrinker->seeks = seeks;
                shrinker->nr = 0;
                down_write(&shrinker_rwsem);
-               list_add(&shrinker->list, &shrinker_list);
+               list_add_tail(&shrinker->list, &shrinker_list);
                up_write(&shrinker_rwsem);
        }
        return shrinker;
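The list_add() to list_add_tail() switch makes shrink_slab() visit shrinkers in registration order rather than reverse registration order. The consumer side looks roughly like this (a simplified sketch of the existing shrink_slab() loop, which this patch does not change):

	struct shrinker *shrinker;

	down_read(&shrinker_rwsem);
	/* with list_add_tail() above, earlier-registered shrinkers
	 * are now walked (and therefore shrunk) first */
	list_for_each_entry(shrinker, &shrinker_list, list) {
		/* nr_to_scan == 0 asks the callback for its object count */
		int nr = (*shrinker->shrinker)(0, gfp_mask);
		/* ... scale nr by seeks and scan a slice of the cache ... */
	}
	up_read(&shrinker_rwsem);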
@@ -306,8 +313,20 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
-       if (!mapping)
+       if (!mapping) {
+               /*
+                * Some data journaling orphaned pages can have
+                * page->mapping == NULL while being dirty with clean buffers.
+                */
+               if (PagePrivate(page)) {
+                       if (try_to_free_buffers(page)) {
+                               ClearPageDirty(page);
+                               printk("%s: orphaned page\n", __FUNCTION__);
+                               return PAGE_CLEAN;
+                       }
+               }
                return PAGE_KEEP;
+       }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
        if (!may_write_to_queue(mapping->backing_dev_info))
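The new branch handles pages that ext3-style data journaling can orphan: dirty at the page level, but with clean, freeable buffers and no mapping. If try_to_free_buffers() succeeds, the dirty bit is stale and the page can be reported clean. For context, the caller reacts to pageout()'s verdicts roughly like this (a condensed sketch of the existing switch in shrink_list(), unchanged here):

	switch (pageout(page, mapping)) {
	case PAGE_KEEP:		/* could not be written; keep it locked out */
		goto keep_locked;
	case PAGE_ACTIVATE:	/* not reclaimable now; back to active list */
		goto activate_locked;
	case PAGE_SUCCESS:
		/* I/O was queued; the page is dealt with on a later pass */
		goto keep;
	case PAGE_CLEAN:
		;		/* fall through: try to release the page below */
	}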
@@ -475,7 +494,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                if (!mapping)
                        goto keep_locked;       /* truncate got there first */
 
-               spin_lock_irq(&mapping->tree_lock);
+               write_lock_irq(&mapping->tree_lock);
 
                /*
                 * The non-racy check for busy page.  It is critical to check
@@ -483,7 +502,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * not in use by anybody.       (pagecache + us == 2)
                 */
                if (page_count(page) != 2 || PageDirty(page)) {
-                       spin_unlock_irq(&mapping->tree_lock);
+                       write_unlock_irq(&mapping->tree_lock);
                        goto keep_locked;
                }
 
@@ -491,7 +510,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                if (PageSwapCache(page)) {
                        swp_entry_t swap = { .val = page->private };
                        __delete_from_swap_cache(page);
-                       spin_unlock_irq(&mapping->tree_lock);
+                       write_unlock_irq(&mapping->tree_lock);
                        swap_free(swap);
                        __put_page(page);       /* The pagecache ref */
                        goto free_it;
@@ -499,7 +518,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 #endif /* CONFIG_SWAP */
 
                __remove_from_page_cache(page);
-               spin_unlock_irq(&mapping->tree_lock);
+               write_unlock_irq(&mapping->tree_lock);
                __put_page(page);
 
 free_it:
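The spin_lock_irq() to write_lock_irq() changes in this and the surrounding hunks assume the companion conversion of mapping->tree_lock from a spinlock_t to a rwlock_t, so that radix-tree lookups can run concurrently under the read lock while page removal still takes the lock exclusively. The read side then looks roughly like this (a sketch of find_get_page() after the conversion; not part of this file):

	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);	/* pin it before dropping the lock */
	read_unlock_irq(&mapping->tree_lock);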
@@ -527,14 +546,56 @@ keep:
 }
 
 /*
- * zone->lru_lock is heavily contented.  We relieve it by quickly privatising
- * a batch of pages and working on them outside the lock.  Any pages which were
- * not freed will be added back to the LRU.
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
  *
- * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
  *
- * For pagecache intensive workloads, the first loop here is the hottest spot
- * in the kernel (apart from the copy_*_user functions).
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan:        The number of pages to look through on the list.
+ * @src:       The LRU list to pull pages off.
+ * @dst:       The temp list to put pages on to.
+ * @scanned:   The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@dst.
+ */
+static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
+                            struct list_head *dst, int *scanned)
+{
+       int nr_taken = 0;
+       struct page *page;
+       int scan = 0;
+
+       while (scan++ < nr_to_scan && !list_empty(src)) {
+               page = lru_to_page(src);
+               prefetchw_prev_lru_page(page, src, flags);
+
+               if (!TestClearPageLRU(page))
+                       BUG();
+               list_del(&page->lru);
+               if (get_page_testone(page)) {
+                       /*
+                        * It is being freed elsewhere
+                        */
+                       __put_page(page);
+                       SetPageLRU(page);
+                       list_add(&page->lru, src);
+                       continue;
+               } else {
+                       list_add(&page->lru, dst);
+                       nr_taken++;
+               }
+       }
+
+       *scanned = scan;
+       return nr_taken;
+}
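Pages on *dst leave with PG_lru cleared and an extra reference held, so the caller can work on them after dropping zone->lru_lock. Whatever cannot be freed is later put back on the LRU, as the old comment said; that put-back loop (existing code in shrink_cache(), untouched by this patch) is roughly:

	struct pagevec pvec;

	pagevec_init(&pvec, 1);
	spin_lock_irq(&zone->lru_lock);
	while (!list_empty(&page_list)) {
		page = lru_to_page(&page_list);
		if (TestSetPageLRU(page))
			BUG();
		list_del(&page->lru);
		if (PageActive(page))
			add_page_to_active_list(zone, page);
		else
			add_page_to_inactive_list(zone, page);
		/* releases the reference isolate_lru_pages() took */
		if (!pagevec_add(&pvec, page)) {
			spin_unlock_irq(&zone->lru_lock);
			__pagevec_release(&pvec);
			spin_lock_irq(&zone->lru_lock);
		}
	}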
+
+/*
+ * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
 static void shrink_cache(struct zone *zone, struct scan_control *sc)
 {
@@ -548,32 +609,13 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
        spin_lock_irq(&zone->lru_lock);
        while (max_scan > 0) {
                struct page *page;
-               int nr_taken = 0;
-               int nr_scan = 0;
+               int nr_taken;
+               int nr_scan;
                int nr_freed;
 
-               while (nr_scan++ < SWAP_CLUSTER_MAX &&
-                               !list_empty(&zone->inactive_list)) {
-                       page = lru_to_page(&zone->inactive_list);
-
-                       prefetchw_prev_lru_page(page,
-                                               &zone->inactive_list, flags);
-
-                       if (!TestClearPageLRU(page))
-                               BUG();
-                       list_del(&page->lru);
-                       if (get_page_testone(page)) {
-                               /*
-                                * It is being freed elsewhere
-                                */
-                               __put_page(page);
-                               SetPageLRU(page);
-                               list_add(&page->lru, &zone->inactive_list);
-                               continue;
-                       }
-                       list_add(&page->lru, &page_list);
-                       nr_taken++;
-               }
+               nr_taken = isolate_lru_pages(sc->swap_cluster_max,
+                                            &zone->inactive_list,
+                                            &page_list, &nr_scan);
                zone->nr_inactive -= nr_taken;
                zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
@@ -639,7 +681,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 {
        int pgmoved;
        int pgdeactivate = 0;
-       int pgscanned = 0;
+       int pgscanned;
        int nr_pages = sc->nr_to_scan;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
@@ -652,30 +694,9 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        long swap_tendency;
 
        lru_add_drain();
-       pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
-       while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
-               page = lru_to_page(&zone->active_list);
-               prefetchw_prev_lru_page(page, &zone->active_list, flags);
-               if (!TestClearPageLRU(page))
-                       BUG();
-               list_del(&page->lru);
-               if (get_page_testone(page)) {
-                       /*
-                        * It was already free!  release_pages() or put_page()
-                        * are about to remove it from the LRU and free it. So
-                        * put the refcount back and put the page back on the
-                        * LRU
-                        */
-                       __put_page(page);
-                       SetPageLRU(page);
-                       list_add(&page->lru, &zone->active_list);
-               } else {
-                       list_add(&page->lru, &l_hold);
-                       pgmoved++;
-               }
-               pgscanned++;
-       }
+       pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
+                                   &l_hold, &pgscanned);
        zone->pages_scanned += pgscanned;
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
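The swap_tendency declared in the context above feeds the reclaim-mapped heuristic later in this function, which the patch leaves alone; for reference it boils down to:

	/* existing heuristic in refill_inactive_zone(), shown for context */
	distress = 100 >> zone->prev_priority;	/* how hard we are trying */
	mapped_ratio = (sc->nr_mapped * 100) / total_memory;
	swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
	if (swap_tendency >= 100)
		reclaim_mapped = 1;	/* go after mapped pages too */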
@@ -797,37 +818,39 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
         */
        zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
        nr_active = zone->nr_scan_active;
-       if (nr_active >= SWAP_CLUSTER_MAX)
+       if (nr_active >= sc->swap_cluster_max)
                zone->nr_scan_active = 0;
        else
                nr_active = 0;
 
        zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
        nr_inactive = zone->nr_scan_inactive;
-       if (nr_inactive >= SWAP_CLUSTER_MAX)
+       if (nr_inactive >= sc->swap_cluster_max)
                zone->nr_scan_inactive = 0;
        else
                nr_inactive = 0;
 
-       sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+       sc->nr_to_reclaim = sc->swap_cluster_max;
 
        while (nr_active || nr_inactive) {
                if (nr_active) {
                        sc->nr_to_scan = min(nr_active,
-                                       (unsigned long)SWAP_CLUSTER_MAX);
+                                       (unsigned long)sc->swap_cluster_max);
                        nr_active -= sc->nr_to_scan;
                        refill_inactive_zone(zone, sc);
                }
 
                if (nr_inactive) {
                        sc->nr_to_scan = min(nr_inactive,
-                                       (unsigned long)SWAP_CLUSTER_MAX);
+                                       (unsigned long)sc->swap_cluster_max);
                        nr_inactive -= sc->nr_to_scan;
                        shrink_cache(zone, sc);
                        if (sc->nr_to_reclaim <= 0)
                                break;
                }
        }
+
+       throttle_vm_writeout();
 }
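throttle_vm_writeout() is introduced elsewhere in this series (mm/page-writeback.c); the idea is to keep reclaim from queueing writeback faster than the devices can retire it. In spirit (a hedged sketch, not the actual helper):

	/* sketch: if too much is already under writeback, wait for the
	 * queues to drain before shrink_zone() returns to its caller.
	 * pages_under_writeback_excessive() is a hypothetical predicate
	 * standing in for the real dirty-threshold comparison. */
	while (pages_under_writeback_excessive())
		blk_congestion_wait(WRITE, HZ/10);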
 
 /*
@@ -857,6 +880,9 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
                if (zone->present_pages == 0)
                        continue;
 
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
                zone->temp_priority = sc->priority;
                if (zone->prev_priority > sc->priority)
                        zone->prev_priority = sc->priority;
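The new cpuset checks (enabled by the <linux/cpuset.h> include at the top of the patch) keep page reclaim away from zones on nodes the current task's cpuset does not permit. A minimal sketch of what the predicate amounts to (the real implementation lives in kernel/cpuset.c):

	/* sketch only: allowed iff the zone's node is in the caller's
	 * memory placement mask */
	static inline int cpuset_zone_allowed_sketch(struct zone *z)
	{
		return node_isset(z->zone_pgdat->node_id,
				  current->mems_allowed);
	}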
@@ -900,6 +926,9 @@ int try_to_free_pages(struct zone **zones,
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
                zone->temp_priority = DEF_PRIORITY;
                lru_pages += zone->nr_active + zone->nr_inactive;
        }
@@ -909,18 +938,19 @@ int try_to_free_pages(struct zone **zones,
                sc.nr_scanned = 0;
                sc.nr_reclaimed = 0;
                sc.priority = priority;
+               sc.swap_cluster_max = SWAP_CLUSTER_MAX;
                shrink_caches(zones, &sc);
                shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
                }
-               if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) {
+               total_scanned += sc.nr_scanned;
+               total_reclaimed += sc.nr_reclaimed;
+               if (total_reclaimed >= sc.swap_cluster_max) {
                        ret = 1;
                        goto out;
                }
-               total_scanned += sc.nr_scanned;
-               total_reclaimed += sc.nr_reclaimed;
 
                /*
                 * Try to write back as many pages as we just scanned.  This
@@ -929,7 +959,7 @@ int try_to_free_pages(struct zone **zones,
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
-               if (total_scanned > SWAP_CLUSTER_MAX + SWAP_CLUSTER_MAX/2) {
+               if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
                        wakeup_bdflush(laptop_mode ? 0 : total_scanned);
                        sc.may_writepage = 1;
                }
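With the default SWAP_CLUSTER_MAX of 32, the old threshold kicked writeback once roughly 48 pages had been scanned; switching to sc.swap_cluster_max preserves that same 1.5x-of-batch-size trigger when the suspend path inflates the batch.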
@@ -939,8 +969,14 @@ int try_to_free_pages(struct zone **zones,
                        blk_congestion_wait(WRITE, HZ/10);
        }
 out:
-       for (i = 0; zones[i] != 0; i++)
-               zones[i]->prev_priority = zones[i]->temp_priority;
+       for (i = 0; zones[i] != 0; i++) {
+               struct zone *zone = zones[i];
+
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
+               zone->prev_priority = zone->temp_priority;
+       }
        return ret;
 }
 
@@ -1061,6 +1097,7 @@ scan:
                        sc.nr_scanned = 0;
                        sc.nr_reclaimed = 0;
                        sc.priority = priority;
+                       sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
                        shrink_zone(zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
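nr_pages is nonzero only when balance_pgdat() is invoked from the software-suspend path, which wants a specific number of pages freed; setting swap_cluster_max to that target is what the scan_control comment means by effectively ignoring SWAP_CLUSTER_MAX. The caller, for reference (a condensed sketch of the existing shrink_all_memory()):

	int shrink_all_memory(int nr_pages)
	{
		pg_data_t *pgdat;
		int nr_to_free = nr_pages;
		int ret = 0;

		/* sweep every node until the suspend target is met */
		for_each_pgdat(pgdat) {
			int freed = balance_pgdat(pgdat, nr_to_free, 0);
			ret += freed;
			nr_to_free -= freed;
			if (nr_to_free <= 0)
				break;
		}
		return ret;
	}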
@@ -1098,7 +1135,7 @@ scan:
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if (total_reclaimed >= SWAP_CLUSTER_MAX)
+               if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
                        break;
        }
 out:
@@ -1200,6 +1237,8 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
+       if (!cpuset_zone_allowed(zone))
+               return;
        if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
                return;
        wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
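With the new check, an allocation from a task whose cpuset excludes this zone's node no longer wakes kswapd for it (though kswapd_max_order is still raised above). Caller side, for context (roughly the existing loop in __alloc_pages()):

	/* kick kswapd for every zone in the zonelist before the
	 * allocator starts trying harder */
	for (i = 0; (z = zones[i]) != NULL; i++)
		wakeup_kswapd(z, order);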