upgrade to fedora-2.6.12-1.1398.FC4 + vserver 2.0.rc7
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ec85f5f..0453c8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -21,7 +21,6 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
-#include <linux/suspend.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h> /* for try_to_release_page(),
                                        buffer_heads_over_limit */
@@ -31,6 +30,7 @@
 #include <linux/rmap.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
+#include <linux/cpuset.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 
@@ -38,7 +38,6 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
-#include <linux/vs_cvirt.h>
 
 
 /* possible outcome of pageout() */
@@ -75,6 +74,12 @@ struct scan_control {
        unsigned int gfp_mask;
 
        int may_writepage;
+
+       /* This context's SWAP_CLUSTER_MAX. If freeing memory for
+        * suspend, we effectively ignore SWAP_CLUSTER_MAX.
+        * In this context, it doesn't matter that we scan the
+        * whole list at once. */
+       int swap_cluster_max;
 };
 
 /*
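
The new swap_cluster_max field lets each reclaim context pick its own batch size instead of hard-coding SWAP_CLUSTER_MAX. A minimal sketch of how the two callers touched by this patch set it up (see the try_to_free_pages() and balance_pgdat() hunks further down):

	struct scan_control sc;

	/* direct reclaim: ordinary clustered batches */
	sc.swap_cluster_max = SWAP_CLUSTER_MAX;

	/* balance_pgdat() on behalf of software suspend (nr_pages != 0):
	 * scan in one big batch instead of SWAP_CLUSTER_MAX at a time */
	sc.swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX;
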
@@ -140,7 +145,7 @@ struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
                shrinker->seeks = seeks;
                shrinker->nr = 0;
                down_write(&shrinker_rwsem);
-               list_add(&shrinker->list, &shrinker_list);
+               list_add_tail(&shrinker->list, &shrinker_list);
                up_write(&shrinker_rwsem);
        }
        return shrinker;
@@ -309,8 +314,20 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
-       if (!mapping)
+       if (!mapping) {
+               /*
+                * Some data journaling orphaned pages can have
+                * page->mapping == NULL while being dirty with clean buffers.
+                */
+               if (PagePrivate(page)) {
+                       if (try_to_free_buffers(page)) {
+                               ClearPageDirty(page);
+                               printk("%s: orphaned page\n", __FUNCTION__);
+                               return PAGE_CLEAN;
+                       }
+               }
                return PAGE_KEEP;
+       }
        if (mapping->a_ops->writepage == NULL)
                return PAGE_ACTIVATE;
        if (!may_write_to_queue(mapping->backing_dev_info))
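
For reference, pageout()'s possible outcomes (declared near the top of this file; comments paraphrased) show why the orphaned-page branch returns PAGE_CLEAN once try_to_free_buffers() succeeds: the page is left with no dirty state and no private buffers, so shrink_list() can free it directly.

	typedef enum {
		PAGE_KEEP,	/* could not be written out; leave it where it is */
		PAGE_ACTIVATE,	/* cannot be paged out; move it back to the active list */
		PAGE_SUCCESS,	/* writeback has been started; the page comes clean later */
		PAGE_CLEAN,	/* already clean; it can be reclaimed immediately */
	} pageout_t;
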
@@ -363,6 +380,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                int may_enter_fs;
                int referenced;
 
+               cond_resched();
+
                page = lru_to_page(page_list);
                list_del(&page->lru);
 
@@ -476,7 +495,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                if (!mapping)
                        goto keep_locked;       /* truncate got there first */
 
-               spin_lock_irq(&mapping->tree_lock);
+               write_lock_irq(&mapping->tree_lock);
 
                /*
                 * The non-racy check for busy page.  It is critical to check
@@ -484,7 +503,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                 * not in use by anybody.       (pagecache + us == 2)
                 */
                if (page_count(page) != 2 || PageDirty(page)) {
-                       spin_unlock_irq(&mapping->tree_lock);
+                       write_unlock_irq(&mapping->tree_lock);
                        goto keep_locked;
                }
 
@@ -492,7 +511,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                if (PageSwapCache(page)) {
                        swp_entry_t swap = { .val = page->private };
                        __delete_from_swap_cache(page);
-                       spin_unlock_irq(&mapping->tree_lock);
+                       write_unlock_irq(&mapping->tree_lock);
                        swap_free(swap);
                        __put_page(page);       /* The pagecache ref */
                        goto free_it;
@@ -500,7 +519,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 #endif /* CONFIG_SWAP */
 
                __remove_from_page_cache(page);
-               spin_unlock_irq(&mapping->tree_lock);
+               write_unlock_irq(&mapping->tree_lock);
                __put_page(page);
 
 free_it:
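
The spin_lock_irq() -> write_lock_irq() switches in the hunks above follow the conversion of mapping->tree_lock from a spinlock to an rwlock done elsewhere in this update: vmscan modifies the page-cache radix tree, so it must take the write side. A sketch of the assumed reader side, modelled on find_get_page() in mm/filemap.c of this era (details may differ):

	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
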
@@ -528,14 +547,56 @@ keep:
 }
 
 /*
- * zone->lru_lock is heavily contented.  We relieve it by quickly privatising
- * a batch of pages and working on them outside the lock.  Any pages which were
- * not freed will be added back to the LRU.
+ * zone->lru_lock is heavily contended.  Some of the functions that
+ * shrink the lists perform better by taking out a batch of pages
+ * and working on them outside the LRU lock.
  *
- * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ * For pagecache intensive workloads, this function is the hottest
+ * spot in the kernel (apart from copy_*_user functions).
  *
- * For pagecache intensive workloads, the first loop here is the hottest spot
- * in the kernel (apart from the copy_*_user functions).
+ * Appropriate locks must be held before calling this function.
+ *
+ * @nr_to_scan:        The number of pages to look through on the list.
+ * @src:       The LRU list to pull pages off.
+ * @dst:       The temp list to put pages on to.
+ * @scanned:   The number of pages that were scanned.
+ *
+ * returns how many pages were moved onto *@dst.
+ */
+static int isolate_lru_pages(int nr_to_scan, struct list_head *src,
+                            struct list_head *dst, int *scanned)
+{
+       int nr_taken = 0;
+       struct page *page;
+       int scan = 0;
+
+       while (scan++ < nr_to_scan && !list_empty(src)) {
+               page = lru_to_page(src);
+               prefetchw_prev_lru_page(page, src, flags);
+
+               if (!TestClearPageLRU(page))
+                       BUG();
+               list_del(&page->lru);
+               if (get_page_testone(page)) {
+                       /*
+                        * It is being freed elsewhere
+                        */
+                       __put_page(page);
+                       SetPageLRU(page);
+                       list_add(&page->lru, src);
+                       continue;
+               } else {
+                       list_add(&page->lru, dst);
+                       nr_taken++;
+               }
+       }
+
+       *scanned = scan;
+       return nr_taken;
+}
+
+/*
+ * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
  */
 static void shrink_cache(struct zone *zone, struct scan_control *sc)
 {
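
Callers of isolate_lru_pages() must hold zone->lru_lock and adjust the zone counters themselves. A minimal sketch of the calling convention, mirroring the shrink_cache() hunk that follows:

	LIST_HEAD(page_list);
	int nr_scan, nr_taken;

	spin_lock_irq(&zone->lru_lock);
	nr_taken = isolate_lru_pages(sc->swap_cluster_max,
				     &zone->inactive_list,
				     &page_list, &nr_scan);
	zone->nr_inactive -= nr_taken;
	zone->pages_scanned += nr_scan;
	spin_unlock_irq(&zone->lru_lock);
	/* work on page_list without the LRU lock held */
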
@@ -551,33 +612,15 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
        spin_lock_irq(&zone->lru_lock);
        while (max_scan > 0) {
                struct page *page;
-               int nr_taken = 0;
-               int nr_scan = 0;
+               int nr_taken;
+               int nr_scan;
                int nr_freed;
 
-               while (nr_scan++ < SWAP_CLUSTER_MAX &&
-                               !list_empty(inactive_list)) {
-                       page = lru_to_page(inactive_list);
-
-                       prefetchw_prev_lru_page(page,
-                                               inactive_list, flags);
-
-                       if (!TestClearPageLRU(page))
-                               BUG();
-                       list_del(&page->lru);
-                       if (get_page_testone(page)) {
-                               /*
-                                * It is being freed elsewhere
-                                */
-                               __put_page(page);
-                               SetPageLRU(page);
-                               list_add(&page->lru, inactive_list);
-                               continue;
-                       }
-                       list_add(&page->lru, &page_list);
-                       nr_taken++;
-               }
+               nr_taken = isolate_lru_pages(sc->swap_cluster_max,
+                                            &zone->inactive_list,
+                                            &page_list, &nr_scan);
                zone->nr_inactive -= nr_taken;
+               zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
 
                if (nr_taken == 0)
@@ -644,7 +687,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 {
        int pgmoved;
        int pgdeactivate = 0;
-       int pgscanned = 0;
+       int pgscanned;
        int nr_pages = sc->nr_to_scan;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
        LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
@@ -659,30 +702,9 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        struct list_head *inactive_list = &zone->inactive_list;
 
        lru_add_drain();
-       pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
-       while (pgscanned < nr_pages && !list_empty(active_list)) {
-               page = lru_to_page(active_list);
-               prefetchw_prev_lru_page(page, active_list, flags);
-               if (!TestClearPageLRU(page))
-                       BUG();
-               list_del(&page->lru);
-               if (get_page_testone(page)) {
-                       /*
-                        * It was already free!  release_pages() or put_page()
-                        * are about to remove it from the LRU and free it. So
-                        * put the refcount back and put the page back on the
-                        * LRU
-                        */
-                       __put_page(page);
-                       SetPageLRU(page);
-                       list_add(&page->lru, active_list);
-               } else {
-                       list_add(&page->lru, &l_hold);
-                       pgmoved++;
-               }
-               pgscanned++;
-       }
+       pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
+                                   &l_hold, &pgscanned);
        zone->pages_scanned += pgscanned;
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
@@ -719,6 +741,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                reclaim_mapped = 1;
 
        while (!list_empty(&l_hold)) {
+               cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
@@ -803,37 +826,39 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
         */
        zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
        nr_active = zone->nr_scan_active;
-       if (nr_active >= SWAP_CLUSTER_MAX)
+       if (nr_active >= sc->swap_cluster_max)
                zone->nr_scan_active = 0;
        else
                nr_active = 0;
 
        zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
        nr_inactive = zone->nr_scan_inactive;
-       if (nr_inactive >= SWAP_CLUSTER_MAX)
+       if (nr_inactive >= sc->swap_cluster_max)
                zone->nr_scan_inactive = 0;
        else
                nr_inactive = 0;
 
-       sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+       sc->nr_to_reclaim = sc->swap_cluster_max;
 
        while (nr_active || nr_inactive) {
                if (nr_active) {
                        sc->nr_to_scan = min(nr_active,
-                                       (unsigned long)SWAP_CLUSTER_MAX);
+                                       (unsigned long)sc->swap_cluster_max);
                        nr_active -= sc->nr_to_scan;
                        refill_inactive_zone(zone, sc);
                }
 
                if (nr_inactive) {
                        sc->nr_to_scan = min(nr_inactive,
-                                       (unsigned long)SWAP_CLUSTER_MAX);
+                                       (unsigned long)sc->swap_cluster_max);
                        nr_inactive -= sc->nr_to_scan;
                        shrink_cache(zone, sc);
                        if (sc->nr_to_reclaim <= 0)
                                break;
                }
        }
+
+       throttle_vm_writeout();
 }
 
 /*
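
The accumulator logic above still batches work even though the per-pass contribution shrinks with priority. A rough worked example, assuming SWAP_CLUSTER_MAX == 32 so sc->swap_cluster_max == 32 on the direct-reclaim path: with zone->nr_inactive == 100000 at priority == DEF_PRIORITY (12), each call adds (100000 >> 12) + 1 == 25 to nr_scan_inactive, so roughly every second call crosses the 32-page threshold and runs a shrink_cache() batch; at priority 0 the whole list becomes eligible in a single pass. The new throttle_vm_writeout() call at the end of shrink_zone() is there to stall reclaim briefly while too many pages are already under writeback, rather than keep throwing pages at a congested queue.
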
@@ -863,6 +888,9 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
                if (zone->present_pages == 0)
                        continue;
 
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
                zone->temp_priority = sc->priority;
                if (zone->prev_priority > sc->priority)
                        zone->prev_priority = sc->priority;
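
cpuset_zone_allowed() comes from the newly included <linux/cpuset.h> and keeps reclaim off zones the calling task's cpuset could not allocate from anyway (without CONFIG_CPUSETS it compiles away to "always allowed"). Conceptually it amounts to something like the following hypothetical simplification, not the real helper:

	/* hypothetical sketch only, not the actual cpuset implementation */
	static inline int cpuset_zone_allowed_sketch(struct zone *z)
	{
		return node_isset(z->zone_pgdat->node_id, current->mems_allowed);
	}
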
@@ -906,6 +934,9 @@ int try_to_free_pages(struct zone **zones,
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
                zone->temp_priority = DEF_PRIORITY;
                lru_pages += zone->nr_active + zone->nr_inactive;
        }
@@ -915,18 +946,19 @@ int try_to_free_pages(struct zone **zones,
                sc.nr_scanned = 0;
                sc.nr_reclaimed = 0;
                sc.priority = priority;
+               sc.swap_cluster_max = SWAP_CLUSTER_MAX;
                shrink_caches(zones, &sc);
                shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
                if (reclaim_state) {
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        reclaim_state->reclaimed_slab = 0;
                }
-               if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) {
+               total_scanned += sc.nr_scanned;
+               total_reclaimed += sc.nr_reclaimed;
+               if (total_reclaimed >= sc.swap_cluster_max) {
                        ret = 1;
                        goto out;
                }
-               total_scanned += sc.nr_scanned;
-               total_reclaimed += sc.nr_reclaimed;
 
                /*
                 * Try to write back as many pages as we just scanned.  This
@@ -935,7 +967,7 @@ int try_to_free_pages(struct zone **zones,
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
-               if (total_scanned > SWAP_CLUSTER_MAX + SWAP_CLUSTER_MAX/2) {
+               if (total_scanned > sc.swap_cluster_max + sc.swap_cluster_max/2) {
                        wakeup_bdflush(laptop_mode ? 0 : total_scanned);
                        sc.may_writepage = 1;
                }
@@ -944,11 +976,15 @@ int try_to_free_pages(struct zone **zones,
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
-       if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
-               out_of_memory(gfp_mask);
 out:
-       for (i = 0; zones[i] != 0; i++)
-               zones[i]->prev_priority = zones[i]->temp_priority;
+       for (i = 0; zones[i] != 0; i++) {
+               struct zone *zone = zones[i];
+
+               if (!cpuset_zone_allowed(zone))
+                       continue;
+
+               zone->prev_priority = zone->temp_priority;
+       }
        return ret;
 }
 
@@ -977,7 +1013,7 @@ out:
  * the page allocator fallback scheme to ensure that aging of pages is balanced
  * across the zones.
  */
-static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
+static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
 {
        int to_free = nr_pages;
        int all_zones_ok;
@@ -1023,7 +1059,8 @@ loop_again:
                                                priority != DEF_PRIORITY)
                                        continue;
 
-                               if (zone->free_pages <= zone->pages_high) {
+                               if (!zone_watermark_ok(zone, order,
+                                               zone->pages_high, 0, 0, 0)) {
                                        end_zone = i;
                                        goto scan;
                                }
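
zone_watermark_ok() replaces the plain free_pages <= pages_high test so kswapd keeps going until an allocation of the requested order could actually succeed, not merely until enough order-0 pages are free. Roughly, as a simplified sketch from memory of mm/page_alloc.c in this kernel (the gfp_high/can_try_harder adjustments are left out), the check walks the buddy free lists and demands progressively fewer free pages at each higher order:

	long min = mark, free_pages = z->free_pages - (1 << order) + 1;
	int o;

	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return 0;
	for (o = 0; o < order; o++) {
		/* at order o+1, blocks of order o no longer help */
		free_pages -= z->free_area[o].nr_free << o;
		min >>= 1;	/* require fewer pages at higher orders */
		if (free_pages <= min)
			return 0;
	}
	return 1;
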
@@ -1058,7 +1095,8 @@ scan:
                                continue;
 
                        if (nr_pages == 0) {    /* Not software suspend */
-                               if (zone->free_pages <= zone->pages_high)
+                               if (!zone_watermark_ok(zone, order,
+                                               zone->pages_high, end_zone, 0, 0))
                                        all_zones_ok = 0;
                        }
                        zone->temp_priority = priority;
@@ -1067,6 +1105,7 @@ scan:
                        sc.nr_scanned = 0;
                        sc.nr_reclaimed = 0;
                        sc.priority = priority;
+                       sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
                        shrink_zone(zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
@@ -1104,7 +1143,7 @@ scan:
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if (total_reclaimed >= SWAP_CLUSTER_MAX)
+               if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
                        break;
        }
 out:
@@ -1136,6 +1175,7 @@ out:
  */
 static int kswapd(void *p)
 {
+       unsigned long order;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
@@ -1164,13 +1204,28 @@ static int kswapd(void *p)
         */
        tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
 
+       order = 0;
        for ( ; ; ) {
+               unsigned long new_order;
                if (current->flags & PF_FREEZE)
                        refrigerator(PF_FREEZE);
+
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-               schedule();
+               new_order = pgdat->kswapd_max_order;
+               pgdat->kswapd_max_order = 0;
+               if (order < new_order) {
+                       /*
+                        * Don't sleep if someone wants a larger 'order'
+                        * allocation
+                        */
+                       order = new_order;
+               } else {
+                       schedule();
+                       order = pgdat->kswapd_max_order;
+               }
                finish_wait(&pgdat->kswapd_wait, &wait);
-               balance_pgdat(pgdat, 0);
+
+               balance_pgdat(pgdat, 0, order);
        }
        return 0;
 }
@@ -1178,11 +1233,19 @@ static int kswapd(void *p)
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone)
+void wakeup_kswapd(struct zone *zone, int order)
 {
+       pg_data_t *pgdat;
+
        if (zone->present_pages == 0)
                return;
-       if (zone->free_pages > zone->pages_low)
+
+       pgdat = zone->zone_pgdat;
+       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0, 0))
+               return;
+       if (pgdat->kswapd_max_order < order)
+               pgdat->kswapd_max_order = order;
+       if (!cpuset_zone_allowed(zone))
                return;
        if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
                return;
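
With the order parameter threaded through, the page allocator's slow path is expected to hand over the order it failed to satisfy, along the lines of (assumed caller, matching the __alloc_pages() change elsewhere in this update):

	for (i = 0; zones[i] != NULL; i++)
		wakeup_kswapd(zones[i], order);

wakeup_kswapd() records the largest order seen in pgdat->kswapd_max_order, and the kswapd() loop above re-checks that field before sleeping, so a request for a bigger order that arrives while kswapd is finishing a pass is not lost.
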
@@ -1206,7 +1269,7 @@ int shrink_all_memory(int nr_pages)
        current->reclaim_state = &reclaim_state;
        for_each_pgdat(pgdat) {
                int freed;
-               freed = balance_pgdat(pgdat, nr_to_free);
+               freed = balance_pgdat(pgdat, nr_to_free, 0);
                ret += freed;
                nr_to_free -= freed;
                if (nr_to_free <= 0)