diff --git a/mm/vmscan.c b/mm/vmscan.c
index 156d701..e7ed798 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -21,7 +21,6 @@
 #include <linux/highmem.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
-#include <linux/suspend.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h> /* for try_to_release_page(),
                                        buffer_heads_over_limit */
@@ -361,6 +360,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                int may_enter_fs;
                int referenced;
 
+               cond_resched();
+
                page = lru_to_page(page_list);
                list_del(&page->lru);
 
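The cond_resched() added at the top of the per-page loop is a voluntary preemption point: shrink_list() may walk a long list of pages, and without it a non-preemptible kernel would hog the CPU for the entire walk. A minimal sketch of the pattern, with a hypothetical walker function:

	/* Hypothetical walker showing the preemption-point pattern used above. */
	static void walk_long_list(struct list_head *head)
	{
		while (!list_empty(head)) {
			struct page *page;

			cond_resched();		/* may schedule() if a resched is pending */
			page = lru_to_page(head);
			list_del(&page->lru);
			/* per-page work goes here */
		}
	}
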
@@ -369,15 +370,15 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 
                BUG_ON(PageActive(page));
 
-               if (PageWriteback(page))
-                       goto keep_locked;
-
                sc->nr_scanned++;
                /* Double the slab pressure for mapped and swapcache pages */
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;
 
-               referenced = page_referenced(page, 1);
+               if (PageWriteback(page))
+                       goto keep_locked;
+
+               referenced = page_referenced(page, 1, sc->priority <= 0);
                /* In active use or really unfreeable?  Activate it. */
                if (referenced && page_mapping_inuse(page))
                        goto activate_locked;
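page_referenced() has grown a third argument, passed here as sc->priority <= 0, i.e. true only once reclaim has escalated to its most aggressive priority. Assuming this is the swap-token work of the same kernel generation, the flag tells page_referenced() to stop honouring the swap-token holder's references when pressure is severe; the parameter name below is an assumption:

	/* Assumed prototype after this change (cf. include/linux/rmap.h): */
	int page_referenced(struct page *page, int is_locked, int ignore_token);
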
@@ -574,7 +575,7 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                        nr_taken++;
                }
                zone->nr_inactive -= nr_taken;
-               zone->pages_scanned += nr_taken;
+               zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
 
                if (nr_taken == 0)
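The accounting fix above is subtle: nr_scan counts every page examined while isolating from the inactive list, whereas nr_taken counts only the pages actually moved onto the private list. Charging zone->pages_scanned with nr_taken under-reports scan effort and so delays the all_unreclaimable verdict taken later against pages_scanned. A sketch of the distinction, using hypothetical helper names:

	/* Hypothetical isolation loop; can_isolate() stands in for the real
	 * LRU/refcount checks and exists only to show the two counters. */
	while (nr_scan < nr_to_scan && !list_empty(&zone->inactive_list)) {
		struct page *page = lru_to_page(&zone->inactive_list);

		nr_scan++;			/* examined: always counted */
		if (!can_isolate(page)) {
			/* rotate to the other end so we do not respin on it */
			list_move(&page->lru, &zone->inactive_list);
			continue;
		}
		list_move(&page->lru, &page_list);
		nr_taken++;			/* isolated: counted only on success */
	}
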
@@ -675,6 +676,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                }
                pgscanned++;
        }
+       zone->pages_scanned += pgscanned;
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
 
@@ -710,12 +712,13 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                reclaim_mapped = 1;
 
        while (!list_empty(&l_hold)) {
+               cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+                           page_referenced(page, 0, sc->priority <= 0)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -935,8 +938,6 @@ int try_to_free_pages(struct zone **zones,
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
                        blk_congestion_wait(WRITE, HZ/10);
        }
-       if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
-               out_of_memory(gfp_mask);
 out:
        for (i = 0; zones[i] != 0; i++)
                zones[i]->prev_priority = zones[i]->temp_priority;
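Dropping the out_of_memory() call here means try_to_free_pages() no longer decides when to invoke the OOM killer; presumably that decision moves to the page allocator, which can see whether direct reclaim made any progress before killing. A sketch of the assumed caller-side logic (the surrounding code and the exact argument list of try_to_free_pages() are assumptions; the __GFP_FS/__GFP_NORETRY condition is taken from the lines removed above):

	/* Assumed companion logic in mm/page_alloc.c, not part of this diff: */
	did_some_progress = try_to_free_pages(zones, gfp_mask);
	if (!did_some_progress &&
	    (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY))
		out_of_memory(gfp_mask);	/* kill only after reclaim failed */
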
@@ -968,7 +969,7 @@ out:
  * the page allocator fallback scheme to ensure that aging of pages is balanced
  * across the zones.
  */
-static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
+static int balance_pgdat(pg_data_t *pgdat, int nr_pages, int order)
 {
        int to_free = nr_pages;
        int all_zones_ok;
@@ -1014,7 +1015,8 @@ loop_again:
                                                priority != DEF_PRIORITY)
                                        continue;
 
-                               if (zone->free_pages <= zone->pages_high) {
+                               if (!zone_watermark_ok(zone, order,
+                                               zone->pages_high, 0, 0, 0)) {
                                        end_zone = i;
                                        goto scan;
                                }
@@ -1049,7 +1051,8 @@ scan:
                                continue;
 
                        if (nr_pages == 0) {    /* Not software suspend */
-                               if (zone->free_pages <= zone->pages_high)
+                               if (!zone_watermark_ok(zone, order,
+                                               zone->pages_high, end_zone, 0, 0))
                                        all_zones_ok = 0;
                        }
                        zone->temp_priority = priority;
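Both hunks above replace a raw zone->free_pages <= zone->pages_high test with zone_watermark_ok(), which is order-aware: it is not enough for the total free count to clear the mark, there must also be enough free blocks left at each order up to the one requested. A sketch of what such a check computes, modeled on the allocator of this kernel generation (the per-zone lowmem protection term is omitted and the exact discounts are assumptions):

	/* Sketch only; the real check lives in mm/page_alloc.c. */
	static int watermark_ok_sketch(struct zone *z, int order, unsigned long mark)
	{
		/* may go negative after the subtraction, so keep it signed */
		long free_pages = z->free_pages - (1 << order) + 1;
		long min = mark;
		int o;

		if (free_pages <= min)
			return 0;
		for (o = 0; o < order; o++) {
			/* blocks of this order cannot satisfy a larger request */
			free_pages -= z->free_area[o].nr_free << o;
			/* demand progressively less headroom at higher orders */
			min >>= 1;
			if (free_pages <= min)
				return 0;
		}
		return 1;
	}
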
@@ -1063,6 +1066,7 @@ scan:
                        shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_reclaimed += sc.nr_reclaimed;
+                       total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
                        if (zone->pages_scanned >= (zone->nr_active +
@@ -1126,6 +1130,7 @@ out:
  */
 static int kswapd(void *p)
 {
+       unsigned long order;
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
        DEFINE_WAIT(wait);
@@ -1154,14 +1159,28 @@ static int kswapd(void *p)
         */
        tsk->flags |= PF_MEMALLOC|PF_KSWAPD;
 
+       order = 0;
        for ( ; ; ) {
+               unsigned long new_order;
                if (current->flags & PF_FREEZE)
                        refrigerator(PF_FREEZE);
+
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
-               schedule();
+               new_order = pgdat->kswapd_max_order;
+               pgdat->kswapd_max_order = 0;
+               if (order < new_order) {
+                       /*
+                        * Don't sleep if someone wants a larger 'order'
+                        * allocation
+                        */
+                       order = new_order;
+               } else {
+                       schedule();
+                       order = pgdat->kswapd_max_order;
+               }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               balance_pgdat(pgdat, 0);
+               balance_pgdat(pgdat, 0, order);
        }
        return 0;
 }
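kswapd now remembers the largest order it has been asked to balance for: if a wakeup requesting a bigger order arrives while it is preparing to sleep, it skips the sleep and rebalances at that order immediately; otherwise it sleeps and picks up whatever order was recorded by the time it is woken. The producer side is wakeup_kswapd() below; a hypothetical caller, sketching how the page allocator might use it:

	/* Hypothetical caller: wake kswapd for each zone in the fallback list
	 * when an order-'order' allocation finds free memory running low. */
	static void wake_all_kswapd_sketch(struct zone **zones, int order)
	{
		int i;

		for (i = 0; zones[i] != NULL; i++)
			wakeup_kswapd(zones[i], order);
	}
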
@@ -1169,12 +1188,18 @@ static int kswapd(void *p)
 /*
  * A zone is low on free memory, so wake its kswapd task to service it.
  */
-void wakeup_kswapd(struct zone *zone)
+void wakeup_kswapd(struct zone *zone, int order)
 {
+       pg_data_t *pgdat;
+
        if (zone->present_pages == 0)
                return;
-       if (zone->free_pages > zone->pages_low)
+
+       pgdat = zone->zone_pgdat;
+       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0, 0))
                return;
+       if (pgdat->kswapd_max_order < order)
+               pgdat->kswapd_max_order = order;
        if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
                return;
        wake_up_interruptible(&zone->zone_pgdat->kswapd_wait);
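The pgdat->kswapd_max_order field read in kswapd() and written here implies a companion change to the node descriptor; a sketch of the assumed addition (placement within the struct is a guess):

	/* Assumed companion hunk in include/linux/mmzone.h: */
	typedef struct pglist_data {
		/* ... existing fields ... */
		wait_queue_head_t kswapd_wait;
		int kswapd_max_order;	/* highest order passed to wakeup_kswapd() */
	} pg_data_t;
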
@@ -1197,7 +1222,7 @@ int shrink_all_memory(int nr_pages)
        current->reclaim_state = &reclaim_state;
        for_each_pgdat(pgdat) {
                int freed;
-               freed = balance_pgdat(pgdat, nr_to_free);
+               freed = balance_pgdat(pgdat, nr_to_free, 0);
                ret += freed;
                nr_to_free -= freed;
                if (nr_to_free <= 0)