This commit was manufactured by cvs2svn to create tag
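
The linux-2.6 mm/vmscan.c changes below fall into two strands. The
<linux/vs_cvirt.h> include and the find_task_by_pid() ->
find_task_by_real_pid() conversion in kswapd_init() appear to come from
the Linux-VServer patch set. The rest tracks mainline reclaim work:
shrink_list() now bumps sc->nr_scanned before the PageWriteback() check
rather than after it, page_referenced() gains a third argument
(sc->priority <= 0), shrink_cache() and refill_inactive_zone() cache the
zone LRU list heads in locals, zone->pages_scanned accounting moves from
shrink_cache() (which counted pages taken) to refill_inactive_zone()
(which counts pages scanned), and balance_pgdat() accumulates
total_scanned alongside total_reclaimed.
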
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1f0d7fb..ec85f5f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -38,6 +38,8 @@
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
+#include <linux/vs_cvirt.h>
+
 
 /* possible outcome of pageout() */
 typedef enum {
@@ -369,15 +371,15 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 
                BUG_ON(PageActive(page));
 
-               if (PageWriteback(page))
-                       goto keep_locked;
-
                sc->nr_scanned++;
                /* Double the slab pressure for mapped and swapcache pages */
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;
 
-               referenced = page_referenced(page, 1);
+               if (PageWriteback(page))
+                       goto keep_locked;
+
+               referenced = page_referenced(page, 1, sc->priority <= 0);
                /* In active use or really unfreeable?  Activate it. */
                if (referenced && page_mapping_inuse(page))
                        goto activate_locked;
@@ -540,6 +542,8 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
        LIST_HEAD(page_list);
        struct pagevec pvec;
        int max_scan = sc->nr_to_scan;
+       struct list_head *inactive_list = &zone->inactive_list;
+       struct list_head *active_list = &zone->active_list;
 
        pagevec_init(&pvec, 1);
 
@@ -552,11 +556,11 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                int nr_freed;
 
                while (nr_scan++ < SWAP_CLUSTER_MAX &&
-                               !list_empty(&zone->inactive_list)) {
-                       page = lru_to_page(&zone->inactive_list);
+                               !list_empty(inactive_list)) {
+                       page = lru_to_page(inactive_list);
 
                        prefetchw_prev_lru_page(page,
-                                               &zone->inactive_list, flags);
+                                               inactive_list, flags);
 
                        if (!TestClearPageLRU(page))
                                BUG();
@@ -567,14 +571,13 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                                 */
                                __put_page(page);
                                SetPageLRU(page);
-                               list_add(&page->lru, &zone->inactive_list);
+                               list_add(&page->lru, inactive_list);
                                continue;
                        }
                        list_add(&page->lru, &page_list);
                        nr_taken++;
                }
                zone->nr_inactive -= nr_taken;
-               zone->pages_scanned += nr_taken;
                spin_unlock_irq(&zone->lru_lock);
 
                if (nr_taken == 0)
@@ -600,10 +603,13 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                        if (TestSetPageLRU(page))
                                BUG();
                        list_del(&page->lru);
-                       if (PageActive(page))
-                               add_page_to_active_list(zone, page);
-                       else
-                               add_page_to_inactive_list(zone, page);
+                       if (PageActive(page)) {
+                               zone->nr_active++;
+                               list_add(&page->lru, active_list);
+                       } else {
+                               zone->nr_inactive++;
+                               list_add(&page->lru, inactive_list);
+                       }
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
@@ -649,13 +655,15 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
        long mapped_ratio;
        long distress;
        long swap_tendency;
+       struct list_head *active_list = &zone->active_list;
+       struct list_head *inactive_list = &zone->inactive_list;
 
        lru_add_drain();
        pgmoved = 0;
        spin_lock_irq(&zone->lru_lock);
-       while (pgscanned < nr_pages && !list_empty(&zone->active_list)) {
-               page = lru_to_page(&zone->active_list);
-               prefetchw_prev_lru_page(page, &zone->active_list, flags);
+       while (pgscanned < nr_pages && !list_empty(active_list)) {
+               page = lru_to_page(active_list);
+               prefetchw_prev_lru_page(page, active_list, flags);
                if (!TestClearPageLRU(page))
                        BUG();
                list_del(&page->lru);
@@ -668,13 +676,14 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                         */
                        __put_page(page);
                        SetPageLRU(page);
-                       list_add(&page->lru, &zone->active_list);
+                       list_add(&page->lru, active_list);
                } else {
                        list_add(&page->lru, &l_hold);
                        pgmoved++;
                }
                pgscanned++;
        }
+       zone->pages_scanned += pgscanned;
        zone->nr_active -= pgmoved;
        spin_unlock_irq(&zone->lru_lock);
 
 
@@ -715,7 +724,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+                           page_referenced(page, 0, sc->priority <= 0)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -733,7 +742,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                        BUG();
                if (!TestClearPageActive(page))
                        BUG();
-               list_move(&page->lru, &zone->inactive_list);
+               list_move(&page->lru, inactive_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_inactive += pgmoved;
@@ -761,7 +770,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                if (TestSetPageLRU(page))
                        BUG();
                BUG_ON(!PageActive(page));
-               list_move(&page->lru, &zone->active_list);
+               list_move(&page->lru, active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
                        zone->nr_active += pgmoved;
@@ -1063,6 +1072,7 @@ scan:
                        shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages);
                        sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_reclaimed += sc.nr_reclaimed;
+                       total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
                        if (zone->pages_scanned >= (zone->nr_active +
@@ -1160,7 +1170,6 @@ static int kswapd(void *p)
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                schedule();
                finish_wait(&pgdat->kswapd_wait, &wait);
-
                balance_pgdat(pgdat, 0);
        }
        return 0;
@@ -1238,7 +1247,7 @@ static int __init kswapd_init(void)
        swap_setup();
        for_each_pgdat(pgdat)
                pgdat->kswapd
-               = find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
+               = find_task_by_real_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
        total_memory = nr_free_pagecache_pages();
        hotcpu_notifier(cpu_callback, 0);
        return 0;