diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0849579..6570a66 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
 
 #include <linux/swapops.h>
 
+/* possible outcome of pageout() */
+typedef enum {
+       /* failed to write page out, page is locked */
+       PAGE_KEEP,
+       /* move page to the active list, page is locked */
+       PAGE_ACTIVATE,
+       /* page has been sent to the disk successfully, page is unlocked */
+       PAGE_SUCCESS,
+       /* page is clean and locked */
+       PAGE_CLEAN,
+} pageout_t;
+
+struct scan_control {
+       /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
+       unsigned long nr_to_scan;
+
+       /* Incremented by the number of inactive pages that were scanned */
+       unsigned long nr_scanned;
+
+       /* Incremented by the number of pages reclaimed */
+       unsigned long nr_reclaimed;
+
+       unsigned long nr_mapped;        /* From page_state */
+
+       /* How many pages shrink_cache() should reclaim */
+       int nr_to_reclaim;
+
+       /* Ask shrink_caches, or shrink_zone to scan at this priority */
+       unsigned int priority;
+
+       /* This context's GFP mask */
+       unsigned int gfp_mask;
+
+       int may_writepage;
+};
+
 /*
- * From 0 .. 100.  Higher means more swappy.
+ * The list of shrinker callbacks used to apply pressure to
+ * ageable caches.
  */
-int vm_swappiness = 60;
-static long total_memory;
+struct shrinker {
+       shrinker_t              shrinker;
+       struct list_head        list;
+       int                     seeks;  /* seeks to recreate an obj */
+       long                    nr;     /* objs pending delete */
+};
 
 
 
@@ -71,7 +112,7 @@ void try_to_clip_inodes(void);
                if ((_page)->lru.prev != _base) {                       \
                        struct page *prev;                              \
                                                                        \
-                       prev = lru_to_page(&(_page->lru));                      \
+                       prev = lru_to_page(&(_page->lru));              \
                        prefetchw(&prev->_field);                       \
                }                                                       \
        } while (0)
@@ -80,15 +121,10 @@ void try_to_clip_inodes(void);
 #endif
 
 /*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
+ * From 0 .. 100.  Higher means more swappy.
  */
-struct shrinker {
-       shrinker_t              shrinker;
-       struct list_head        list;
-       int                     seeks;  /* seeks to recreate an obj */
-       long                    nr;     /* objs pending delete */
-};
+int vm_swappiness = 60;
+static long total_memory;
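
For context on "Higher means more swappy": vm_swappiness and total_memory feed the
reclaim_mapped decision further down in refill_inactive_zone(). A paraphrased sketch of
that calculation, not part of this patch, with the helper name invented for illustration:

	/* Should this reclaim pass start unmapping mapped pages as well? */
	static int should_reclaim_mapped(unsigned int prev_priority,
					 unsigned long nr_mapped)
	{
		int distress = 100 >> prev_priority;	/* 0 = no trouble, 100 = lots */
		int mapped_ratio = nr_mapped * 100 / total_memory;
		int swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

		return swap_tendency >= 100;
	}
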
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_MUTEX(shrinker_sem);
@@ -111,7 +147,6 @@ struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
        }
        return shrinker;
 }
-
 EXPORT_SYMBOL(set_shrinker);
 
 /*
@@ -124,7 +159,6 @@ void remove_shrinker(struct shrinker *shrinker)
        up(&shrinker_sem);
        kfree(shrinker);
 }
-
 EXPORT_SYMBOL(remove_shrinker);
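
The struct shrinker moved to the top of the file backs this set_shrinker()/remove_shrinker()
registration API. A minimal client sketch, assuming the 2.6-era callback signature
int (*shrinker_t)(int nr_to_scan, unsigned int gfp_mask) and the DEFAULT_SEEKS constant;
the cache being shrunk and its helpers are hypothetical:

	static struct shrinker *my_shrinker;

	/* Called via shrink_slab(): scan up to nr_to_scan objects, then report
	 * how many remain so the core can size the next batch. */
	static int my_cache_shrink(int nr_to_scan, unsigned int gfp_mask)
	{
		if (nr_to_scan)
			prune_my_cache(nr_to_scan, gfp_mask);	/* hypothetical */
		return count_my_cache_objects();		/* hypothetical */
	}

	static int __init my_cache_init(void)
	{
		my_shrinker = set_shrinker(DEFAULT_SEEKS, my_cache_shrink);
		return my_shrinker ? 0 : -ENOMEM;
	}

	static void __exit my_cache_exit(void)
	{
		remove_shrinker(my_shrinker);
	}
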
  
 #define SHRINK_BATCH 128
@@ -244,18 +278,6 @@ static void handle_write_error(struct address_space *mapping,
        unlock_page(page);
 }
 
-/* possible outcome of pageout() */
-typedef enum {
-       /* failed to write page out, page is locked */
-       PAGE_KEEP,
-       /* move page to the active list, page is locked */
-       PAGE_ACTIVATE,
-       /* page has been sent to the disk successfully, page is unlocked */
-       PAGE_SUCCESS,
-       /* page is clean and locked */
-       PAGE_CLEAN,
-} pageout_t;
-
 /*
  * pageout is called by shrink_list() for each dirty page. Calls ->writepage().
  */
@@ -315,27 +337,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
        return PAGE_CLEAN;
 }
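
For reference, shrink_list() consumes these return values roughly as follows (paraphrased
and simplified from the existing code in this file, honoring the lock-state contract
documented on the pageout_t enum; not new code in this patch):

	switch (pageout(page, mapping)) {
	case PAGE_KEEP:			/* write failed, page still locked */
		goto keep_locked;
	case PAGE_ACTIVATE:		/* don't keep retrying, reactivate it */
		goto activate_locked;
	case PAGE_SUCCESS:		/* I/O submitted, page was unlocked */
		if (PageWriteback(page) || PageDirty(page))
			goto keep;
		/* synchronous write (e.g. ramdisk): try to reclaim it now */
		if (TestSetPageLocked(page))
			goto keep;
		if (PageDirty(page) || PageWriteback(page))
			goto keep_locked;
		mapping = page_mapping(page);
		/* fall through */
	case PAGE_CLEAN:
		;			/* clean and locked: try to free it below */
	}
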
 
-struct scan_control {
-       /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */
-       unsigned long nr_to_scan;
-
-       /* Incremented by the number of inactive pages that were scanned */
-       unsigned long nr_scanned;
-
-       /* Incremented by the number of pages reclaimed */
-       unsigned long nr_reclaimed;
-
-       unsigned long nr_mapped;        /* From page_state */
-
-       /* Ask shrink_caches, or shrink_zone to scan at this priority */
-       unsigned int priority;
-
-       /* This context's GFP mask */
-       unsigned int gfp_mask;
-
-       int may_writepage;
-};
-
 /*
  * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
  */
@@ -355,6 +356,8 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
                int may_enter_fs;
                int referenced;
 
+               cond_resched();
+
                page = lru_to_page(page_list);
                list_del(&page->lru);
 
@@ -593,6 +596,7 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
                if (current_is_kswapd())
                        mod_page_state(kswapd_steal, nr_freed);
                mod_page_state_zone(zone, pgsteal, nr_freed);
+               sc->nr_to_reclaim -= nr_freed;
 
                spin_lock_irq(&zone->lru_lock);
                /*
@@ -713,6 +717,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
                reclaim_mapped = 1;
 
        while (!list_empty(&l_hold)) {
+               cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
                if (page_mapped(page)) {
@@ -796,54 +801,50 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 }
 
 /*
- * Scan `nr_pages' from this zone.  Returns the number of reclaimed pages.
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
 static void
 shrink_zone(struct zone *zone, struct scan_control *sc)
 {
-       unsigned long scan_active, scan_inactive;
-       int count;
-
-       scan_inactive = (zone->nr_active + zone->nr_inactive) >> sc->priority;
+       unsigned long nr_active;
+       unsigned long nr_inactive;
 
        /*
-        * Try to keep the active list 2/3 of the size of the cache.  And
-        * make sure that refill_inactive is given a decent number of pages.
-        *
-        * The "scan_active + 1" here is important.  With pagecache-intensive
-        * workloads the inactive list is huge, and `ratio' evaluates to zero
-        * all the time.  Which pins the active list memory.  So we add one to
-        * `scan_active' just to make sure that the kernel will slowly sift
-        * through the active list.
+        * Add one to `nr_to_scan' just to make sure that the kernel will
+        * slowly sift through the active list.
         */
-       if (zone->nr_active >= 4*(zone->nr_inactive*2 + 1)) {
-               /* Don't scan more than 4 times the inactive list scan size */
-               scan_active = 4*scan_inactive;
-       } else {
-               unsigned long long tmp;
-
-               /* Cast to long long so the multiply doesn't overflow */
-
-               tmp = (unsigned long long)scan_inactive * zone->nr_active;
-               do_div(tmp, zone->nr_inactive*2 + 1);
-               scan_active = (unsigned long)tmp;
-       }
-
-       atomic_add(scan_active + 1, &zone->nr_scan_active);
-       count = atomic_read(&zone->nr_scan_active);
-       if (count >= SWAP_CLUSTER_MAX) {
-               atomic_set(&zone->nr_scan_active, 0);
-               sc->nr_to_scan = count;
-               refill_inactive_zone(zone, sc);
-       }
+       zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1;
+       nr_active = zone->nr_scan_active;
+       if (nr_active >= SWAP_CLUSTER_MAX)
+               zone->nr_scan_active = 0;
+       else
+               nr_active = 0;
+
+       zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1;
+       nr_inactive = zone->nr_scan_inactive;
+       if (nr_inactive >= SWAP_CLUSTER_MAX)
+               zone->nr_scan_inactive = 0;
+       else
+               nr_inactive = 0;
+
+       sc->nr_to_reclaim = SWAP_CLUSTER_MAX;
+
+       while (nr_active || nr_inactive) {
+               if (nr_active) {
+                       sc->nr_to_scan = min(nr_active,
+                                       (unsigned long)SWAP_CLUSTER_MAX);
+                       nr_active -= sc->nr_to_scan;
+                       refill_inactive_zone(zone, sc);
+               }
 
-       atomic_add(scan_inactive, &zone->nr_scan_inactive);
-       count = atomic_read(&zone->nr_scan_inactive);
-       if (count >= SWAP_CLUSTER_MAX) {
-               atomic_set(&zone->nr_scan_inactive, 0);
-               sc->nr_to_scan = count;
-               shrink_cache(zone, sc);
+               if (nr_inactive) {
+                       sc->nr_to_scan = min(nr_inactive,
+                                       (unsigned long)SWAP_CLUSTER_MAX);
+                       nr_inactive -= sc->nr_to_scan;
+                       shrink_cache(zone, sc);
+                       if (sc->nr_to_reclaim <= 0)
+                               break;
+               }
        }
 }
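
In the rewritten shrink_zone() each call banks (list size >> priority) + 1 pages of scan
work per list, nothing is scanned until at least SWAP_CLUSTER_MAX pages have accumulated,
the work is then done in SWAP_CLUSTER_MAX-sized chunks, and inactive scanning stops early
once sc->nr_to_reclaim pages (one batch, decremented by shrink_cache()) have been reclaimed.
A standalone toy of the accumulation step, assuming SWAP_CLUSTER_MAX is 32 as in kernels of
this era; the names are illustrative:

	#include <stdio.h>

	#define SWAP_CLUSTER_MAX 32	/* assumed value, for illustration only */

	/* Bank scan work; hand back a batch once enough has accumulated. */
	static unsigned long bank_scan(unsigned long *nr_scan,
				       unsigned long list_size, unsigned int priority)
	{
		unsigned long batch;

		*nr_scan += (list_size >> priority) + 1;
		if (*nr_scan < SWAP_CLUSTER_MAX)
			return 0;
		batch = *nr_scan;
		*nr_scan = 0;
		return batch;
	}

	int main(void)
	{
		unsigned long nr_scan_inactive = 0;
		int call;

		/* A small zone (1000 inactive pages) at a gentle priority of 12:
		 * 1000 >> 12 is 0, so the "+ 1" banks one page per call and a
		 * 32-page batch is only handed out on the 32nd call. */
		for (call = 1; call <= 40; call++) {
			unsigned long now = bank_scan(&nr_scan_inactive, 1000, 12);
			if (now)
				printf("call %d: scan %lu pages\n", call, now);
		}
		return 0;
	}
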
 
@@ -1104,7 +1105,7 @@ out:
  * If there are applications that are active memory-allocators
  * (most normal use), this basically shouldn't matter.
  */
-int kswapd(void *p)
+static int kswapd(void *p)
 {
        pg_data_t *pgdat = (pg_data_t*)p;
        struct task_struct *tsk = current;
@@ -1144,6 +1145,7 @@ int kswapd(void *p)
 
                balance_pgdat(pgdat, 0);
        }
+       return 0;
 }
 
 /*