linux-2.6.git (fedora core 6 1.2949 + vserver 2.2.0): mm/swap.c

diff --git a/mm/swap.c b/mm/swap.c
index b524ea9..2ed7be3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
+/*
+ * This path almost never happens for VM activity - pages are normally
+ * freed via pagevecs.  But it gets used by networking.
+ */
+static void fastcall __page_cache_release(struct page *page)
+{
+       if (PageLRU(page)) {
+               unsigned long flags;
+               struct zone *zone = page_zone(page);
+
+               spin_lock_irqsave(&zone->lru_lock, flags);
+               VM_BUG_ON(!PageLRU(page));
+               __ClearPageLRU(page);
+               del_page_from_lru(zone, page);
+               spin_unlock_irqrestore(&zone->lru_lock, flags);
+       }
+       free_hot_page(page);
+}
+
 static void put_compound_page(struct page *page)
 {
        page = (struct page *)page_private(page);
        if (put_page_testzero(page)) {
-               void (*dtor)(struct page *page);
+               compound_page_dtor *dtor;
 
-               dtor = (void (*)(struct page *))page[1].lru.next;
+               dtor = get_compound_page_dtor(page);
                (*dtor)(page);
        }
 }
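
The hunk above replaces an opaque cast through page[1].lru.next with the typed compound_page_dtor pointer and its get_compound_page_dtor() accessor, so the compiler checks the destructor's signature instead of trusting a raw cast at each call site. A minimal userspace sketch of the same typed-destructor pattern (obj, obj_dtor and get_obj_dtor are illustrative names, not kernel API):

    #include <stdio.h>

    typedef void obj_dtor(void *obj);       /* plays the role of compound_page_dtor */

    struct obj {
            obj_dtor *dtor;                 /* stored once, like page[1].lru.next */
            int payload;
    };

    /* typed accessor, analogous to get_compound_page_dtor() */
    static obj_dtor *get_obj_dtor(struct obj *o)
    {
            return o->dtor;
    }

    static void free_obj(void *p)
    {
            printf("freeing payload %d\n", ((struct obj *)p)->payload);
    }

    int main(void)
    {
            struct obj o = { .dtor = free_obj, .payload = 42 };
            obj_dtor *dtor = get_obj_dtor(&o);

            (*dtor)(&o);                    /* same call shape as (*dtor)(page) */
            return 0;
    }
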
@@ -54,6 +73,26 @@ void put_page(struct page *page)
 }
 EXPORT_SYMBOL(put_page);
 
+/**
+ * put_pages_list() - release a list of pages
+ * @pages: list of pages threaded on page->lru
+ *
+ * Release a list of pages which are strung together on page->lru.  Currently
+ * used by read_cache_pages() and related error recovery code.
+ */
+void put_pages_list(struct list_head *pages)
+{
+       while (!list_empty(pages)) {
+               struct page *victim;
+
+               victim = list_entry(pages->prev, struct page, lru);
+               list_del(&victim->lru);
+               page_cache_release(victim);
+       }
+}
+EXPORT_SYMBOL(put_pages_list);
+
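
put_pages_list() is the standard drain idiom for an intrusive list: detach the tail entry and release it until the head is empty. A self-contained userspace sketch of the same loop, with a toy list_head in place of the kernel's and free() standing in for page_cache_release():

    #include <stdio.h>
    #include <stdlib.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name)  { &(name), &(name) }
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static int list_empty(const struct list_head *head)
    {
            return head->next == head;
    }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
            new->prev = head->prev;
            new->next = head;
            head->prev->next = new;
            head->prev = new;
    }

    static void list_del(struct list_head *entry)
    {
            entry->prev->next = entry->next;
            entry->next->prev = entry->prev;
    }

    struct page { int id; struct list_head lru; };

    int main(void)
    {
            struct list_head pages = LIST_HEAD_INIT(pages);

            for (int i = 0; i < 3; i++) {
                    struct page *p = malloc(sizeof(*p));
                    p->id = i;
                    list_add_tail(&p->lru, &pages);
            }
            /* the put_pages_list() loop: victims come off the tail */
            while (!list_empty(&pages)) {
                    struct page *victim = list_entry(pages.prev, struct page, lru);

                    list_del(&victim->lru);
                    printf("releasing page %d\n", victim->id);
                    free(victim);           /* stands in for page_cache_release() */
            }
            return 0;
    }
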
 /*
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
@@ -86,9 +125,8 @@ int rotate_reclaimable_page(struct page *page)
        zone = page_zone(page);
        spin_lock_irqsave(&zone->lru_lock, flags);
        if (PageLRU(page) && !PageActive(page)) {
-               list_del(&page->lru);
-               list_add_tail(&page->lru, &zone->inactive_list);
-               inc_page_state(pgrotated);
+               list_move_tail(&page->lru, &zone->inactive_list);
+               __count_vm_event(PGROTATED);
        }
        if (!test_clear_page_writeback(page))
                BUG();
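
The rotate_reclaimable_page() hunk folds the list_del()/list_add_tail() pair into list_move_tail(), which is behaviourally identical but says in one call that the page is being requeued at the tail of the inactive list. In terms of the list helpers from the sketch above, it is simply:

    /* list_move_tail(): the removed two-step sequence, folded into one helper */
    static void list_move_tail(struct list_head *entry, struct list_head *head)
    {
            list_del(entry);
            list_add_tail(entry, head);
    }
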
@@ -108,7 +146,7 @@ void fastcall activate_page(struct page *page)
                del_page_from_inactive_list(zone, page);
                SetPageActive(page);
                add_page_to_active_list(zone, page);
-               inc_page_state(pgactivate);
+               __count_vm_event(PGACTIVATE);
        }
        spin_unlock_irq(&zone->lru_lock);
 }
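
Both counter hunks above switch from inc_page_state() to __count_vm_event(), the per-CPU VM event counters; the double-underscore form skips preemption protection, which is safe here because the callers hold zone->lru_lock with interrupts disabled. A userspace sketch of the idea behind such counters, with one slot per CPU so writers never contend and readers sum the slots:

    #include <stdio.h>

    #define NR_CPUS 4
    enum vm_event { PGACTIVATE, PGROTATED, NR_EVENTS };

    static unsigned long events[NR_CPUS][NR_EVENTS];

    /* analogue of __count_vm_event(): bump this CPU's slot, no locking */
    static void count_event(int cpu, enum vm_event item)
    {
            events[cpu][item]++;
    }

    /* readers fold every CPU's slot into a global total */
    static unsigned long sum_event(enum vm_event item)
    {
            unsigned long sum = 0;

            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    sum += events[cpu][item];
            return sum;
    }

    int main(void)
    {
            count_event(0, PGACTIVATE);
            count_event(2, PGACTIVATE);
            printf("pgactivate = %lu\n", sum_event(PGACTIVATE));
            return 0;
    }
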
@@ -178,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
        lru_add_drain();
 }
@@ -188,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-       return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+       return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
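
The two workqueue hunks track the 2.6.20-era work_struct rework: callbacks now receive the work item itself rather than a void * cookie, which is why schedule_on_each_cpu() lost its data argument. When a callback does need per-item state, the state embeds the work item and is recovered with container_of(). A userspace sketch of that embedding pattern (my_work and drain_cb are illustrative names):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct work_struct {
            void (*func)(struct work_struct *work);
    };

    /* per-item data wraps the work item instead of riding in a void * */
    struct my_work {
            struct work_struct work;
            int cpu;
    };

    static void drain_cb(struct work_struct *work)
    {
            struct my_work *mw = container_of(work, struct my_work, work);

            printf("draining on cpu %d\n", mw->cpu);
    }

    int main(void)
    {
            struct my_work mw = { .work = { .func = drain_cb }, .cpu = 1 };

            mw.work.func(&mw.work);         /* what the workqueue core would do */
            return 0;
    }
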
@@ -203,27 +241,6 @@ int lru_add_drain_all(void)
 }
 #endif
 
-/*
- * This path almost never happens for VM activity - pages are normally
- * freed via pagevecs.  But it gets used by networking.
- */
-void fastcall __page_cache_release(struct page *page)
-{
-       unsigned long flags;
-       struct zone *zone = page_zone(page);
-
-       spin_lock_irqsave(&zone->lru_lock, flags);
-       if (TestClearPageLRU(page))
-               del_page_from_lru(zone, page);
-       if (page_count(page) != 0)
-               page = NULL;
-       spin_unlock_irqrestore(&zone->lru_lock, flags);
-       if (page)
-               free_hot_page(page);
-}
-
-EXPORT_SYMBOL(__page_cache_release);
-
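
The function is not gone: it reappears, made static, at the top of this patch. The rewrite also changes the locking shape: PageLRU() is tested before touching zone->lru_lock, so a page that was never on an LRU, the common networking case this comment mentions, is freed without taking the lock at all. The unlocked test is safe because the caller holds the final reference, so no one else can still be moving the page on or off an LRU. A userspace sketch of that check-then-lock shape (struct page and release() here are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

    struct page { bool on_lru; };

    static void release(struct page *page)
    {
            if (page->on_lru) {             /* cheap unlocked test first */
                    pthread_mutex_lock(&lru_lock);
                    /* safe: we hold the last reference, so the flag cannot
                     * have changed since the unlocked test */
                    page->on_lru = false;   /* mirrors __ClearPageLRU() */
                    /* ... unlink from the LRU list here ... */
                    pthread_mutex_unlock(&lru_lock);
            }
            printf("page freed\n");         /* stands in for free_hot_page() */
    }

    int main(void)
    {
            struct page never_lru = { .on_lru = false };
            struct page was_lru = { .on_lru = true };

            release(&never_lru);            /* never touches lru_lock */
            release(&was_lru);              /* locks, clears, then frees */
            return 0;
    }
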
 /*
  * Batched page_cache_release().  Decrement the reference count on all the
  * passed pages.  If it fell to zero then remove the page from the LRU and
@@ -245,7 +262,6 @@ void release_pages(struct page **pages, int nr, int cold)
        pagevec_init(&pages_to_free, cold);
        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];
-               struct zone *pagezone;
 
                if (unlikely(PageCompound(page))) {
                        if (zone) {
@@ -259,23 +275,27 @@ void release_pages(struct page **pages, int nr, int cold)
                if (!put_page_testzero(page))
                        continue;
 
-               pagezone = page_zone(page);
-               if (pagezone != zone) {
-                       if (zone)
-                               spin_unlock_irq(&zone->lru_lock);
-                       zone = pagezone;
-                       spin_lock_irq(&zone->lru_lock);
-               }
-               if (TestClearPageLRU(page))
+               if (PageLRU(page)) {
+                       struct zone *pagezone = page_zone(page);
+                       if (pagezone != zone) {
+                               if (zone)
+                                       spin_unlock_irq(&zone->lru_lock);
+                               zone = pagezone;
+                               spin_lock_irq(&zone->lru_lock);
+                       }
+                       VM_BUG_ON(!PageLRU(page));
+                       __ClearPageLRU(page);
                        del_page_from_lru(zone, page);
-               if (page_count(page) == 0) {
-                       if (!pagevec_add(&pages_to_free, page)) {
+               }
+
+               if (!pagevec_add(&pages_to_free, page)) {
+                       if (zone) {
                                spin_unlock_irq(&zone->lru_lock);
-                               __pagevec_free(&pages_to_free);
-                               pagevec_reinit(&pages_to_free);
-                               zone = NULL;    /* No lock is held */
+                               zone = NULL;
                        }
-               }
+                       __pagevec_free(&pages_to_free);
+                       pagevec_reinit(&pages_to_free);
+               }
        }
        if (zone)
                spin_unlock_irq(&zone->lru_lock);
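
The restructured loop applies the same ideas batch-wide: the zone lock is only taken for pages actually on an LRU, it is held across consecutive same-zone pages rather than retaken per page, and it is dropped before the expensive __pagevec_free() call. A userspace sketch of that lazy, shard-switching lock pattern (zone_lock and the toy release_pages() are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    #define NR_ZONES 2

    static pthread_mutex_t zone_lock[NR_ZONES] = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER
    };

    struct page { int zone; int on_lru; };

    static void release_pages(struct page **pages, int nr)
    {
            int locked = -1;                        /* no lock held */

            for (int i = 0; i < nr; i++) {
                    struct page *page = pages[i];

                    if (!page->on_lru)
                            continue;               /* skip the lock entirely */
                    if (page->zone != locked) {     /* switch zones lazily */
                            if (locked >= 0)
                                    pthread_mutex_unlock(&zone_lock[locked]);
                            locked = page->zone;
                            pthread_mutex_lock(&zone_lock[locked]);
                    }
                    page->on_lru = 0;               /* mirrors __ClearPageLRU() */
                    printf("unlinked page from zone %d LRU\n", page->zone);
            }
            if (locked >= 0)
                    pthread_mutex_unlock(&zone_lock[locked]);
            /* the batched free would run here, with no lock held */
    }

    int main(void)
    {
            struct page a = { 0, 1 }, b = { 0, 1 }, c = { 1, 0 };
            struct page *batch[] = { &a, &b, &c };

            release_pages(batch, 3);
            return 0;
    }
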
@@ -316,7 +336,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
 
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                if (put_page_testzero(page))
                        pagevec_add(&pages_to_free, page);
        }
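
This hunk and the two __pagevec_lru_add hunks below make the same conversion: an unconditional BUG_ON() or atomic test-and-set assertion becomes VM_BUG_ON() plus a plain flag set, so the check vanishes from production builds and the fast path loses an atomic read-modify-write. VM_BUG_ON() is, in essence, a BUG_ON() gated on CONFIG_DEBUG_VM; a compilable userspace sketch (assert() standing in for BUG()):

    #include <assert.h>
    #include <stdio.h>

    /* stand-in for the kernel's BUG_ON() */
    #define BUG_ON(cond)    assert(!(cond))

    #ifdef CONFIG_DEBUG_VM                  /* build with -DCONFIG_DEBUG_VM */
    #define VM_BUG_ON(cond) BUG_ON(cond)
    #else
    #define VM_BUG_ON(cond) do { } while (0)        /* free in production */
    #endif

    int main(void)
    {
            int page_is_lru = 0;

            VM_BUG_ON(page_is_lru);         /* checked only in debug builds */
            printf("ok\n");
            return 0;
    }
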
@@ -343,8 +363,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
-               if (TestSetPageLRU(page))
-                       BUG();
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
                add_page_to_inactive_list(zone, page);
        }
        if (zone)
@@ -370,10 +390,10 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
                        zone = pagezone;
                        spin_lock_irq(&zone->lru_lock);
                }
-               if (TestSetPageLRU(page))
-                       BUG();
-               if (TestSetPageActive(page))
-                       BUG();
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
+               VM_BUG_ON(PageActive(page));
+               SetPageActive(page);
                add_page_to_active_list(zone, page);
        }
        if (zone)
@@ -478,48 +498,6 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 #endif /* CONFIG_HOTPLUG_CPU */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_SMP
-void percpu_counter_mod(struct percpu_counter *fbc, long amount)
-{
-       long count;
-       long *pcount;
-       int cpu = get_cpu();
-
-       pcount = per_cpu_ptr(fbc->counters, cpu);
-       count = *pcount + amount;
-       if (count >= FBC_BATCH || count <= -FBC_BATCH) {
-               spin_lock(&fbc->lock);
-               fbc->count += count;
-               *pcount = 0;
-               spin_unlock(&fbc->lock);
-       } else {
-               *pcount = count;
-       }
-       put_cpu();
-}
-EXPORT_SYMBOL(percpu_counter_mod);
-
-/*
- * Add up all the per-cpu counts, return the result.  This is a more accurate
- * but much slower version of percpu_counter_read_positive()
- */
-long percpu_counter_sum(struct percpu_counter *fbc)
-{
-       long ret;
-       int cpu;
-
-       spin_lock(&fbc->lock);
-       ret = fbc->count;
-       for_each_cpu(cpu) {
-               long *pcount = per_cpu_ptr(fbc->counters, cpu);
-               ret += *pcount;
-       }
-       spin_unlock(&fbc->lock);
-       return ret < 0 ? 0 : ret;
-}
-EXPORT_SYMBOL(percpu_counter_sum);
-#endif
-
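
The percpu_counter code does not die here; it simply leaves swap.c (mainline moved it to lib/percpu_counter.c in the same period). What is removed is the classic batched per-CPU counter: each CPU accumulates a private delta lock-free and folds it into the shared count under the spinlock only once the delta reaches +/-FBC_BATCH, so the lock is taken once per batch instead of once per update. A userspace sketch of the same scheme, with a mutex in place of the spinlock and fixed slots in place of per_cpu_ptr():

    #include <pthread.h>
    #include <stdio.h>

    #define NR_CPUS   4
    #define FBC_BATCH 32

    struct percpu_counter {
            pthread_mutex_t lock;
            long count;                     /* authoritative, under lock */
            long counters[NR_CPUS];         /* per-CPU deltas, lock-free */
    };

    static void counter_mod(struct percpu_counter *fbc, int cpu, long amount)
    {
            long count = fbc->counters[cpu] + amount;

            if (count >= FBC_BATCH || count <= -FBC_BATCH) {
                    pthread_mutex_lock(&fbc->lock);
                    fbc->count += count;            /* fold the batch in */
                    fbc->counters[cpu] = 0;
                    pthread_mutex_unlock(&fbc->lock);
            } else {
                    fbc->counters[cpu] = count;     /* stay lock-free */
            }
    }

    /* slow but accurate read: fold in every CPU's pending delta */
    static long counter_sum(struct percpu_counter *fbc)
    {
            long ret;

            pthread_mutex_lock(&fbc->lock);
            ret = fbc->count;
            for (int cpu = 0; cpu < NR_CPUS; cpu++)
                    ret += fbc->counters[cpu];
            pthread_mutex_unlock(&fbc->lock);
            return ret;
    }

    int main(void)
    {
            struct percpu_counter fbc = { PTHREAD_MUTEX_INITIALIZER, 0, { 0 } };

            for (int i = 0; i < 40; i++)
                    counter_mod(&fbc, 0, 1);        /* crosses FBC_BATCH once */
            printf("sum = %ld\n", counter_sum(&fbc));
            return 0;
    }
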
 /*
  * Perform any setup for the swap system
  */
@@ -536,5 +514,7 @@ void __init swap_setup(void)
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
+#ifdef CONFIG_HOTPLUG_CPU
        hotcpu_notifier(cpu_swap_callback, 0);
+#endif
 }
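
The new guard mirrors the one that already wraps cpu_swap_callback() (note the #endif /* CONFIG_HOTPLUG_CPU */ further up): the callback only exists on CPU-hotplug kernels, so the registration must compile out with it, or non-hotplug builds would reference an undefined symbol. The pattern in miniature:

    #include <stdio.h>

    /* definition and every use sit behind the same config guard */
    #ifdef CONFIG_HOTPLUG_CPU
    static void cpu_callback(int cpu)
    {
            printf("cpu %d event\n", cpu);
    }
    #endif

    int main(void)
    {
    #ifdef CONFIG_HOTPLUG_CPU
            cpu_callback(0);        /* only built with -DCONFIG_HOTPLUG_CPU */
    #else
            printf("hotplug support compiled out\n");
    #endif
            return 0;
    }
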