diff --git a/mm/swap.c b/mm/swap.c
index 7771d2803..687686a61 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -34,25 +34,45 @@
 /* How many pages do we try to swap or page in/out together? */
 int page_cluster;
 
-#ifdef CONFIG_HUGETLB_PAGE
-
-void put_page(struct page *page)
+static void put_compound_page(struct page *page)
 {
-	if (unlikely(PageCompound(page))) {
-		page = (struct page *)page->private;
-		if (put_page_testzero(page)) {
-			void (*dtor)(struct page *page);
+	page = (struct page *)page_private(page);
+	if (put_page_testzero(page)) {
+		void (*dtor)(struct page *page);
 
-			dtor = (void (*)(struct page *))page[1].mapping;
-			(*dtor)(page);
-		}
-		return;
+		dtor = (void (*)(struct page *))page[1].lru.next;
+		(*dtor)(page);
 	}
-	if (!PageReserved(page) && put_page_testzero(page))
+}
+
+void put_page(struct page *page)
+{
+	if (unlikely(PageCompound(page)))
+		put_compound_page(page);
+	else if (put_page_testzero(page))
 		__page_cache_release(page);
 }
 EXPORT_SYMBOL(put_page);
-#endif
+
+/**
+ * put_pages_list(): release a list of pages
+ *
+ * Release a list of pages which are strung together on page.lru.  Currently
+ * used by read_cache_pages() and related error recovery code.
+ *
+ * @pages: list of pages threaded on page->lru
+ */
+void put_pages_list(struct list_head *pages)
+{
+	while (!list_empty(pages)) {
+		struct page *victim;
+
+		victim = list_entry(pages->prev, struct page, lru);
+		list_del(&victim->lru);
+		page_cache_release(victim);
+	}
+}
+EXPORT_SYMBOL(put_pages_list);
 
 /*
  * Writeback is about to end against a page which has been marked for immediate
@@ -86,9 +106,8 @@ int rotate_reclaimable_page(struct page *page)
 	zone = page_zone(page);
 	spin_lock_irqsave(&zone->lru_lock, flags);
 	if (PageLRU(page) && !PageActive(page)) {
-		list_del(&page->lru);
-		list_add_tail(&page->lru, &zone->inactive_list);
-		inc_page_state(pgrotated);
+		list_move_tail(&page->lru, &zone->inactive_list);
+		__count_vm_event(PGROTATED);
 	}
 	if (!test_clear_page_writeback(page))
 		BUG();
@@ -108,7 +127,7 @@ void fastcall activate_page(struct page *page)
 		del_page_from_inactive_list(zone, page);
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
-		inc_page_state(pgactivate);
+		__count_vm_event(PGACTIVATE);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -159,37 +178,68 @@ void fastcall lru_cache_add_active(struct page *page)
 	put_cpu_var(lru_add_active_pvecs);
 }
 
-void lru_add_drain(void)
+static void __lru_add_drain(int cpu)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
 
+	/* CPU is dead, so no locking needed. */
 	if (pagevec_count(pvec))
 		__pagevec_lru_add(pvec);
-	pvec = &__get_cpu_var(lru_add_active_pvecs);
+	pvec = &per_cpu(lru_add_active_pvecs, cpu);
 	if (pagevec_count(pvec))
 		__pagevec_lru_add_active(pvec);
-	put_cpu_var(lru_add_pvecs);
 }
 
+void lru_add_drain(void)
+{
+	__lru_add_drain(get_cpu());
+	put_cpu();
+}
+
+#ifdef CONFIG_NUMA
+static void lru_add_drain_per_cpu(void *dummy)
+{
+	lru_add_drain();
+}
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+}
+
+#else
+
+/*
+ * Returns 0 for success
+ */
+int lru_add_drain_all(void)
+{
+	lru_add_drain();
+	return 0;
+}
+#endif
+
 /*
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs.  But it gets used by networking.
  */
 void fastcall __page_cache_release(struct page *page)
 {
-	unsigned long flags;
-	struct zone *zone = page_zone(page);
+	if (PageLRU(page)) {
+		unsigned long flags;
+		struct zone *zone = page_zone(page);
 
-	spin_lock_irqsave(&zone->lru_lock, flags);
-	if (TestClearPageLRU(page))
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		BUG_ON(!PageLRU(page));
+		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
-	if (page_count(page) != 0)
-		page = NULL;
-	spin_unlock_irqrestore(&zone->lru_lock, flags);
-	if (page)
-		free_hot_page(page);
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+	free_hot_page(page);
 }
-
 EXPORT_SYMBOL(__page_cache_release);
 
 /*
@@ -213,28 +263,40 @@ void release_pages(struct page **pages, int nr, int cold)
 	pagevec_init(&pages_to_free, cold);
 	for (i = 0; i < nr; i++) {
 		struct page *page = pages[i];
-		struct zone *pagezone;
-
-		if (PageReserved(page) || !put_page_testzero(page))
-			continue;
 
-		pagezone = page_zone(page);
-		if (pagezone != zone) {
-			if (zone)
+		if (unlikely(PageCompound(page))) {
+			if (zone) {
 				spin_unlock_irq(&zone->lru_lock);
-			zone = pagezone;
-			spin_lock_irq(&zone->lru_lock);
+				zone = NULL;
+			}
+			put_compound_page(page);
+			continue;
 		}
-		if (TestClearPageLRU(page))
+
+		if (!put_page_testzero(page))
+			continue;
+
+		if (PageLRU(page)) {
+			struct zone *pagezone = page_zone(page);
+			if (pagezone != zone) {
+				if (zone)
+					spin_unlock_irq(&zone->lru_lock);
+				zone = pagezone;
+				spin_lock_irq(&zone->lru_lock);
+			}
+			BUG_ON(!PageLRU(page));
+			__ClearPageLRU(page);
 			del_page_from_lru(zone, page);
-		if (page_count(page) == 0) {
-			if (!pagevec_add(&pages_to_free, page)) {
+		}
+
+		if (!pagevec_add(&pages_to_free, page)) {
+			if (zone) {
 				spin_unlock_irq(&zone->lru_lock);
-				__pagevec_free(&pages_to_free);
-				pagevec_reinit(&pages_to_free);
-				zone = NULL;	/* No lock is held */
+				zone = NULL;
 			}
-		}
+			__pagevec_free(&pages_to_free);
+			pagevec_reinit(&pages_to_free);
+		}
 	}
 	if (zone)
 		spin_unlock_irq(&zone->lru_lock);
@@ -259,6 +321,8 @@ void __pagevec_release(struct pagevec *pvec)
 	pagevec_reinit(pvec);
 }
 
+EXPORT_SYMBOL(__pagevec_release);
+
 /*
  * pagevec_release() for pages which are known to not be on the LRU
  *
@@ -270,7 +334,6 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
 	struct pagevec pages_to_free;
 
 	pagevec_init(&pages_to_free, pvec->cold);
-	pages_to_free.cold = pvec->cold;
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
@@ -301,8 +364,8 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		if (TestSetPageLRU(page))
-			BUG();
+		BUG_ON(PageLRU(page));
+		SetPageLRU(page);
 		add_page_to_inactive_list(zone, page);
 	}
 	if (zone)
@@ -328,10 +391,10 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
 			zone = pagezone;
spin_lock_irq(&zone->lru_lock); } - if (TestSetPageLRU(page)) - BUG(); - if (TestSetPageActive(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); + BUG_ON(PageActive(page)); + SetPageActive(page); add_page_to_active_list(zone, page); } if (zone) @@ -351,7 +414,8 @@ void pagevec_strip(struct pagevec *pvec) struct page *page = pvec->pages[i]; if (PagePrivate(page) && !TestSetPageLocked(page)) { - try_to_release_page(page, 0); + if (PagePrivate(page)) + try_to_release_page(page, 0); unlock_page(page); } } @@ -380,6 +444,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, return pagevec_count(pvec); } +EXPORT_SYMBOL(pagevec_lookup); + unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages) { @@ -388,6 +454,7 @@ unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, return pagevec_count(pvec); } +EXPORT_SYMBOL(pagevec_lookup_tag); #ifdef CONFIG_SMP /* @@ -411,20 +478,8 @@ void vm_acct_memory(long pages) } preempt_enable(); } -EXPORT_SYMBOL(vm_acct_memory); #ifdef CONFIG_HOTPLUG_CPU -static void lru_drain_cache(unsigned int cpu) -{ - struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu); - - /* CPU is dead, so no locking needed. */ - if (pagevec_count(pvec)) - __pagevec_lru_add(pvec); - pvec = &per_cpu(lru_add_active_pvecs, cpu); - if (pagevec_count(pvec)) - __pagevec_lru_add_active(pvec); -} /* Drop the CPU's cached committed space back into the central pool. */ static int cpu_swap_callback(struct notifier_block *nfb, @@ -437,34 +492,13 @@ static int cpu_swap_callback(struct notifier_block *nfb, if (action == CPU_DEAD) { atomic_add(*committed, &vm_committed_space); *committed = 0; - lru_drain_cache((long)hcpu); + __lru_add_drain((long)hcpu); } return NOTIFY_OK; } #endif /* CONFIG_HOTPLUG_CPU */ #endif /* CONFIG_SMP */ -#ifdef CONFIG_SMP -void percpu_counter_mod(struct percpu_counter *fbc, long amount) -{ - long count; - long *pcount; - int cpu = get_cpu(); - - pcount = per_cpu_ptr(fbc->counters, cpu); - count = *pcount + amount; - if (count >= FBC_BATCH || count <= -FBC_BATCH) { - spin_lock(&fbc->lock); - fbc->count += count; - spin_unlock(&fbc->lock); - count = 0; - } - *pcount = count; - put_cpu(); -} -EXPORT_SYMBOL(percpu_counter_mod); -#endif - /* * Perform any setup for the swap system */
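
A note on the pattern that runs through this patch: put_page_testzero() drops a reference on one page at a time, but release_pages() keeps zone->lru_lock held across consecutive same-zone pages instead of retaking it per page, and frees the dead pages a pagevec (up to PAGEVEC_SIZE of them) at a time via __pagevec_free(). The following is a minimal userspace C sketch of that batching idea, not kernel code; struct obj, obj_put_testzero(), free_batch(), release_objs(), pool_lock and BATCH are names invented for the example:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define BATCH 14	/* same order as PAGEVEC_SIZE: bound the batch size */

struct obj {
	int refcount;			/* stand-in for page->_count */
};

/* stand-in for zone->lru_lock: shared state the release path must touch */
static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static long pool_live;			/* updated only under pool_lock */

/* drop one reference; true when the caller must free the object */
static int obj_put_testzero(struct obj *o)
{
	return --o->refcount == 0;	/* the kernel uses an atomic here */
}

/* flush one batch: a single lock round-trip covers up to BATCH objects,
 * and the actual freeing happens outside the lock, as release_pages()
 * drops zone->lru_lock before calling __pagevec_free() */
static void free_batch(struct obj **batch, int n)
{
	int i;

	pthread_mutex_lock(&pool_lock);
	pool_live -= n;
	pthread_mutex_unlock(&pool_lock);

	for (i = 0; i < n; i++)
		free(batch[i]);
}

/* batched release in the style of release_pages()/__pagevec_free() */
static void release_objs(struct obj **objs, int nr)
{
	struct obj *batch[BATCH];
	int i, n = 0;

	for (i = 0; i < nr; i++) {
		if (!obj_put_testzero(objs[i]))
			continue;	/* someone still holds a reference */
		batch[n++] = objs[i];
		if (n == BATCH) {	/* "pagevec" full: flush it */
			free_batch(batch, n);
			n = 0;
		}
	}
	if (n)				/* flush the final partial batch */
		free_batch(batch, n);
}

int main(void)
{
	struct obj *objs[100];
	int i;

	for (i = 0; i < 100; i++) {
		objs[i] = malloc(sizeof(*objs[i]));
		objs[i]->refcount = 1;	/* single owner */
	}
	pool_live = 100;
	release_objs(objs, 100);
	printf("live objects after release: %ld\n", pool_live);	/* 0 */
	return 0;
}

The payoff is lock traffic: the shared lock is taken once per batch of releases rather than once per release, which is why release_pages() accumulates dead pages in a pagevec before freeing them, and why the lru_add_pvecs / lru_add_active_pvecs caches drained above exist at all.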