X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Fvmscan.c;h=840092d46a90244a5a0ad23aedde2bdf030ab928;hb=af7a3c4bdf86ef01814776f94aaca3f671fcbb6e;hp=e7244bc6e4ee79b95453adf33abaf57f9998cd0d;hpb=ec9397bab20a628530ce3051167d3d0fcc2c1af7;p=linux-2.6.git diff --git a/mm/vmscan.c b/mm/vmscan.c index e7244bc6e..840092d46 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include /* for try_to_release_page(), buffer_heads_over_limit */ @@ -31,58 +30,37 @@ #include #include #include +#include #include #include +#include #include #include #include -#include -#ifndef AT_LIMIT_SUPPORT -#warning "ckrm_at_limit disabled due to problems with memory hog tests -- seting ckrm_shrink_list_empty to true" -#undef ckrm_shrink_list_empty -#define ckrm_shrink_list_empty() (1) -#endif - -/* possible outcome of pageout() */ -typedef enum { - /* failed to write page out, page is locked */ - PAGE_KEEP, - /* move page to the active list, page is locked */ - PAGE_ACTIVATE, - /* page has been sent to the disk successfully, page is unlocked */ - PAGE_SUCCESS, - /* page is clean and locked */ - PAGE_CLEAN, -} pageout_t; +#include "internal.h" struct scan_control { - /* Ask refill_inactive_zone, or shrink_cache to scan this many pages */ - unsigned long nr_to_scan; - /* Incremented by the number of inactive pages that were scanned */ unsigned long nr_scanned; - /* Incremented by the number of pages reclaimed */ - unsigned long nr_reclaimed; - unsigned long nr_mapped; /* From page_state */ - /* How many pages shrink_cache() should reclaim */ - int nr_to_reclaim; - - /* Ask shrink_caches, or shrink_zone to scan at this priority */ - unsigned int priority; - /* This context's GFP mask */ - unsigned int gfp_mask; - - /* Flag used by CKRM */ - unsigned int ckrm_flags; + gfp_t gfp_mask; int may_writepage; + + /* Can pages be swapped as part of reclaim? */ + int may_swap; + + /* This context's SWAP_CLUSTER_MAX. If freeing memory for + * suspend, we effectively ignore SWAP_CLUSTER_MAX. + * In this context, it doesn't matter that we scan the + * whole list at once. */ + int swap_cluster_max; }; /* @@ -148,7 +126,7 @@ struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker) shrinker->seeks = seeks; shrinker->nr = 0; down_write(&shrinker_rwsem); - list_add(&shrinker->list, &shrinker_list); + list_add_tail(&shrinker->list, &shrinker_list); up_write(&shrinker_rwsem); } return shrinker; @@ -184,28 +162,43 @@ EXPORT_SYMBOL(remove_shrinker); * `lru_pages' represents the number of on-LRU pages in all the zones which * are eligible for the caller's allocation attempt. It is used for balancing * slab reclaim versus page reclaim. + * + * Returns the number of slab objects which we shrunk. */ -static int shrink_slab(unsigned long scanned, unsigned int gfp_mask, +unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, unsigned long lru_pages) { struct shrinker *shrinker; + unsigned long ret = 0; if (scanned == 0) scanned = SWAP_CLUSTER_MAX; if (!down_read_trylock(&shrinker_rwsem)) - return 0; + return 1; /* Assume we'll be able to shrink next time */ list_for_each_entry(shrinker, &shrinker_list, list) { unsigned long long delta; unsigned long total_scan; + unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); delta = (4 * scanned) / shrinker->seeks; - delta *= (*shrinker->shrinker)(0, gfp_mask); + delta *= max_pass; do_div(delta, lru_pages + 1); shrinker->nr += delta; - if (shrinker->nr < 0) - shrinker->nr = LONG_MAX; /* It wrapped! 
*/ + if (shrinker->nr < 0) { + printk(KERN_ERR "%s: nr=%ld\n", + __FUNCTION__, shrinker->nr); + shrinker->nr = max_pass; + } + + /* + * Avoid risking looping forever due to too large nr value: + * never try to free more than twice the estimate number of + * freeable entries. + */ + if (shrinker->nr > max_pass * 2) + shrinker->nr = max_pass * 2; total_scan = shrinker->nr; shrinker->nr = 0; @@ -213,10 +206,14 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask, while (total_scan >= SHRINK_BATCH) { long this_scan = SHRINK_BATCH; int shrink_ret; + int nr_before; + nr_before = (*shrinker->shrinker)(0, gfp_mask); shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask); if (shrink_ret == -1) break; + if (shrink_ret < nr_before) + ret += nr_before - shrink_ret; mod_page_state(slabs_scanned, this_scan); total_scan -= this_scan; @@ -226,7 +223,7 @@ static int shrink_slab(unsigned long scanned, unsigned int gfp_mask, shrinker->nr += total_scan; } up_read(&shrinker_rwsem); - return 0; + return ret; } /* Called without lock on whether page is mapped, so answer is unstable */ @@ -257,9 +254,7 @@ static inline int is_page_cache_freeable(struct page *page) static int may_write_to_queue(struct backing_dev_info *bdi) { - if (current_is_kswapd()) - return 1; - if (current_is_pdflush()) /* This is unlikely, but why not... */ + if (current->flags & PF_SWAPWRITE) return 1; if (!bdi_write_congested(bdi)) return 1; @@ -294,9 +289,10 @@ static void handle_write_error(struct address_space *mapping, } /* - * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). + * pageout is called by shrink_page_list() for each dirty page. + * Calls ->writepage(). */ -static pageout_t pageout(struct page *page, struct address_space *mapping) +pageout_t pageout(struct page *page, struct address_space *mapping) { /* * If the page is dirty, only perform writeback if that write @@ -317,8 +313,20 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) */ if (!is_page_cache_freeable(page)) return PAGE_KEEP; - if (!mapping) + if (!mapping) { + /* + * Some data journaling orphaned pages can have + * page->mapping == NULL while being dirty with clean buffers. + */ + if (PagePrivate(page)) { + if (try_to_free_buffers(page)) { + ClearPageDirty(page); + printk("%s: orphaned page\n", __FUNCTION__); + return PAGE_CLEAN; + } + } return PAGE_KEEP; + } if (mapping->a_ops->writepage == NULL) return PAGE_ACTIVATE; if (!may_write_to_queue(mapping->backing_dev_info)) @@ -337,7 +345,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) res = mapping->a_ops->writepage(page, &wbc); if (res < 0) handle_write_error(mapping, page, res); - if (res == WRITEPAGE_ACTIVATE) { + if (res == AOP_WRITEPAGE_ACTIVATE) { ClearPageReclaim(page); return PAGE_ACTIVATE; } @@ -352,15 +360,53 @@ static pageout_t pageout(struct page *page, struct address_space *mapping) return PAGE_CLEAN; } +int remove_mapping(struct address_space *mapping, struct page *page) +{ + if (!mapping) + return 0; /* truncate got there first */ + + write_lock_irq(&mapping->tree_lock); + + /* + * The non-racy check for busy page. It is critical to check + * PageDirty _after_ making sure that the page is freeable and + * not in use by anybody. 
(pagecache + us == 2) + */ + if (unlikely(page_count(page) != 2)) + goto cannot_free; + smp_rmb(); + if (unlikely(PageDirty(page))) + goto cannot_free; + + if (PageSwapCache(page)) { + swp_entry_t swap = { .val = page_private(page) }; + __delete_from_swap_cache(page); + write_unlock_irq(&mapping->tree_lock); + swap_free(swap); + __put_page(page); /* The pagecache ref */ + return 1; + } + + __remove_from_page_cache(page); + write_unlock_irq(&mapping->tree_lock); + __put_page(page); + return 1; + +cannot_free: + write_unlock_irq(&mapping->tree_lock); + return 0; +} + /* - * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed + * shrink_page_list() returns the number of reclaimed pages */ -static int shrink_list(struct list_head *page_list, struct scan_control *sc) +static unsigned long shrink_page_list(struct list_head *page_list, + struct scan_control *sc) { LIST_HEAD(ret_pages); struct pagevec freed_pvec; int pgactivate = 0; - int reclaimed = 0; + unsigned long nr_reclaimed = 0; cond_resched(); @@ -381,15 +427,19 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) BUG_ON(PageActive(page)); - if (PageWriteback(page)) + sc->nr_scanned++; + + if (!sc->may_swap && page_mapped(page)) goto keep_locked; - sc->nr_scanned++; /* Double the slab pressure for mapped and swapcache pages */ if (page_mapped(page) || PageSwapCache(page)) sc->nr_scanned++; - referenced = page_referenced(page, 1, sc->priority <= 0); + if (PageWriteback(page)) + goto keep_locked; + + referenced = page_referenced(page, 1); /* In active use or really unfreeable? Activate it. */ if (referenced && page_mapping_inuse(page)) goto activate_locked; @@ -399,10 +449,9 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) * Anonymous process memory has backing store? * Try to allocate it some swap space here. */ - if (PageAnon(page) && !PageSwapCache(page)) { - if (!add_to_swap(page)) + if (PageAnon(page) && !PageSwapCache(page)) + if (!add_to_swap(page, GFP_ATOMIC)) goto activate_locked; - } #endif /* CONFIG_SWAP */ mapping = page_mapping(page); @@ -414,7 +463,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) * processes. Try to unmap it here. */ if (page_mapped(page) && mapping) { - switch (try_to_unmap(page)) { + switch (try_to_unmap(page, 0)) { case SWAP_FAIL: goto activate_locked; case SWAP_AGAIN: @@ -429,7 +478,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) goto keep_locked; if (!may_enter_fs) goto keep_locked; - if (laptop_mode && !sc->may_writepage) + if (!sc->may_writepage) goto keep_locked; /* Page is dirty, try to write it out here */ @@ -483,39 +532,12 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) goto free_it; } - if (!mapping) - goto keep_locked; /* truncate got there first */ - - spin_lock_irq(&mapping->tree_lock); - - /* - * The non-racy check for busy page. It is critical to check - * PageDirty _after_ making sure that the page is freeable and - * not in use by anybody. 
(pagecache + us == 2) - */ - if (page_count(page) != 2 || PageDirty(page)) { - spin_unlock_irq(&mapping->tree_lock); + if (!remove_mapping(mapping, page)) goto keep_locked; - } - -#ifdef CONFIG_SWAP - if (PageSwapCache(page)) { - swp_entry_t swap = { .val = page->private }; - __delete_from_swap_cache(page); - spin_unlock_irq(&mapping->tree_lock); - swap_free(swap); - __put_page(page); /* The pagecache ref */ - goto free_it; - } -#endif /* CONFIG_SWAP */ - - __remove_from_page_cache(page); - spin_unlock_irq(&mapping->tree_lock); - __put_page(page); free_it: unlock_page(page); - reclaimed++; + nr_reclaimed++; if (!pagevec_add(&freed_pvec, page)) __pagevec_release_nonlru(&freed_pvec); continue; @@ -533,98 +555,112 @@ keep: if (pagevec_count(&freed_pvec)) __pagevec_release_nonlru(&freed_pvec); mod_page_state(pgactivate, pgactivate); - sc->nr_reclaimed += reclaimed; - return reclaimed; + return nr_reclaimed; } /* - * zone->lru_lock is heavily contented. We relieve it by quickly privatising - * a batch of pages and working on them outside the lock. Any pages which were - * not freed will be added back to the LRU. + * zone->lru_lock is heavily contended. Some of the functions that + * shrink the lists perform better by taking out a batch of pages + * and working on them outside the LRU lock. + * + * For pagecache intensive workloads, this function is the hottest + * spot in the kernel (apart from copy_*_user functions). * - * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed + * Appropriate locks must be held before calling this function. * - * For pagecache intensive workloads, the first loop here is the hottest spot - * in the kernel (apart from the copy_*_user functions). + * @nr_to_scan: The number of pages to look through on the list. + * @src: The LRU list to pull pages off. + * @dst: The temp list to put pages on to. + * @scanned: The number of pages that were scanned. + * + * returns how many pages were moved onto *@dst. + */ +static unsigned long isolate_lru_pages(unsigned long nr_to_scan, + struct list_head *src, struct list_head *dst, + unsigned long *scanned) +{ + unsigned long nr_taken = 0; + struct page *page; + unsigned long scan; + + for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { + struct list_head *target; + page = lru_to_page(src); + prefetchw_prev_lru_page(page, src, flags); + + BUG_ON(!PageLRU(page)); + + list_del(&page->lru); + target = src; + if (likely(get_page_unless_zero(page))) { + /* + * Be careful not to clear PageLRU until after we're + * sure the page is not being freed elsewhere -- the + * page release code relies on it. + */ + ClearPageLRU(page); + target = dst; + nr_taken++; + } /* else it is being freed elsewhere */ + + list_add(&page->lru, target); + } + + *scanned = scan; + return nr_taken; +} + +/* + * shrink_inactive_list() is a helper for shrink_zone(). 
It returns the number + * of reclaimed pages */ -static void shrink_cache(struct zone *zone, struct scan_control *sc) +static unsigned long shrink_inactive_list(unsigned long max_scan, + struct zone *zone, struct scan_control *sc) { LIST_HEAD(page_list); struct pagevec pvec; - int max_scan = sc->nr_to_scan, nr_pass; - unsigned int ckrm_flags = sc->ckrm_flags, bit_flag; + unsigned long nr_scanned = 0; + unsigned long nr_reclaimed = 0; pagevec_init(&pvec, 1); lru_add_drain(); spin_lock_irq(&zone->lru_lock); -redo: - ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag); - nr_pass = zone->nr_inactive; - while (max_scan > 0) { + do { struct page *page; - int nr_taken = 0; - int nr_scan = 0; - int nr_freed; - - while (nr_pass-- && nr_scan++ < SWAP_CLUSTER_MAX && - !list_empty(&zone->inactive_list)) { - page = lru_to_page(&zone->inactive_list); - - prefetchw_prev_lru_page(page, - &zone->inactive_list, flags); + unsigned long nr_taken; + unsigned long nr_scan; + unsigned long nr_freed; - if (!TestClearPageLRU(page)) - BUG(); - list_del(&page->lru); - if (get_page_testone(page)) { - /* - * It is being freed elsewhere - */ - __put_page(page); - SetPageLRU(page); - list_add(&page->lru, &zone->inactive_list); - continue; - } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) { - __put_page(page); - SetPageLRU(page); -#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE - list_add_tail(&page->lru, &zone->inactive_list); -#else - list_add(&page->lru, &zone->inactive_list); -#endif - continue; - } - list_add(&page->lru, &page_list); - ckrm_mem_dec_inactive(page); - nr_taken++; - } + nr_taken = isolate_lru_pages(sc->swap_cluster_max, + &zone->inactive_list, + &page_list, &nr_scan); zone->nr_inactive -= nr_taken; - zone->pages_scanned += nr_taken; + zone->pages_scanned += nr_scan; spin_unlock_irq(&zone->lru_lock); - if ((bit_flag == 0) && (nr_taken == 0)) + nr_scanned += nr_scan; + nr_freed = shrink_page_list(&page_list, sc); + nr_reclaimed += nr_freed; + local_irq_disable(); + if (current_is_kswapd()) { + __mod_page_state_zone(zone, pgscan_kswapd, nr_scan); + __mod_page_state(kswapd_steal, nr_freed); + } else + __mod_page_state_zone(zone, pgscan_direct, nr_scan); + __mod_page_state_zone(zone, pgsteal, nr_freed); + + if (nr_taken == 0) goto done; - max_scan -= nr_scan; - if (current_is_kswapd()) - mod_page_state_zone(zone, pgscan_kswapd, nr_scan); - else - mod_page_state_zone(zone, pgscan_direct, nr_scan); - nr_freed = shrink_list(&page_list, sc); - if (current_is_kswapd()) - mod_page_state(kswapd_steal, nr_freed); - mod_page_state_zone(zone, pgsteal, nr_freed); - sc->nr_to_reclaim -= nr_freed; - - spin_lock_irq(&zone->lru_lock); + spin_lock(&zone->lru_lock); /* * Put back any unfreeable pages. */ while (!list_empty(&page_list)) { page = lru_to_page(&page_list); - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); list_del(&page->lru); if (PageActive(page)) add_page_to_active_list(zone, page); @@ -636,13 +672,12 @@ redo: spin_lock_irq(&zone->lru_lock); } } - if (ckrm_flags && (nr_pass <= 0)) { - goto redo; - } - } - spin_unlock_irq(&zone->lru_lock); + } while (nr_scanned < max_scan); + spin_unlock(&zone->lru_lock); done: + local_irq_enable(); pagevec_release(&pvec); + return nr_reclaimed; } /* @@ -662,100 +697,67 @@ done: * The downside is that we have to touch page->_count against each page. * But we had to alter page->flags anyway. 
*/ -static void -refill_inactive_zone(struct zone *zone, struct scan_control *sc) +static void shrink_active_list(unsigned long nr_pages, struct zone *zone, + struct scan_control *sc) { - int pgmoved; + unsigned long pgmoved; int pgdeactivate = 0; - int pgscanned = 0; - int nr_pages = sc->nr_to_scan; + unsigned long pgscanned; LIST_HEAD(l_hold); /* The pages which were snipped off */ LIST_HEAD(l_inactive); /* Pages to go onto the inactive_list */ LIST_HEAD(l_active); /* Pages to go onto the active_list */ struct page *page; struct pagevec pvec; int reclaim_mapped = 0; - long mapped_ratio; - long distress; - long swap_tendency; - unsigned int ckrm_flags = sc->ckrm_flags, bit_flag; - int nr_pass; - lru_add_drain(); - pgmoved = 0; - spin_lock_irq(&zone->lru_lock); -redo: - ckrm_get_reclaim_bits(&ckrm_flags, &bit_flag); - nr_pass = zone->nr_active; - while (pgscanned < nr_pages && !list_empty(&zone->active_list) && - nr_pass) { - page = lru_to_page(&zone->active_list); - prefetchw_prev_lru_page(page, &zone->active_list, flags); - if (!TestClearPageLRU(page)) - BUG(); - list_del(&page->lru); - if (get_page_testone(page)) { - /* - * It was already free! release_pages() or put_page() - * are about to remove it from the LRU and free it. So - * put the refcount back and put the page back on the - * LRU - */ - __put_page(page); - SetPageLRU(page); - list_add(&page->lru, &zone->active_list); - pgscanned++; - } else if (bit_flag && !ckrm_kick_page(page, bit_flag)) { - __put_page(page); - SetPageLRU(page); -#ifdef CONFIG_CKRM_MEM_LRUORDER_CHANGE - list_add_tail(&page->lru, &zone->active_list); -#else - list_add(&page->lru, &zone->active_list); -#endif - } else { - list_add(&page->lru, &l_hold); - ckrm_mem_dec_active(page); - pgmoved++; - pgscanned++; - } - if (!--nr_pass && ckrm_flags) { - goto redo; - } - } - zone->nr_active -= pgmoved; - spin_unlock_irq(&zone->lru_lock); + if (sc->may_swap) { + long mapped_ratio; + long distress; + long swap_tendency; - /* - * `distress' is a measure of how much trouble we're having reclaiming - * pages. 0 -> no problems. 100 -> great trouble. - */ - distress = 100 >> zone->prev_priority; + /* + * `distress' is a measure of how much trouble we're having + * reclaiming pages. 0 -> no problems. 100 -> great trouble. + */ + distress = 100 >> zone->prev_priority; - /* - * The point of this algorithm is to decide when to start reclaiming - * mapped memory instead of just pagecache. Work out how much memory - * is mapped. - */ - mapped_ratio = (sc->nr_mapped * 100) / total_memory; + /* + * The point of this algorithm is to decide when to start + * reclaiming mapped memory instead of just pagecache. Work out + * how much memory + * is mapped. + */ + mapped_ratio = (sc->nr_mapped * 100) / total_memory; - /* - * Now decide how much we really want to unmap some pages. The mapped - * ratio is downgraded - just because there's a lot of mapped memory - * doesn't necessarily mean that page reclaim isn't succeeding. - * - * The distress ratio is important - we don't want to start going oom. - * - * A 100% value of vm_swappiness overrides this algorithm altogether. - */ - swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; + /* + * Now decide how much we really want to unmap some pages. The + * mapped ratio is downgraded - just because there's a lot of + * mapped memory doesn't necessarily mean that page reclaim + * isn't succeeding. + * + * The distress ratio is important - we don't want to start + * going oom. 
+ * + * A 100% value of vm_swappiness overrides this algorithm + * altogether. + */ + swap_tendency = mapped_ratio / 2 + distress + vm_swappiness; - /* - * Now use this metric to decide whether to start moving mapped memory - * onto the inactive list. - */ - if (swap_tendency >= 100) - reclaim_mapped = 1; + /* + * Now use this metric to decide whether to start moving mapped + * memory onto the inactive list. + */ + if (swap_tendency >= 100) + reclaim_mapped = 1; + } + + lru_add_drain(); + spin_lock_irq(&zone->lru_lock); + pgmoved = isolate_lru_pages(nr_pages, &zone->active_list, + &l_hold, &pgscanned); + zone->pages_scanned += pgscanned; + zone->nr_active -= pgmoved; + spin_unlock_irq(&zone->lru_lock); while (!list_empty(&l_hold)) { cond_resched(); @@ -764,7 +766,7 @@ redo: if (page_mapped(page)) { if (!reclaim_mapped || (total_swap_pages == 0 && PageAnon(page)) || - page_referenced(page, 0, sc->priority <= 0)) { + page_referenced(page, 0)) { list_add(&page->lru, &l_active); continue; } @@ -778,12 +780,12 @@ redo: while (!list_empty(&l_inactive)) { page = lru_to_page(&l_inactive); prefetchw_prev_lru_page(page, &l_inactive, flags); - if (TestSetPageLRU(page)) - BUG(); - if (!TestClearPageActive(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); + BUG_ON(!PageActive(page)); + ClearPageActive(page); + list_move(&page->lru, &zone->inactive_list); - ckrm_mem_inc_inactive(page); pgmoved++; if (!pagevec_add(&pvec, page)) { zone->nr_inactive += pgmoved; @@ -808,11 +810,10 @@ redo: while (!list_empty(&l_active)) { page = lru_to_page(&l_active); prefetchw_prev_lru_page(page, &l_active, flags); - if (TestSetPageLRU(page)) - BUG(); + BUG_ON(PageLRU(page)); + SetPageLRU(page); BUG_ON(!PageActive(page)); list_move(&page->lru, &zone->active_list); - ckrm_mem_inc_active(page); pgmoved++; if (!pagevec_add(&pvec, page)) { zone->nr_active += pgmoved; @@ -823,171 +824,68 @@ redo: } } zone->nr_active += pgmoved; - spin_unlock_irq(&zone->lru_lock); - pagevec_release(&pvec); + spin_unlock(&zone->lru_lock); + + __mod_page_state_zone(zone, pgrefill, pgscanned); + __mod_page_state(pgdeactivate, pgdeactivate); + local_irq_enable(); - mod_page_state_zone(zone, pgrefill, pgscanned); - mod_page_state(pgdeactivate, pgdeactivate); + pagevec_release(&pvec); } /* * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. */ -static void -shrink_zone(struct zone *zone, struct scan_control *sc) +static unsigned long shrink_zone(int priority, struct zone *zone, + struct scan_control *sc) { unsigned long nr_active; unsigned long nr_inactive; + unsigned long nr_to_scan; + unsigned long nr_reclaimed = 0; + + atomic_inc(&zone->reclaim_in_progress); /* * Add one to `nr_to_scan' just to make sure that the kernel will * slowly sift through the active list. 
*/ - zone->nr_scan_active += (zone->nr_active >> sc->priority) + 1; + zone->nr_scan_active += (zone->nr_active >> priority) + 1; nr_active = zone->nr_scan_active; - if (nr_active >= SWAP_CLUSTER_MAX) + if (nr_active >= sc->swap_cluster_max) zone->nr_scan_active = 0; else nr_active = 0; - zone->nr_scan_inactive += (zone->nr_inactive >> sc->priority) + 1; + zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1; nr_inactive = zone->nr_scan_inactive; - if (nr_inactive >= SWAP_CLUSTER_MAX) + if (nr_inactive >= sc->swap_cluster_max) zone->nr_scan_inactive = 0; else nr_inactive = 0; - sc->nr_to_reclaim = SWAP_CLUSTER_MAX; - while (nr_active || nr_inactive) { - sc->ckrm_flags = ckrm_setup_reclamation(); if (nr_active) { - sc->nr_to_scan = min(nr_active, - (unsigned long)SWAP_CLUSTER_MAX); - nr_active -= sc->nr_to_scan; - refill_inactive_zone(zone, sc); + nr_to_scan = min(nr_active, + (unsigned long)sc->swap_cluster_max); + nr_active -= nr_to_scan; + shrink_active_list(nr_to_scan, zone, sc); } if (nr_inactive) { - sc->nr_to_scan = min(nr_inactive, - (unsigned long)SWAP_CLUSTER_MAX); - nr_inactive -= sc->nr_to_scan; - shrink_cache(zone, sc); - if (sc->nr_to_reclaim <= 0) - break; + nr_to_scan = min(nr_inactive, + (unsigned long)sc->swap_cluster_max); + nr_inactive -= nr_to_scan; + nr_reclaimed += shrink_inactive_list(nr_to_scan, zone, + sc); } - ckrm_teardown_reclamation(); - } -} - -#if defined(CONFIG_CKRM_RES_MEM) && defined(AT_LIMIT_SUPPORT) -// This function needs to be given more thought. -// Shrink the class to be at 90% of its limit -static void -ckrm_shrink_class(ckrm_mem_res_t *cls) -{ - struct scan_control sc; - struct zone *zone; - int zindex = 0, active_credit = 0, inactive_credit = 0; - - if (ckrm_test_set_shrink(cls)) { // set the SHRINK bit atomically - // if it is already set somebody is working on it. so... 
leave - return; } - sc.nr_mapped = read_page_state(nr_mapped); - sc.nr_scanned = 0; - sc.ckrm_flags = ckrm_get_reclaim_flags(cls); - sc.nr_reclaimed = 0; - sc.priority = 0; // always very high priority - - for_each_zone(zone) { - int zone_total, zone_limit, active_limit, inactive_limit; - int active_over, inactive_over; - unsigned long nr_active, nr_inactive; - u64 temp; - - zone->temp_priority = zone->prev_priority; - zone->prev_priority = sc.priority; - - zone_total = zone->nr_active + zone->nr_inactive + zone->free_pages; - - temp = (u64) cls->pg_limit * zone_total; - do_div(temp, ckrm_tot_lru_pages); - zone_limit = (int) temp; - active_limit = (6 * zone_limit) / 10; // 2/3rd in active list - inactive_limit = (3 * zone_limit) / 10; // 1/3rd in inactive list - - active_over = cls->nr_active[zindex] - active_limit + active_credit; - inactive_over = active_over + - (cls->nr_inactive[zindex] - inactive_limit) + inactive_credit; - - if (active_over > 0) { - zone->nr_scan_active += active_over + 1; - nr_active = zone->nr_scan_active; - active_credit = 0; - } else { - active_credit += active_over; - nr_active = 0; - } - if (inactive_over > 0) { - zone->nr_scan_inactive += inactive_over; - nr_inactive = zone->nr_scan_inactive; - inactive_credit = 0; - } else { - inactive_credit += inactive_over; - nr_inactive = 0; - } - while (nr_active || nr_inactive) { - if (nr_active) { - sc.nr_to_scan = min(nr_active, - (unsigned long)SWAP_CLUSTER_MAX); - nr_active -= sc.nr_to_scan; - refill_inactive_zone(zone, &sc); - } - - if (nr_inactive) { - sc.nr_to_scan = min(nr_inactive, - (unsigned long)SWAP_CLUSTER_MAX); - nr_inactive -= sc.nr_to_scan; - shrink_cache(zone, &sc); - if (sc.nr_to_reclaim <= 0) - break; - } - } - zone->prev_priority = zone->temp_priority; - zindex++; - } - ckrm_clear_shrink(cls); -} - -static void -ckrm_shrink_classes(void) -{ - ckrm_mem_res_t *cls; - - spin_lock(&ckrm_mem_lock); - while (!ckrm_shrink_list_empty()) { - cls = list_entry(ckrm_shrink_list.next, ckrm_mem_res_t, - shrink_list); - spin_unlock(&ckrm_mem_lock); - ckrm_shrink_class(cls); - spin_lock(&ckrm_mem_lock); - list_del(&cls->shrink_list); - cls->flags &= ~MEM_AT_LIMIT; - } - spin_unlock(&ckrm_mem_lock); throttle_vm_writeout(); -} - -#else -#if defined(CONFIG_CKRM_RES_MEM) && !defined(AT_LIMIT_SUPPORT) -#warning "disabling ckrm_at_limit -- setting ckrm_shrink_classes to noop " -#endif - -#define ckrm_shrink_classes() do { } while(0) -#endif + atomic_dec(&zone->reclaim_in_progress); + return nr_reclaimed; +} /* * This is the direct reclaim path, for page-allocating processes. We only @@ -1005,26 +903,31 @@ ckrm_shrink_classes(void) * If a zone is deemed to be full of pinned pages then just give it a light * scan then give up on it. 
*/ -static void -shrink_caches(struct zone **zones, struct scan_control *sc) +static unsigned long shrink_zones(int priority, struct zone **zones, + struct scan_control *sc) { + unsigned long nr_reclaimed = 0; int i; for (i = 0; zones[i] != NULL; i++) { struct zone *zone = zones[i]; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; - zone->temp_priority = sc->priority; - if (zone->prev_priority > sc->priority) - zone->prev_priority = sc->priority; + if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) + continue; + + zone->temp_priority = priority; + if (zone->prev_priority > priority) + zone->prev_priority = priority; - if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY) + if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; /* Let kswapd poll it */ - shrink_zone(zone, sc); + nr_reclaimed += shrink_zone(priority, zone, sc); } + return nr_reclaimed; } /* @@ -1040,25 +943,30 @@ shrink_caches(struct zone **zones, struct scan_control *sc) * holds filesystem locks which prevent writeout this might not work, and the * allocation attempt will fail. */ -int try_to_free_pages(struct zone **zones, - unsigned int gfp_mask, unsigned int order) +unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask) { int priority; int ret = 0; - int total_scanned = 0, total_reclaimed = 0; + unsigned long total_scanned = 0; + unsigned long nr_reclaimed = 0; struct reclaim_state *reclaim_state = current->reclaim_state; - struct scan_control sc; unsigned long lru_pages = 0; int i; - - sc.gfp_mask = gfp_mask; - sc.may_writepage = 0; + struct scan_control sc = { + .gfp_mask = gfp_mask, + .may_writepage = !laptop_mode, + .swap_cluster_max = SWAP_CLUSTER_MAX, + .may_swap = 1, + }; inc_page_state(allocstall); for (i = 0; zones[i] != NULL; i++) { struct zone *zone = zones[i]; + if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) + continue; + zone->temp_priority = DEF_PRIORITY; lru_pages += zone->nr_active + zone->nr_inactive; } @@ -1066,20 +974,19 @@ int try_to_free_pages(struct zone **zones, for (priority = DEF_PRIORITY; priority >= 0; priority--) { sc.nr_mapped = read_page_state(nr_mapped); sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - sc.priority = priority; - shrink_caches(zones, &sc); + if (!priority) + disable_swap_token(); + nr_reclaimed += shrink_zones(priority, zones, &sc); shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); if (reclaim_state) { - sc.nr_reclaimed += reclaim_state->reclaimed_slab; + nr_reclaimed += reclaim_state->reclaimed_slab; reclaim_state->reclaimed_slab = 0; } - if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) { + total_scanned += sc.nr_scanned; + if (nr_reclaimed >= sc.swap_cluster_max) { ret = 1; goto out; } - total_scanned += sc.nr_scanned; - total_reclaimed += sc.nr_reclaimed; /* * Try to write back as many pages as we just scanned. This @@ -1088,8 +995,9 @@ int try_to_free_pages(struct zone **zones, * that's undesirable in laptop mode, where we *want* lumpy * writeout. So in laptop mode, write out the whole world. */ - if (total_scanned > SWAP_CLUSTER_MAX + SWAP_CLUSTER_MAX/2) { - wakeup_bdflush(laptop_mode ? 0 : total_scanned); + if (total_scanned > sc.swap_cluster_max + + sc.swap_cluster_max / 2) { + wakeup_pdflush(laptop_mode ? 
0 : total_scanned); sc.may_writepage = 1; } @@ -1097,11 +1005,15 @@ int try_to_free_pages(struct zone **zones, if (sc.nr_scanned && priority < DEF_PRIORITY - 2) blk_congestion_wait(WRITE, HZ/10); } - if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) - out_of_memory(gfp_mask); out: - for (i = 0; zones[i] != 0; i++) - zones[i]->prev_priority = zones[i]->temp_priority; + for (i = 0; zones[i] != 0; i++) { + struct zone *zone = zones[i]; + + if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) + continue; + + zone->prev_priority = zone->temp_priority; + } return ret; } @@ -1130,21 +1042,26 @@ out: * the page allocator fallback scheme to ensure that aging of pages is balanced * across the zones. */ -static int balance_pgdat(pg_data_t *pgdat, int nr_pages) +static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages, + int order) { - int to_free = nr_pages; + unsigned long to_free = nr_pages; int all_zones_ok; int priority; int i; - int total_scanned, total_reclaimed; + unsigned long total_scanned; + unsigned long nr_reclaimed; struct reclaim_state *reclaim_state = current->reclaim_state; - struct scan_control sc; + struct scan_control sc = { + .gfp_mask = GFP_KERNEL, + .may_swap = 1, + .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX, + }; loop_again: total_scanned = 0; - total_reclaimed = 0; - sc.gfp_mask = GFP_KERNEL; - sc.may_writepage = 0; + nr_reclaimed = 0; + sc.may_writepage = !laptop_mode; sc.nr_mapped = read_page_state(nr_mapped); inc_page_state(pageoutrun); @@ -1159,6 +1076,10 @@ loop_again: int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ unsigned long lru_pages = 0; + /* The swap token gets in the way of swapout... */ + if (!priority) + disable_swap_token(); + all_zones_ok = 1; if (nr_pages == 0) { @@ -1169,14 +1090,15 @@ loop_again: for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; - if (zone->free_pages <= zone->pages_high) { + if (!zone_watermark_ok(zone, order, + zone->pages_high, 0, 0)) { end_zone = i; goto scan; } @@ -1203,32 +1125,33 @@ scan: */ for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; + int nr_slab; - if (zone->present_pages == 0) + if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && priority != DEF_PRIORITY) continue; if (nr_pages == 0) { /* Not software suspend */ - if (zone->free_pages <= zone->pages_high) + if (!zone_watermark_ok(zone, order, + zone->pages_high, end_zone, 0)) all_zones_ok = 0; } zone->temp_priority = priority; if (zone->prev_priority > priority) zone->prev_priority = priority; sc.nr_scanned = 0; - sc.nr_reclaimed = 0; - sc.priority = priority; - shrink_zone(zone, &sc); + nr_reclaimed += shrink_zone(priority, zone, &sc); reclaim_state->reclaimed_slab = 0; - shrink_slab(sc.nr_scanned, GFP_KERNEL, lru_pages); - sc.nr_reclaimed += reclaim_state->reclaimed_slab; - total_reclaimed += sc.nr_reclaimed; + nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL, + lru_pages); + nr_reclaimed += reclaim_state->reclaimed_slab; + total_scanned += sc.nr_scanned; if (zone->all_unreclaimable) continue; - if (zone->pages_scanned >= (zone->nr_active + - zone->nr_inactive) * 4) + if (nr_slab == 0 && zone->pages_scanned >= + (zone->nr_active + zone->nr_inactive) * 4) zone->all_unreclaimable = 1; /* * If we've done a decent amount of scanning and @@ -1236,10 +1159,10 @@ scan: * even in laptop mode */ if (total_scanned > 
SWAP_CLUSTER_MAX * 2 && - total_scanned > total_reclaimed+total_reclaimed/2) + total_scanned > nr_reclaimed + nr_reclaimed / 2) sc.may_writepage = 1; } - if (nr_pages && to_free > total_reclaimed) + if (nr_pages && to_free > nr_reclaimed) continue; /* swsusp: need to do more work */ if (all_zones_ok) break; /* kswapd: all done */ @@ -1256,7 +1179,7 @@ scan: * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ - if (total_reclaimed >= SWAP_CLUSTER_MAX) + if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages) break; } out: @@ -1270,7 +1193,7 @@ out: goto loop_again; } - return total_reclaimed; + return nr_reclaimed; } /* @@ -1288,6 +1211,7 @@ out: */ static int kswapd(void *p) { + unsigned long order; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; DEFINE_WAIT(wait); @@ -1314,19 +1238,30 @@ static int kswapd(void *p) * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). */ - tsk->flags |= PF_MEMALLOC|PF_KSWAPD; + tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; + order = 0; for ( ; ; ) { - if (current->flags & PF_FREEZE) - refrigerator(PF_FREEZE); + unsigned long new_order; + + try_to_freeze(); + prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); - schedule(); + new_order = pgdat->kswapd_max_order; + pgdat->kswapd_max_order = 0; + if (order < new_order) { + /* + * Don't sleep if someone wants a larger 'order' + * allocation + */ + order = new_order; + } else { + schedule(); + order = pgdat->kswapd_max_order; + } finish_wait(&pgdat->kswapd_wait, &wait); - if (!ckrm_shrink_list_empty()) - ckrm_shrink_classes(); - else - balance_pgdat(pgdat, 0); + balance_pgdat(pgdat, 0, order); } return 0; } @@ -1334,15 +1269,23 @@ static int kswapd(void *p) /* * A zone is low on free memory, so wake its kswapd task to service it. */ -void wakeup_kswapd(struct zone *zone) +void wakeup_kswapd(struct zone *zone, int order) { - if (zone->present_pages == 0) + pg_data_t *pgdat; + + if (!populated_zone(zone)) + return; + + pgdat = zone->zone_pgdat; + if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0)) return; - if ((zone->free_pages > zone->pages_low) && ckrm_shrink_list_empty()) + if (pgdat->kswapd_max_order < order) + pgdat->kswapd_max_order = order; + if (!cpuset_zone_allowed(zone, __GFP_HARDWALL)) return; - if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait)) + if (!waitqueue_active(&pgdat->kswapd_wait)) return; - wake_up_interruptible(&zone->zone_pgdat->kswapd_wait); + wake_up_interruptible(&pgdat->kswapd_wait); } #ifdef CONFIG_PM @@ -1350,24 +1293,31 @@ void wakeup_kswapd(struct zone *zone) * Try to free `nr_pages' of memory, system-wide. Returns the number of freed * pages. 
*/ -int shrink_all_memory(int nr_pages) +unsigned long shrink_all_memory(unsigned long nr_pages) { pg_data_t *pgdat; - int nr_to_free = nr_pages; - int ret = 0; + unsigned long nr_to_free = nr_pages; + unsigned long ret = 0; + unsigned retry = 2; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; current->reclaim_state = &reclaim_state; - for_each_pgdat(pgdat) { - int freed; - freed = balance_pgdat(pgdat, nr_to_free); +repeat: + for_each_online_pgdat(pgdat) { + unsigned long freed; + + freed = balance_pgdat(pgdat, nr_to_free, 0); ret += freed; nr_to_free -= freed; - if (nr_to_free <= 0) + if ((long)nr_to_free <= 0) break; } + if (retry-- && ret < nr_pages) { + blk_congestion_wait(WRITE, HZ/5); + goto repeat; + } current->reclaim_state = NULL; return ret; } @@ -1378,15 +1328,14 @@ int shrink_all_memory(int nr_pages) not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ -static int __devinit cpu_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) +static int cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) { pg_data_t *pgdat; cpumask_t mask; if (action == CPU_ONLINE) { - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { mask = node_to_cpumask(pgdat->node_id); if (any_online_cpu(mask) != NR_CPUS) /* One of our CPUs online: restore mask */ @@ -1400,13 +1349,162 @@ static int __devinit cpu_callback(struct notifier_block *nfb, static int __init kswapd_init(void) { pg_data_t *pgdat; + swap_setup(); - for_each_pgdat(pgdat) - pgdat->kswapd - = find_task_by_real_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL)); + for_each_online_pgdat(pgdat) { + pid_t pid; + + pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL); + BUG_ON(pid < 0); + read_lock(&tasklist_lock); + pgdat->kswapd = find_task_by_real_pid(pid); + read_unlock(&tasklist_lock); + } total_memory = nr_free_pagecache_pages(); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) + +#ifdef CONFIG_NUMA +/* + * Zone reclaim mode + * + * If non-zero call zone_reclaim when the number of free pages falls below + * the watermarks. + * + * In the future we may add flags to the mode. However, the page allocator + * should only have to check that zone_reclaim_mode != 0 before calling + * zone_reclaim(). + */ +int zone_reclaim_mode __read_mostly; + +#define RECLAIM_OFF 0 +#define RECLAIM_ZONE (1<<0) /* Run shrink_cache on the zone */ +#define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ +#define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ +#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */ + +/* + * Mininum time between zone reclaim scans + */ +int zone_reclaim_interval __read_mostly = 30*HZ; + +/* + * Priority for ZONE_RECLAIM. This determines the fraction of pages + * of a node considered for each zone_reclaim. 4 scans 1/16th of + * a zone. + */ +#define ZONE_RECLAIM_PRIORITY 4 + +/* + * Try to free up some pages from this zone through reclaim. 
+ */ +static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) +{ + /* Minimum pages needed in order to stay on node */ + const unsigned long nr_pages = 1 << order; + struct task_struct *p = current; + struct reclaim_state reclaim_state; + int priority; + unsigned long nr_reclaimed = 0; + struct scan_control sc = { + .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), + .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP), + .nr_mapped = read_page_state(nr_mapped), + .swap_cluster_max = max_t(unsigned long, nr_pages, + SWAP_CLUSTER_MAX), + .gfp_mask = gfp_mask, + }; + + disable_swap_token(); + cond_resched(); + /* + * We need to be able to allocate from the reserves for RECLAIM_SWAP + * and we also need to be able to write out pages for RECLAIM_WRITE + * and RECLAIM_SWAP. + */ + p->flags |= PF_MEMALLOC | PF_SWAPWRITE; + reclaim_state.reclaimed_slab = 0; + p->reclaim_state = &reclaim_state; + + /* + * Free memory by calling shrink zone with increasing priorities + * until we have enough memory freed. + */ + priority = ZONE_RECLAIM_PRIORITY; + do { + nr_reclaimed += shrink_zone(priority, zone, &sc); + priority--; + } while (priority >= 0 && nr_reclaimed < nr_pages); + + if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) { + /* + * shrink_slab() does not currently allow us to determine how + * many pages were freed in this zone. So we just shake the slab + * a bit and then go off node for this particular allocation + * despite possibly having freed enough memory to allocate in + * this zone. If we freed local memory then the next + * allocations will be local again. + * + * shrink_slab will free memory on all zones and may take + * a long time. + */ + shrink_slab(sc.nr_scanned, gfp_mask, order); + } + + p->reclaim_state = NULL; + current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); + + if (nr_reclaimed == 0) { + /* + * We were unable to reclaim enough pages to stay on node. We + * now allow off node accesses for a certain time period before + * trying again to reclaim pages from the local zone. + */ + zone->last_unsuccessful_zone_reclaim = jiffies; + } + + return nr_reclaimed >= nr_pages; +} + +int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) +{ + cpumask_t mask; + int node_id; + + /* + * Do not reclaim if there was a recent unsuccessful attempt at zone + * reclaim. In that case we let allocations go off node for the + * zone_reclaim_interval. Otherwise we would scan for each off-node + * page allocation. + */ + if (time_before(jiffies, + zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) + return 0; + + /* + * Avoid concurrent zone reclaims, do not reclaim in a zone that does + * not have reclaimable pages and if we should not delay the allocation + * then do not scan. + */ + if (!(gfp_mask & __GFP_WAIT) || + zone->all_unreclaimable || + atomic_read(&zone->reclaim_in_progress) > 0 || + (current->flags & PF_MEMALLOC)) + return 0; + + /* + * Only run zone reclaim on the local zone or on zones that do not + * have associated processors. This will favor the local processor + * over remote processors and spread off node memory allocations + * as wide as possible. + */ + node_id = zone->zone_pgdat->node_id; + mask = node_to_cpumask(node_id); + if (!cpus_empty(mask) && node_id != numa_node_id()) + return 0; + return __zone_reclaim(zone, gfp_mask, order); +} +#endif
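The hunks above keep shrink_slab()'s proportional-pressure arithmetic and add the max_pass clamp. Below is a minimal user-space sketch (not part of the patch) of that calculation; the helper name, the sample numbers and the standalone main() are assumptions for illustration, and the carry-over of shrinker->nr between calls is ignored.

/*
 * Illustrative user-space sketch of the shrink_slab() scan budget:
 * not kernel code, sample values are assumptions.  SHRINK_BATCH and
 * DEFAULT_SEEKS mirror the values used by this kernel tree.
 */
#include <stdio.h>

#define SHRINK_BATCH	128
#define DEFAULT_SEEKS	2

static unsigned long slab_scan_budget(unsigned long scanned,
				      unsigned long lru_pages,
				      unsigned long max_pass, int seeks)
{
	/* pressure on the cache, proportional to LRU scanning pressure */
	unsigned long long delta = (4ULL * scanned) / seeks;

	delta *= max_pass;		/* scale by freeable objects */
	delta /= lru_pages + 1;		/* do_div() in the kernel */

	/* never try to free more than twice the freeable estimate */
	if (delta > max_pass * 2)
		delta = max_pass * 2;
	return (unsigned long)delta;
}

int main(void)
{
	/*
	 * Assumed example: 1024 LRU pages scanned out of 262144 on the
	 * LRU, a cache reporting 50000 freeable objects, default seeks.
	 */
	unsigned long budget = slab_scan_budget(1024, 262144, 50000,
						DEFAULT_SEEKS);

	printf("scan budget: %lu objects\n", budget);
	printf("full batches of %d: %lu (remainder carried over)\n",
	       SHRINK_BATCH, budget / SHRINK_BATCH);
	return 0;
}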
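The patch also moves shrink_active_list()'s reclaim_mapped decision under the new sc->may_swap check. The following is a minimal user-space sketch of that heuristic, using the distress/mapped_ratio/swap_tendency formula from the comments above; the function name and the sample inputs are illustrative assumptions only.

/*
 * Illustrative user-space sketch of the reclaim_mapped heuristic:
 * not kernel code, all inputs are made-up sample values.
 */
#include <stdio.h>

static int reclaim_mapped_decision(long nr_mapped, long total_memory,
				   int prev_priority, int vm_swappiness)
{
	/* 0 -> no trouble reclaiming, 100 -> great trouble */
	long distress = 100 >> prev_priority;

	/* how much of memory is mapped, in percent */
	long mapped_ratio = (nr_mapped * 100) / total_memory;

	/* a vm_swappiness of 100 overrides the heuristic altogether */
	long swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;

	return swap_tendency >= 100;
}

int main(void)
{
	/* assumed workload: half of memory mapped, default swappiness 60 */
	printf("easy reclaim (prev_priority 12): %d\n",
	       reclaim_mapped_decision(50000, 100000, 12, 60));
	/* same workload, but reclaim is struggling (prev_priority 0) */
	printf("hard reclaim (prev_priority 0):  %d\n",
	       reclaim_mapped_decision(50000, 100000, 0, 60));
	return 0;
}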