#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
+#include <linux/rwsem.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
static long total_memory;
static LIST_HEAD(shrinker_list);
-static DECLARE_MUTEX(shrinker_sem);
+static DECLARE_RWSEM(shrinker_rwsem);
/*
* Add a shrinker callback to be called from the vm
*/
struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
{
struct shrinker *shrinker;

shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
if (shrinker) {
shrinker->shrinker = theshrinker;
shrinker->seeks = seeks;
shrinker->nr = 0;
- down(&shrinker_sem);
+ down_write(&shrinker_rwsem);
list_add(&shrinker->list, &shrinker_list);
- up(&shrinker_sem);
+ up_write(&shrinker_rwsem);
}
return shrinker;
}
EXPORT_SYMBOL(set_shrinker);

/*
* Remove one
*/
void remove_shrinker(struct shrinker *shrinker)
{
- down(&shrinker_sem);
+ down_write(&shrinker_rwsem);
list_del(&shrinker->list);
- up(&shrinker_sem);
+ up_write(&shrinker_rwsem);
kfree(shrinker);
}
EXPORT_SYMBOL(remove_shrinker);
-
+
#define SHRINK_BATCH 128
/*
* Call the shrink functions to age shrinkable caches
*/
static int shrink_slab(unsigned long scanned, unsigned int gfp_mask)
{
struct shrinker *shrinker;
- if (down_trylock(&shrinker_sem))
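+ /* A zero "scanned" would generate no slab pressure at all; assume
+ * one swap cluster was scanned so the caches still get aged. */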
+ if (scanned == 0)
+ scanned = SWAP_CLUSTER_MAX;
+
+ if (!down_read_trylock(&shrinker_rwsem))
return 0;
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
+ unsigned long total_scan;
delta = (4 * scanned) / shrinker->seeks;
delta *= (*shrinker->shrinker)(0, gfp_mask);
if (shrinker->nr < 0)
shrinker->nr = LONG_MAX; /* It wrapped! */
- if (shrinker->nr <= SHRINK_BATCH)
- continue;
- while (shrinker->nr) {
- long this_scan = shrinker->nr;
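+ /* Grab the pending work locally: with only the read lock held,
+ * other shrink_slab() callers may be running concurrently. */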
+ total_scan = shrinker->nr;
+ shrinker->nr = 0;
+
+ while (total_scan >= SHRINK_BATCH) {
+ long this_scan = SHRINK_BATCH;
int shrink_ret;
- if (this_scan > 128)
- this_scan = 128;
shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
- mod_page_state(slabs_scanned, this_scan);
- shrinker->nr -= this_scan;
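+ /* -1 means the shrinker cannot make progress (for instance the
+ * caller's gfp_mask forbids it), so stop pounding on it. */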
if (shrink_ret == -1)
break;
+ mod_page_state(slabs_scanned, this_scan);
+ total_scan -= this_scan;
+
cond_resched();
}
+
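+ /* Bank the unscanned remainder in shrinker->nr for the next pass. */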
+ shrinker->nr += total_scan;
}
- up(&shrinker_sem);
+ up_read(&shrinker_rwsem);
return 0;
}
-/* Must be called with page's rmap lock held. */
+/* Called without lock on whether page is mapped, so answer is unstable */
static inline int page_mapping_inuse(struct page *page)
{
struct address_space *mapping;
if (page_mapped(page) || PageSwapCache(page))
sc->nr_scanned++;
- page_map_lock(page);
- referenced = page_referenced(page);
- if (referenced && page_mapping_inuse(page)) {
- /* In active use or really unfreeable. Activate it. */
- page_map_unlock(page);
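+ /* The "1" tells page_referenced() that we hold the page lock. */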
+ referenced = page_referenced(page, 1);
+ /* In active use or really unfreeable? Activate it. */
+ if (referenced && page_mapping_inuse(page))
goto activate_locked;
- }
#ifdef CONFIG_SWAP
/*
* Anonymous process memory has backing store?
* Try to allocate it some swap space here.
- *
- * XXX: implement swap clustering ?
*/
if (PageAnon(page) && !PageSwapCache(page)) {
- page_map_unlock(page);
if (!add_to_swap(page))
goto activate_locked;
- page_map_lock(page);
}
#endif /* CONFIG_SWAP */
if (page_mapped(page) && mapping) {
switch (try_to_unmap(page)) {
case SWAP_FAIL:
- page_map_unlock(page);
goto activate_locked;
case SWAP_AGAIN:
- page_map_unlock(page);
goto keep_locked;
case SWAP_SUCCESS:
; /* try to free the page below */
}
}
- page_map_unlock(page);
if (PageDirty(page)) {
if (referenced)
page = lru_to_page(&l_hold);
list_del(&page->lru);
if (page_mapped(page)) {
- if (!reclaim_mapped) {
- list_add(&page->lru, &l_active);
- continue;
- }
- page_map_lock(page);
- if (page_referenced(page)) {
- page_map_unlock(page);
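+ /*
+ * Keep the page active if mapped-page reclaim is off, if it
+ * is anonymous with no swap to send it to, or if it has been
+ * referenced again.
+ */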
+ if (!reclaim_mapped ||
+ (total_swap_pages == 0 && PageAnon(page)) ||
+ page_referenced(page, 0)) {
list_add(&page->lru, &l_active);
continue;
}
- page_map_unlock(page);
- }
- /*
- * FIXME: need to consider page_count(page) here if/when we
- * reap orphaned pages via the LRU (Daniel's locking stuff)
- */
- if (total_swap_pages == 0 && PageAnon(page)) {
- list_add(&page->lru, &l_active);
- continue;
}
list_add(&page->lru, &l_inactive);
}
for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
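+ /* Zones can be empty of pages; there is nothing to scan there. */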
+ if (zone->present_pages == 0)
+ continue;
+
zone->temp_priority = sc->priority;
if (zone->prev_priority > sc->priority)
zone->prev_priority = sc->priority;
static int balance_pgdat(pg_data_t *pgdat, int nr_pages)
{
int to_free = nr_pages;
+ int all_zones_ok;
int priority;
int i;
- int total_scanned = 0, total_reclaimed = 0;
+ int total_scanned, total_reclaimed;
struct reclaim_state *reclaim_state = current->reclaim_state;
struct scan_control sc;
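+ /* Restart point: taken when a full set of priority passes still
+ * leaves some zone below its watermarks. */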
+loop_again:
+ total_scanned = 0;
+ total_reclaimed = 0;
sc.gfp_mask = GFP_KERNEL;
sc.may_writepage = 0;
sc.nr_mapped = read_page_state(nr_mapped);
}
for (priority = DEF_PRIORITY; priority >= 0; priority--) {
- int all_zones_ok = 1;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
unsigned long lru_pages = 0;
+ all_zones_ok = 1;
+
if (nr_pages == 0) {
/*
* Scan in the highmem->dma direction for the highest
for (i = pgdat->nr_zones - 1; i >= 0; i--) {
struct zone *zone = pgdat->node_zones + i;
+ if (zone->present_pages == 0)
+ continue;
+
if (zone->all_unreclaimable &&
priority != DEF_PRIORITY)
continue;
for (i = 0; i <= end_zone; i++) {
struct zone *zone = pgdat->node_zones + i;
+ if (zone->present_pages == 0)
+ continue;
+
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue;
total_reclaimed += sc.nr_reclaimed;
if (zone->all_unreclaimable)
continue;
- if (zone->pages_scanned > zone->present_pages * 2)
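+ /* Treat a zone as unreclaimable once we have scanned its LRU
+ * lists four times over without any pages being freed. */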
+ if (zone->pages_scanned >= (zone->nr_active +
+ zone->nr_inactive) * 4)
zone->all_unreclaimable = 1;
/*
* If we've done a decent amount of scanning and
* the reclaim ratio is low, start doing writepage
* even in laptop mode
*/
if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
total_scanned > total_reclaimed + total_reclaimed / 2)
sc.may_writepage = 1;
if (total_scanned && priority < DEF_PRIORITY - 2)
blk_congestion_wait(WRITE, HZ/10);
+
+ /*
+ * We do this so kswapd doesn't build up large priorities for
+ * example when it is freeing in parallel with allocators. It
+ * matches the direct reclaim path behaviour in terms of impact
+ * on zone->*_priority.
+ */
+ if (total_reclaimed >= SWAP_CLUSTER_MAX)
+ break;
}
out:
for (i = 0; i < pgdat->nr_zones; i++) {
struct zone *zone = pgdat->node_zones + i;

zone->prev_priority = zone->temp_priority;
}
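+ /* At least one zone is still below its watermarks: give the
+ * scheduler a chance, then balance the whole node again. */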
+ if (!all_zones_ok) {
+ cond_resched();
+ goto loop_again;
+ }
+
return total_reclaimed;
}
/*
* A zone is low on free memory, so wake its kswapd task to service it.
*/
void wakeup_kswapd(struct zone *zone)
{
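+ /* An empty zone has nothing for kswapd to reclaim. */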
+ if (zone->present_pages == 0)
+ return;
if (zone->free_pages > zone->pages_low)
return;
if (!waitqueue_active(&zone->zone_pgdat->kswapd_wait))
swap_setup();
for_each_pgdat(pgdat)
pgdat->kswapd
= find_task_by_pid(kernel_thread(kswapd, pgdat, CLONE_KERNEL));
total_memory = nr_free_pagecache_pages();
hotcpu_notifier(cpu_callback, 0);
return 0;