#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
+#include <linux/buffer_head.h>
-void default_unplug_io_fn(struct backing_dev_info *bdi)
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
.state = 0,
+ .capabilities = BDI_CAP_MAP_COPY,
.unplug_io_fn = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
/*
- * Initialise a struct file's readahead state
+ * Initialise a struct file's readahead state. Assumes that the caller has
+ * memset *ra to zero.
*/
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
- memset(ra, 0, sizeof(*ra));
ra->ra_pages = mapping->backing_dev_info->ra_pages;
- ra->average = ra->ra_pages / 2;
+ ra->prev_page = -1;
}
-EXPORT_SYMBOL(file_ra_state_init);
/*
* Return max readahead size for this inode in number-of-pages.
return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
}
+static inline void reset_ahead_window(struct file_ra_state *ra)
+{
+ /*
+ * ... but preserve ahead_start + ahead_size value,
+ * see 'recheck:' label in page_cache_readahead().
+ * Note: We never use ->ahead_size as rvalue without
+ * checking ->ahead_start != 0 first.
+ */
+ ra->ahead_size += ra->ahead_start;
+ ra->ahead_start = 0;
+}
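+
+/*
+ * Example: with ahead_start == 100 and ahead_size == 16, a reset leaves
+ * ahead_size == 116, so the end of the old ahead window
+ * (ahead_start + ahead_size) can still be recovered at the 'recheck:'
+ * label in page_cache_readahead().
+ */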
+
+static inline void ra_off(struct file_ra_state *ra)
+{
+ ra->start = 0;
+ ra->flags = 0;
+ ra->size = 0;
+ reset_ahead_window(ra);
+ return;
+}
+
+/*
+ * Set the initial window size: round the request size up to the next
+ * power of 2, then scale it - x 4 for a small size, x 2 for a medium
+ * one, and clamp to max for a large one.
+ * For a 128k (32 page) max readahead this yields 16k-128k initial windows.
+ */
+static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
+{
+ unsigned long newsize = roundup_pow_of_two(size);
+
+ if (newsize <= max / 32)
+ newsize = newsize * 4;
+ else if (newsize <= max / 4)
+ newsize = newsize * 2;
+ else
+ newsize = max;
+ return newsize;
+}
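+
+/*
+ * Example (assuming 4k pages and max = 32 pages, ie a 128k max readahead):
+ * a 3 page read rounds up to 4 pages; that is above max/32 but within
+ * max/4, so the initial window is doubled to 8 pages (32k). A 12 page
+ * read rounds up to 16, which exceeds max/4, so the window starts at the
+ * full 32 pages (128k).
+ */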
+
+/*
+ * Set the new window size; this is called only when I/O is to be submitted,
+ * not for each call to readahead. If a cache miss occurred, reduce the next
+ * I/O size, else increase it depending on how close to max we are.
+ */
+static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
+{
+ unsigned long max = get_max_readahead(ra);
+ unsigned long min = get_min_readahead(ra);
+ unsigned long cur = ra->size;
+ unsigned long newsize;
+
+ if (ra->flags & RA_FLAG_MISS) {
+ ra->flags &= ~RA_FLAG_MISS;
+ newsize = max((cur - 2), min);
+ } else if (cur < max / 16) {
+ newsize = 4 * cur;
+ } else {
+ newsize = 2 * cur;
+ }
+ return min(newsize, max);
+}
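+
+/*
+ * Example (max = 32 pages): a 4 page current window grows to 8 pages,
+ * 8 grows to 16, and 16 doubles to the 32 page maximum. Only windows
+ * below max/16 (here: a single page) quadruple. After a cache miss the
+ * next window shrinks by 2 pages instead, but never below the minimum.
+ */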
+
#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+/*
+ * see if a page needs releasing upon read_cache_pages() failure
+ * - the caller of read_cache_pages() may have set PG_private before calling,
+ * such as the NFS fs marking pages that are cached locally on disk, thus we
+ * need to give the fs a chance to clean up in the event of an error
+ */
+static void read_cache_pages_release_page(struct address_space *mapping,
+ struct page *page)
+{
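+	/*
+	 * The page was never added to the pagecache, so nobody else can
+	 * hold a reference to it or have it locked; the trylock below
+	 * must succeed, hence the BUG() if it does not.
+	 */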
+ if (PagePrivate(page)) {
+ if (TestSetPageLocked(page))
+ BUG();
+ page->mapping = mapping;
+ try_to_release_page(page, GFP_KERNEL);
+ page->mapping = NULL;
+ unlock_page(page);
+ }
+ page_cache_release(page);
+}
+
/**
- * read_cache_pages - populate an address space with some pages, and
- * start reads against them.
+ * read_cache_pages - populate an address space with some pages & start reads against them
* @mapping: the address_space
* @pages: The address of a list_head which contains the target pages. These
* pages have their ->index populated and are otherwise uninitialised.
* Hides the details of the LRU cache etc from the filesystems.
*/
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
- int (*filler)(void *, struct page *), void *data)
+ int (*filler)(void *, struct page *), void *data)
{
struct page *page;
struct pagevec lru_pvec;
page = list_to_page(pages);
list_del(&page->lru);
if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
- page_cache_release(page);
+ read_cache_pages_release_page(mapping, page);
continue;
}
ret = filler(data, page);
victim = list_to_page(pages);
list_del(&victim->lru);
- page_cache_release(victim);
+ read_cache_pages_release_page(mapping, victim);
}
break;
}
{
unsigned page_idx;
struct pagevec lru_pvec;
- int ret = 0;
+ int ret;
if (mapping->a_ops->readpages) {
ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
mapping->a_ops->readpage(filp, page);
if (!pagevec_add(&lru_pvec, page))
__pagevec_lru_add(&lru_pvec);
- } else {
+ } else
page_cache_release(page);
- }
}
pagevec_lru_add(&lru_pvec);
+ ret = 0;
out:
return ret;
}
* size: Number of pages in that read
 * Together, start and size form the "current window" (the readahead window).
- * next_size: The number of pages to read on the next readahead miss.
- * Has the magical value -1UL if readahead has been disabled.
* prev_page: The page which the readahead algorithm most-recently inspected.
- * prev_page is mainly an optimisation: if page_cache_readahead
- * sees that it is again being called for a page which it just
- * looked at, it can return immediately without making any state
- * changes.
+ * It is mainly used to detect sequential file reading.
+ * If page_cache_readahead sees that it is again being called for
+ * a page which it just looked at, it can return immediately without
+ * making any state changes.
* ahead_start,
* ahead_size: Together, these form the "ahead window".
* ra_pages: The externally controlled max readahead for this fd.
*
- * When readahead is in the "maximally shrunk" state (next_size == -1UL),
- * readahead is disabled. In this state, prev_page and size are used, inside
- * handle_ra_miss(), to detect the resumption of sequential I/O. Once there
- * has been a decent run of sequential I/O (defined by get_min_readahead),
- * readahead is reenabled.
+ * When readahead is in the off state (size == 0), readahead is disabled.
+ * In this state, prev_page is used to detect the resumption of sequential I/O.
*
* The readahead code manages two windows - the "current" and the "ahead"
* windows. The intent is that while the application is walking the pages
* in the current window, I/O is underway on the ahead window. When the
* current window is fully traversed, it is replaced by the ahead window
* and the ahead window is invalidated. When this copying happens, the
- * new current window's pages are probably still locked. When I/O has
- * completed, we submit a new batch of I/O, creating a new ahead window.
+ * new current window's pages are probably still locked. So
+ * we submit a new batch of I/O immediately, creating a new ahead window.
*
* So:
*
* ahead window.
*
* A `readahead hit' occurs when a read request is made against a page which is
- * inside the current window. Hits are good, and the window size (next_size)
- * is grown aggressively when hits occur. Two pages are added to the next
- * window size on each hit, which will end up doubling the next window size by
- * the time I/O is submitted for it.
- *
- * If readahead hits are more sparse (say, the application is only reading
- * every second page) then the window will build more slowly.
+ * the next sequential page. Ahead window calculations are done only when it
+ * is time to submit a new IO. The code ramps up the size aggressively at
+ * first, but slows down as it approaches max_readahead.
*
- * On a readahead miss (the application seeked away) the readahead window is
- * shrunk by 25%. We don't want to drop it too aggressively, because it is a
- * good assumption that an application which has built a good readahead window
- * will continue to perform linear reads. Either at the new file position, or
- * at the old one after another seek.
- *
- * After enough misses, readahead is fully disabled. (next_size = -1UL).
+ * Any seek/random IO will result in readahead being turned off. It will resume
+ * at the first sequential access.
*
* There is a special-case: if the first page which the application tries to
* read happens to be the first page of the file, it is assumed that a linear
- * read is about to happen and the window is immediately set to half of the
- * device maximum.
- *
- * A page request at (start + size) is not a miss at all - it's just a part of
- * sequential file reading.
+ * read is about to happen and the window is immediately set to an initial
+ * size based on the I/O request size and max_readahead.
*
- * This function is to be called for every page which is read, rather than when
- * it is time to perform readahead. This is so the readahead algorithm can
- * centrally work out the access patterns. This could be costly with many tiny
- * read()s, so we specifically optimise for that case with prev_page.
+ * This function is to be called for every read request, rather than when
+ * it is time to perform readahead. It is called only once for the entire I/O
+ * regardless of size unless readahead is unable to start enough I/O to satisfy
+ * the request (I/O request > max_readahead).
*/
/*
* behaviour which would occur if page allocations are causing VM writeback.
* We really don't want to intermingle reads and writes like that.
*
- * Returns the number of pages which actually had IO started against them.
+ * Returns the number of pages requested, or the maximum amount of I/O allowed.
+ *
+ * do_page_cache_readahead() returns -1 if it encountered request queue
+ * congestion.
*/
-static inline int
+static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
struct inode *inode = mapping->host;
struct page *page;
/*
* Preallocate as many pages as we will need.
*/
- spin_lock_irq(&mapping->tree_lock);
+ read_lock_irq(&mapping->tree_lock);
for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
- unsigned long page_offset = offset + page_idx;
+ pgoff_t page_offset = offset + page_idx;
if (page_offset > end_index)
break;
if (page)
continue;
- spin_unlock_irq(&mapping->tree_lock);
+ read_unlock_irq(&mapping->tree_lock);
page = page_cache_alloc_cold(mapping);
- spin_lock_irq(&mapping->tree_lock);
+ read_lock_irq(&mapping->tree_lock);
if (!page)
break;
page->index = page_offset;
list_add(&page->lru, &page_pool);
ret++;
}
- spin_unlock_irq(&mapping->tree_lock);
+ read_unlock_irq(&mapping->tree_lock);
/*
* Now start the IO. We ignore I/O errors - if the page is not
* memory at once.
*/
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
int ret = 0;
return ret;
}
+/*
+ * Check how effective readahead is being. If the amount of started IO is
+ * less than expected then the file is partly or fully in pagecache and
+ * readahead isn't helping.
+ */
+static inline int check_ra_success(struct file_ra_state *ra,
+ unsigned long nr_to_read, unsigned long actual)
+{
+ if (actual == 0) {
+ ra->cache_hit += nr_to_read;
+ if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
+ ra_off(ra);
+ ra->flags |= RA_FLAG_INCACHE;
+ return 0;
+ }
+ } else {
+		ra->cache_hit = 0;
+ }
+ return 1;
+}
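+
+/*
+ * Example: when every call finds all of its pages already cached
+ * (actual == 0), the hits accumulate in ->cache_hit; once they reach
+ * VM_MAX_CACHE_HIT, readahead is switched off and RA_FLAG_INCACHE is
+ * set. A single call that starts real IO resets the counter to zero.
+ */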
+
/*
* This version skips the IO if the queue is read-congested, and will tell the
* block layer to abandon the readahead if request allocation would block.
* request queues.
*/
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
- unsigned long offset, unsigned long nr_to_read)
+ pgoff_t offset, unsigned long nr_to_read)
{
- if (!bdi_read_congested(mapping->backing_dev_info))
- return __do_page_cache_readahead(mapping, filp,
- offset, nr_to_read);
- return 0;
+ if (bdi_read_congested(mapping->backing_dev_info))
+ return -1;
+
+ return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
}
/*
- * Check how effective readahead is being. If the amount of started IO is
- * less than expected then the file is partly or fully in pagecache and
- * readahead isn't helping. Shrink the window.
- *
- * But don't shrink it too much - the application may read the same page
- * occasionally.
+ * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
+ * is set, wait until the read completes. Otherwise attempt to read without
+ * blocking.
+ * Returns 1 ('success') if the read was issued without switching off
+ * readahead mode; otherwise returns 0 ('failure').
*/
-static inline void
-check_ra_success(struct file_ra_state *ra, pgoff_t attempt,
- pgoff_t actual, pgoff_t orig_next_size)
+static int
+blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t offset, unsigned long nr_to_read,
+ struct file_ra_state *ra, int block)
{
- if (actual == 0) {
- if (orig_next_size > 1) {
- ra->next_size = orig_next_size - 1;
- if (ra->ahead_size)
- ra->ahead_size = ra->next_size;
- } else {
- ra->next_size = -1UL;
- ra->size = 0;
- }
+ int actual;
+
+ if (!block && bdi_read_congested(mapping->backing_dev_info))
+ return 0;
+
+ actual = __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
+
+ return check_ra_success(ra, nr_to_read, actual);
+}
+
+static int make_ahead_window(struct address_space *mapping, struct file *filp,
+ struct file_ra_state *ra, int force)
+{
+ int block, ret;
+
+ ra->ahead_size = get_next_ra_size(ra);
+ ra->ahead_start = ra->start + ra->size;
+
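+	/*
+	 * Read the ahead window synchronously (block) if the caller forces
+	 * it, or if the application has already walked into the new window.
+	 */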
+ block = force || (ra->prev_page >= ra->ahead_start);
+ ret = blockable_page_cache_readahead(mapping, filp,
+ ra->ahead_start, ra->ahead_size, ra, block);
+
+ if (!ret && !force) {
+		/* A read failure in blocking mode implies the pages are
+		 * all cached, so we can safely assume we have taken
+		 * care of all the pages requested in this call.
+		 * A read failure in non-blocking mode implies we are
+		 * reading more pages than were requested in this call,
+		 * so again we can safely assume we have taken care of
+		 * all the pages requested in this call.
+		 *
+		 * Just reset the ahead window in case we failed due to
+		 * congestion. The ahead window will be closed anyway if
+		 * we failed due to excessive page cache hits.
+ */
+ reset_ahead_window(ra);
}
+
+ return ret;
}
-/*
- * page_cache_readahead is the main function. If performs the adaptive
+/**
+ * page_cache_readahead - generic adaptive readahead
+ * @mapping: address_space which holds the pagecache and I/O vectors
+ * @ra: file_ra_state which holds the readahead state
+ * @filp: passed on to ->readpage() and ->readpages()
+ * @offset: start offset into @mapping, in PAGE_CACHE_SIZE units
+ * @req_size: hint: total size of the read which the caller is performing in
+ * PAGE_CACHE_SIZE units
+ *
+ * page_cache_readahead() is the main function. It performs the adaptive
* readahead window size management and submits the readahead I/O.
+ *
+ * Note that @filp is purely used for passing on to the ->readpage[s]()
+ * handler: it may refer to a different file from @mapping (so we may not use
+ * @filp->f_mapping or @filp->f_dentry->d_inode here).
+ * Also, @ra may not be equal to &@filp->f_ra.
+ */
-void
+unsigned long
page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
- struct file *filp, unsigned long offset)
+ struct file *filp, pgoff_t offset, unsigned long req_size)
{
- unsigned max;
- unsigned min;
- unsigned orig_next_size;
- unsigned actual;
- int first_access=0;
- unsigned long preoffset=0;
+ unsigned long max, newsize;
+ int sequential;
/*
- * Here we detect the case where the application is performing
- * sub-page sized reads. We avoid doing extra work and bogusly
- * perturbing the readahead window expansion logic.
- * If next_size is zero, this is the very first read for this
- * file handle, or the window is maximally shrunk.
+	 * If the request starts at the page we inspected last time (as with
+	 * sub-page sized reads), step past that page, so that we avoid doing
+	 * extra work and bogusly perturbing the window expansion logic.
*/
- if (offset == ra->prev_page) {
- if (ra->next_size != 0)
- goto out;
- }
+ if (offset == ra->prev_page && --req_size)
+ ++offset;
- if (ra->next_size == -1UL)
- goto out; /* Maximally shrunk */
+	/* Note that prev_page == -1 on the very first read */
+ sequential = (offset == ra->prev_page + 1);
+ ra->prev_page = offset;
max = get_max_readahead(ra);
- if (max == 0)
- goto out; /* No readahead */
+ newsize = min(req_size, max);
+
+ /* No readahead or sub-page sized read or file already in cache */
+ if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE))
+ goto out;
- min = get_min_readahead(ra);
- orig_next_size = ra->next_size;
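+	/* Advance prev_page to the last page of this request */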
+ ra->prev_page += newsize - 1;
+
+ /*
+	 * Special case - first read at the start of a file, or the first
+	 * detected sequential access. We'll assume it's a whole-file read
+	 * and grow the window fast.
+ */
+ if (sequential && ra->size == 0) {
+ ra->size = get_init_ra_size(newsize, max);
+ ra->start = offset;
+ if (!blockable_page_cache_readahead(mapping, filp, offset,
+ ra->size, ra, 1))
+ goto out;
- if (ra->next_size == 0) {
/*
- * Special case - first read.
- * We'll assume it's a whole-file read, and
- * grow the window fast.
+ * If the request size is larger than our max readahead, we
+ * at least want to be sure that we get 2 IOs in flight and
+	 * we know that we will definitely need the new I/O.
+	 * Once we do this, subsequent calls should be able to overlap
+	 * IOs, thus preventing stalls. So issue the ahead window
+	 * immediately.
*/
- first_access=1;
- ra->next_size = max / 2;
- ra->prev_page = offset;
- ra->serial_cnt++;
- goto do_io;
- }
+ if (req_size >= max)
+ make_ahead_window(mapping, filp, ra, 1);
- if (offset == ra->prev_page + 1) {
- if (ra->serial_cnt <= (max * 2))
- ra->serial_cnt++;
- } else {
- ra->average = (ra->average + ra->serial_cnt) / 2;
- ra->serial_cnt = 1;
+ goto out;
}
- preoffset = ra->prev_page;
- ra->prev_page = offset;
- if (offset >= ra->start && offset <= (ra->start + ra->size)) {
- /*
- * A readahead hit. Either inside the window, or one
- * page beyond the end. Expand the next readahead size.
- */
- ra->next_size += 2;
- } else {
- /*
- * A miss - lseek, pagefault, pread, etc. Shrink the readahead
- * window.
- */
- ra->next_size -= 2;
+ /*
+ * Now handle the random case:
+ * partial page reads and first access were handled above,
+	 * so anything that is not the next sequential page is random IO
+ */
+ if (!sequential) {
+ ra_off(ra);
+ blockable_page_cache_readahead(mapping, filp, offset,
+ newsize, ra, 1);
+ goto out;
}
- if ((long)ra->next_size > (long)max)
- ra->next_size = max;
- if ((long)ra->next_size <= 0L) {
- ra->next_size = -1UL;
- ra->size = 0;
- goto out; /* Readahead is off */
+ /*
+ * If we get here we are doing sequential IO and this was not the first
+	 * occurrence (ie we have an existing window)
+ */
+ if (ra->ahead_start == 0) { /* no ahead window yet */
+ if (!make_ahead_window(mapping, filp, ra, 0))
+ goto recheck;
}
/*
- * Is this request outside the current window?
+	 * We already have an ahead window; check whether we have crossed
+	 * into it. If so, shift the windows and issue a new ahead window.
+	 * Only return the number of pages in the current window, so that
+	 * we get called back on the first page of the ahead window, which
+	 * will allow us to submit more IO.
*/
- if (offset < ra->start || offset >= (ra->start + ra->size)) {
- /*
- * A miss against the current window. Have we merely
- * advanced into the ahead window?
- */
- if (offset == ra->ahead_start) {
- /*
- * Yes, we have. The ahead window now becomes
- * the current window.
- */
- ra->start = ra->ahead_start;
- ra->size = ra->ahead_size;
- ra->prev_page = ra->start;
- ra->ahead_start = 0;
- ra->ahead_size = 0;
-
- /*
- * Control now returns, probably to sleep until I/O
- * completes against the first ahead page.
- * When the second page in the old ahead window is
- * requested, control will return here and more I/O
- * will be submitted to build the new ahead window.
- */
- goto out;
- }
-do_io:
- /*
- * This is the "unusual" path. We come here during
- * startup or after an lseek. We invalidate the
- * ahead window and get some I/O underway for the new
- * current window.
- */
- if (!first_access && preoffset >= ra->start &&
- preoffset < (ra->start + ra->size)) {
- /* Heuristic: If 'n' pages were
- * accessed in the current window, there
- * is a high probability that around 'n' pages
- * shall be used in the next current window.
- *
- * To minimize lazy-readahead triggered
- * in the next current window, read in
- * an extra page.
- */
- ra->next_size = preoffset - ra->start + 2;
- }
- ra->start = offset;
- ra->size = ra->next_size;
- ra->ahead_start = 0; /* Invalidate these */
- ra->ahead_size = 0;
- actual = do_page_cache_readahead(mapping, filp, offset,
- ra->size);
- if(!first_access) {
- /*
- * do not adjust the readahead window size the first
- * time, the ahead window might get closed if all
- * the pages are already in the cache.
- */
- check_ra_success(ra, ra->size, actual, orig_next_size);
- }
- } else {
- /*
- * This read request is within the current window. It may be
- * time to submit I/O for the ahead window while the
- * application is about to step into the ahead window.
- */
- if (ra->ahead_start == 0) {
- /*
- * if the average io-size is less than maximum
- * readahead size of the file the io pattern is
- * sequential. Hence bring in the readahead window
- * immediately.
- * Else the i/o pattern is random. Bring
- * in the readahead window only if the last page of
- * the current window is accessed (lazy readahead).
- */
- unsigned long average = ra->average;
-
- if (ra->serial_cnt > average)
- average = (ra->serial_cnt + ra->average) / 2;
-
- if ((average >= max) || (offset == (ra->start +
- ra->size - 1))) {
- ra->ahead_start = ra->start + ra->size;
- ra->ahead_size = ra->next_size;
- actual = do_page_cache_readahead(mapping, filp,
- ra->ahead_start, ra->ahead_size);
- check_ra_success(ra, ra->ahead_size,
- actual, orig_next_size);
- }
- }
+ if (ra->prev_page >= ra->ahead_start) {
+ ra->start = ra->ahead_start;
+ ra->size = ra->ahead_size;
+ make_ahead_window(mapping, filp, ra, 0);
+recheck:
+ /* prev_page shouldn't overrun the ahead window */
+ ra->prev_page = min(ra->prev_page,
+ ra->ahead_start + ra->ahead_size - 1);
}
+
out:
- return;
+ return ra->prev_page + 1;
}
-
+EXPORT_SYMBOL_GPL(page_cache_readahead);
/*
* handle_ra_miss() is called when it is known that a page which should have
* been present in the pagecache (we just did some readahead there) was in fact
* not found. This will happen if it was evicted by the VM (readahead
- * thrashing) or if the readahead window is maximally shrunk.
- *
- * If the window has been maximally shrunk (next_size == -1UL) then look to see
- * if we are getting misses against sequential file offsets. If so, and this
- * persists then resume readahead.
+ * thrashing).
*
- * Otherwise we're thrashing, so shrink the readahead window by three pages.
- * This is because it is grown by two pages on a readahead hit. Theory being
- * that the readahead window size will stabilise around the maximum level at
- * which there is no thrashing.
+ * Turn on the cache miss flag in the RA struct; this will cause the RA code
+ * to reduce the RA size on the next read.
*/
void handle_ra_miss(struct address_space *mapping,
struct file_ra_state *ra, pgoff_t offset)
{
- if (ra->next_size == -1UL) {
- const unsigned long max = get_max_readahead(ra);
-
- if (offset != ra->prev_page + 1) {
- ra->size = ra->size?ra->size-1:0; /* Not sequential */
- } else {
- ra->size++; /* A sequential read */
- if (ra->size >= max) { /* Resume readahead */
- ra->start = offset - max;
- ra->next_size = max;
- ra->size = max;
- ra->ahead_start = 0;
- ra->ahead_size = 0;
- }
- }
- ra->prev_page = offset;
- } else {
- const unsigned long min = get_min_readahead(ra);
-
- ra->next_size -= 3;
- if (ra->next_size < min)
- ra->next_size = min;
- }
+ ra->flags |= RA_FLAG_MISS;
+ ra->flags &= ~RA_FLAG_INCACHE;
+ ra->cache_hit = 0;
}
/*
unsigned long inactive;
unsigned long free;
- get_zone_counts(&active, &inactive, &free);
+ __get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
return min(nr, (inactive + free) / 2);
}