2 * mm/readahead.c - address_space-level file readahead.
4 * Copyright (C) 2002, Linus Torvalds
6 * 09Apr2002 akpm@zip.com.au
10 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/blkdev.h>
15 #include <linux/backing-dev.h>
16 #include <linux/pagevec.h>
18 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
21 EXPORT_SYMBOL(default_unplug_io_fn);
23 struct backing_dev_info default_backing_dev_info = {
24 .ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE,
26 .unplug_io_fn = default_unplug_io_fn,
28 EXPORT_SYMBOL_GPL(default_backing_dev_info);
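/*
 * Worked example, assuming the usual VM_MAX_READAHEAD of 128 (kB) and
 * 4 kB pages: ra_pages defaults to (128 * 1024) / 4096 = 32 pages,
 * i.e. 128 kB of readahead per file.
 */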
31 * Initialise a struct file's readahead state. Assumes that the caller has
35 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
37 ra->ra_pages = mapping->backing_dev_info->ra_pages;
42 * Return max readahead size for this inode in number-of-pages.
44 static inline unsigned long get_max_readahead(struct file_ra_state *ra)
49 static inline unsigned long get_min_readahead(struct file_ra_state *ra)
51 return (VM_MIN_READAHEAD * 1024) / PAGE_CACHE_SIZE;
54 static inline void ra_off(struct file_ra_state *ra)
65 * Set the initial window size, round to next power of 2 and square
66 * for small size, x 4 for medium, and x 2 for large
67 * for 128k (32 page) max ra
68 * 1-8 page = 32k initial, > 8 page = 128k initial
70 static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
72 unsigned long newsize = roundup_pow_of_two(size);
74 if (newsize <= max / 64)
75 newsize = newsize * newsize;
76 else if (newsize <= max / 4)
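/*
 * Worked example for the sizing above, assuming 4 kB pages and taking the
 * header comment's "x 4 for medium" at face value: with a 32-page (128 kB)
 * max, a 2-page request rounds up to 2 and, being <= max/4 = 8, becomes
 * 2 * 4 = 8 pages (32 kB).  With a 256-page (1 MB) max, a 3-page request
 * rounds up to 4 and, being <= max/64 = 4, is squared to 16 pages (64 kB).
 */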
84 * Set the new window size, this is called only when I/O is to be submitted,
85 * not for each call to readahead. If a cache miss occurred, reduce next I/O
86 * size, else increase depending on how close to max we are.
88 static unsigned long get_next_ra_size(unsigned long cur, unsigned long max,
89 unsigned long min, unsigned long * flags)
91 unsigned long newsize;
93 if (*flags & RA_FLAG_MISS) {
94 newsize = max((cur - 2), min);
95 *flags &= ~RA_FLAG_MISS;
96 } else if (cur < max / 16) {
101 return min(newsize, max);
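/*
 * Worked example for the miss branch above, assuming a 4-page minimum
 * (VM_MIN_READAHEAD of 16 kB with 4 kB pages): after a cache miss with a
 * current window of 8 pages, the next window is max(8 - 2, 4) = 6 pages,
 * and RA_FLAG_MISS is cleared again.
 */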
104 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
107 * read_cache_pages - populate an address space with some pages, and
108 * start reads against them.
109 * @mapping: the address_space
110 * @pages: The address of a list_head which contains the target pages. These
111 * pages have their ->index populated and are otherwise uninitialised.
112 * @filler: callback routine for filling a single page.
113 * @data: private data for the callback routine.
115 * Hides the details of the LRU cache etc from the filesystems.
117 int read_cache_pages(struct address_space *mapping, struct list_head *pages,
118 int (*filler)(void *, struct page *), void *data)
121 struct pagevec lru_pvec;
124 pagevec_init(&lru_pvec, 0);
126 while (!list_empty(pages)) {
127 page = list_to_page(pages);
128 list_del(&page->lru);
129 if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
130 page_cache_release(page);
133 ret = filler(data, page);
134 if (!pagevec_add(&lru_pvec, page))
135 __pagevec_lru_add(&lru_pvec);
137 while (!list_empty(pages)) {
140 victim = list_to_page(pages);
141 list_del(&victim->lru);
142 page_cache_release(victim);
147 pagevec_lru_add(&lru_pvec);
151 EXPORT_SYMBOL(read_cache_pages);
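/*
 * Hypothetical usage sketch (the myfs_* names are illustrative, not part of
 * any real filesystem): an ->readpages() implementation can hand the page
 * list straight to read_cache_pages(), passing its single-page read routine
 * as the filler callback.
 */
#if 0	/* example only */
static int myfs_filler(void *data, struct page *page)
{
	return myfs_readpage((struct file *)data, page);	/* assumed helper */
}

static int myfs_readpages(struct file *filp, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, myfs_filler, filp);
}
#endif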
153 static int read_pages(struct address_space *mapping, struct file *filp,
154 struct list_head *pages, unsigned nr_pages)
157 struct pagevec lru_pvec;
160 if (mapping->a_ops->readpages) {
161 ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
165 pagevec_init(&lru_pvec, 0);
166 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
167 struct page *page = list_to_page(pages);
168 list_del(&page->lru);
169 if (!add_to_page_cache(page, mapping,
170 page->index, GFP_KERNEL)) {
171 mapping->a_ops->readpage(filp, page);
172 if (!pagevec_add(&lru_pvec, page))
173 __pagevec_lru_add(&lru_pvec);
175 page_cache_release(page);
178 pagevec_lru_add(&lru_pvec);
186 * The fields in struct file_ra_state represent the most-recently-executed
189 * start: Page index at which we started the readahead
190 * size: Number of pages in that read
191 * Together, start and size form the "current window".
193 * next_size: The number of pages to read on the next readahead miss.
194 * Has the magical value -1UL if readahead has been disabled.
195 * prev_page: The page which the readahead algorithm most-recently inspected.
196 * prev_page is mainly an optimisation: if page_cache_readahead
197 * sees that it is again being called for a page which it just
198 * looked at, it can return immediately without making any state
201 * ahead_size: Together, these form the "ahead window".
202 * ra_pages: The externally controlled max readahead for this fd.
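 *
 * A simplified sketch of these fields as this file uses them (the real
 * struct file_ra_state definition lives in include/linux/fs.h and may
 * carry additional members):
 *
 *	struct file_ra_state {
 *		unsigned long start;		current window start
 *		unsigned long size;		current window size (-1 == off)
 *		unsigned long flags;		RA_FLAG_MISS / RA_FLAG_INCACHE
 *		unsigned long cache_hit;	pages found already cached
 *		unsigned long prev_page;	last page inspected
 *		unsigned long ahead_start;	ahead window start
 *		unsigned long ahead_size;	ahead window size
 *		unsigned long ra_pages;		per-fd max readahead
 *	};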
204 * When readahead is in the off state (size == -1UL), readahead is disabled.
205 * In this state, prev_page is used to detect the resumption of sequential I/O.
207 * The readahead code manages two windows - the "current" and the "ahead"
208 * windows. The intent is that while the application is walking the pages
209 * in the current window, I/O is underway on the ahead window. When the
210 * current window is fully traversed, it is replaced by the ahead window
211 * and the ahead window is invalidated. When this copying happens, the
212 * new current window's pages are probably still locked. So
213 * we submit a new batch of I/O immediately, creating a new ahead window.
217 * ----|----------------|----------------|-----
218 *     ^start           ^start+size
219 *                      ^ahead_start     ^ahead_start+ahead_size
220 *
221 *         ^ When this page is read, we submit I/O for the
222 *           ahead window.
224 * A `readahead hit' occurs when a read request is made against a page which is
225 * the next sequential page. Ahead window calculations are done only when it
226 * is time to submit a new IO. The code ramps up the size aggressively at first,
227 * but slows down as it approaches max_readahead.
229 * Any seek/random IO will result in readahead being turned off. It will resume
230 * at the first sequential access.
232 * There is a special-case: if the first page which the application tries to
233 * read happens to be the first page of the file, it is assumed that a linear
234 * read is about to happen and the window is immediately set to the initial size
235 * based on I/O request size and the max_readahead.
237 * A page request at (start + size) is not a miss at all - it's just a part of
238 * sequential file reading.
240 * This function is to be called for every read request, rather than when
241 * it is time to perform readahead. It is called only once for the entire I/O
242 * regardless of size unless readahead is unable to start enough I/O to satisfy
243 * the request (I/O request > max_readahead).
247 * do_page_cache_readahead actually reads a chunk of disk. It allocates all
248 * the pages first, then submits them all for I/O. This avoids the very bad
249 * behaviour which would occur if page allocations are causing VM writeback.
250 * We really don't want to intermingle reads and writes like that.
252 * Returns the number of pages requested, or the maximum amount of I/O allowed.
254 * do_page_cache_readahead() returns -1 if it encountered request queue
258 __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
259 unsigned long offset, unsigned long nr_to_read)
261 struct inode *inode = mapping->host;
263 unsigned long end_index; /* The last page we want to read */
264 LIST_HEAD(page_pool);
267 loff_t isize = i_size_read(inode);
272 end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);
275 * Preallocate as many pages as we will need.
277 spin_lock_irq(&mapping->tree_lock);
278 for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
279 unsigned long page_offset = offset + page_idx;
281 if (page_offset > end_index)
284 page = radix_tree_lookup(&mapping->page_tree, page_offset);
288 spin_unlock_irq(&mapping->tree_lock);
289 page = page_cache_alloc_cold(mapping);
290 spin_lock_irq(&mapping->tree_lock);
293 page->index = page_offset;
294 list_add(&page->lru, &page_pool);
297 spin_unlock_irq(&mapping->tree_lock);
300 * Now start the IO. We ignore I/O errors - if the page is not
301 * uptodate then the caller will launch readpage again, and
302 * will then handle the error.
305 read_pages(mapping, filp, &page_pool, ret);
306 BUG_ON(!list_empty(&page_pool));
312 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
315 int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
316 unsigned long offset, unsigned long nr_to_read)
320 if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
326 unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;
328 if (this_chunk > nr_to_read)
329 this_chunk = nr_to_read;
330 err = __do_page_cache_readahead(mapping, filp,
337 offset += this_chunk;
338 nr_to_read -= this_chunk;
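/*
 * Example of the chunking above, assuming 4 kB pages: this_chunk is
 * 512 pages per pass, so a forced 3 MB (768-page) readahead is issued
 * as one 512-page and one 256-page call to __do_page_cache_readahead().
 */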
344 * Check how effective readahead is being. If the amount of started IO is
345 * less than expected then the file is partly or fully in pagecache and
346 * readahead isn't helping.
349 int check_ra_success(struct file_ra_state *ra, unsigned long nr_to_read,
350 unsigned long actual)
353 ra->cache_hit += nr_to_read;
354 if (ra->cache_hit >= VM_MAX_CACHE_HIT) {
356 ra->flags |= RA_FLAG_INCACHE;
366 * This version skips the IO if the queue is read-congested, and will tell the
367 * block layer to abandon the readahead if request allocation would block.
369 * force_page_cache_readahead() will ignore queue congestion and will block on
372 int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
373 unsigned long offset, unsigned long nr_to_read)
375 if (bdi_read_congested(mapping->backing_dev_info))
378 return __do_page_cache_readahead(mapping, filp, offset, nr_to_read);
382 * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
383 * is set, wait until the read completes. Otherwise attempt to read without
385 * Returns 1 meaning 'success' if the read is successful without switching off
386 * readahead mode. Otherwise returns failure.
389 blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,
390 unsigned long offset, unsigned long nr_to_read,
391 struct file_ra_state *ra, int block)
396 actual = __do_page_cache_readahead(mapping, filp,
399 actual = do_page_cache_readahead(mapping, filp,
404 return check_ra_success(ra, nr_to_read, actual);
408 * page_cache_readahead is the main function. It performs the adaptive
409 * readahead window size management and submits the readahead I/O.
412 page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
413 struct file *filp, unsigned long offset,
414 unsigned long req_size)
416 unsigned long max, min;
417 unsigned long newsize = req_size;
421 * Here we detect the case where the application is performing
422 * sub-page sized reads. We avoid doing extra work and bogusly
423 * perturbing the readahead window expansion logic.
424 * If size is zero, there is no readahead window, so we need one
426 if (offset == ra->prev_page && req_size == 1 && ra->size != 0)
429 max = get_max_readahead(ra);
430 min = get_min_readahead(ra);
431 newsize = min(req_size, max);
433 if (newsize == 0 || (ra->flags & RA_FLAG_INCACHE)) {
435 ra->prev_page = offset;
436 goto out; /* No readahead or file already in cache */
439 * Special case - first read. We'll assume it's a whole-file read if
440 * at start of file, and grow the window fast. Or detect first
443 if ((ra->size == 0 && offset == 0) /* first io and start of file */
444 || (ra->size == -1 && ra->prev_page == offset - 1)) {
445 /* First sequential */
446 ra->prev_page = offset + newsize - 1;
447 ra->size = get_init_ra_size(newsize, max);
449 if (!blockable_page_cache_readahead(mapping, filp, offset,
454 * If the request size is larger than our max readahead, we
455 * at least want to be sure that we get 2 IOs in flight and
456 * we know that we will definitely need the new I/O.
457 * Once we do this, subsequent calls should be able to overlap
458 * IOs, thus preventing stalls. So issue the ahead window
461 if (req_size >= max) {
462 ra->ahead_size = get_next_ra_size(ra->size, max, min,
464 ra->ahead_start = ra->start + ra->size;
465 blockable_page_cache_readahead(mapping, filp,
466 ra->ahead_start, ra->ahead_size, ra, 1);
472 * Now handle the random case:
473 * partial page reads and first access were handled above,
474 * so this must be the next page, otherwise it is random
476 if ((offset != (ra->prev_page+1) || (ra->size == 0))) {
478 ra->prev_page = offset + newsize - 1;
479 blockable_page_cache_readahead(mapping, filp, offset,
485 * If we get here we are doing sequential IO and this was not the first
486 * occurrence (i.e. we have an existing window)
489 if (ra->ahead_start == 0) { /* no ahead window yet */
490 ra->ahead_size = get_next_ra_size(ra->size, max, min,
492 ra->ahead_start = ra->start + ra->size;
493 block = ((offset + newsize -1) >= ra->ahead_start);
494 if (!blockable_page_cache_readahead(mapping, filp,
495 ra->ahead_start, ra->ahead_size, ra, block)) {
496 /* A read failure in blocking mode implies pages are
497 * all cached. So we can safely assume we have taken
498 * care of all the pages requested in this call. A read
499 * failure in non-blocking mode implies we are reading
500 * more pages than requested in this call. So we safely
501 * assume we have taken care of all the pages requested
504 * Just reset the ahead window in case we failed due to
505 * congestion. The ahead window will anyway be closed
506 * in case we failed due to excessive page cache hits.
514 * Already have an ahead window, check if we crossed into it.
515 * If so, shift windows and issue a new ahead window.
516 * Only return the #pages that are in the current window, so that
517 * we get called back on the first page of the ahead window which
518 * will allow us to submit more IO.
520 if ((offset + newsize - 1) >= ra->ahead_start) {
521 ra->start = ra->ahead_start;
522 ra->size = ra->ahead_size;
523 ra->ahead_start = ra->ahead_start + ra->ahead_size;
524 ra->ahead_size = get_next_ra_size(ra->ahead_size,
525 max, min, &ra->flags);
526 block = ((offset + newsize - 1) >= ra->ahead_start);
527 if (!blockable_page_cache_readahead(mapping, filp,
528 ra->ahead_start, ra->ahead_size, ra, block)) {
529 /* A read failure in blocking mode implies pages are
530 * all cached. So we can safely assume we have taken
531 * care of all the pages requested in this call.
532 * A read failure in non-blocking mode implies we are
533 * reading more pages than requested in this call. So
534 * we safely assume we have taken care of all the pages
535 * requested in this call.
537 * Just reset the ahead window in case we failed due to
538 * congestion. The ahead window will anyway be closed
539 * in case we failed due to excessive page cache hits.
547 ra->prev_page = offset + newsize - 1;
552 * handle_ra_miss() is called when it is known that a page which should have
553 * been present in the pagecache (we just did some readahead there) was in fact
554 * not found. This will happen if it was evicted by the VM (readahead
557 * Turn on the cache miss flag in the RA struct; this will cause the RA code
558 * to reduce the RA size on the next read.
560 void handle_ra_miss(struct address_space *mapping,
561 struct file_ra_state *ra, pgoff_t offset)
563 ra->flags |= RA_FLAG_MISS;
564 ra->flags &= ~RA_FLAG_INCACHE;
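/*
 * Hypothetical caller sketch (the real callers live in mm/filemap.c; every
 * name other than the two readahead entry points is illustrative): a
 * sequential read path tells readahead about each request up front, then
 * reports a miss if an expected page has since been reclaimed.
 */
#if 0	/* example only */
static void example_read_one_page(struct address_space *mapping,
				  struct file_ra_state *ra, struct file *filp,
				  pgoff_t index, unsigned long req_size)
{
	struct page *page;

	/* Tell readahead about every read request we make. */
	page_cache_readahead(mapping, ra, filp, index, req_size);

	page = find_get_page(mapping, index);
	if (!page) {
		/* Readahead page was evicted: shrink the next window. */
		handle_ra_miss(mapping, ra, index);
		/* ...fall back to a synchronous ->readpage() here... */
		return;
	}
	/* ...copy the data out, then drop our reference... */
	page_cache_release(page);
}
#endif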
568 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
569 * sensible upper limit.
571 unsigned long max_sane_readahead(unsigned long nr)
573 unsigned long active;
574 unsigned long inactive;
577 __get_zone_counts(&active, &inactive, &free, NODE_DATA(numa_node_id()));
578 return min(nr, (inactive + free) / 2);
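/*
 * Example of the cap above: with 60,000 inactive and 20,000 free pages on
 * the local node, even a 100,000-page request is trimmed to
 * (60000 + 20000) / 2 = 40,000 pages.
 */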