X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=mm%2Freadahead.c;h=a30e0dc60e9474413b75531b3ca727219625bdea;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=54d3393219345f0f6c593f535917caba20fcc1d2;hpb=16c70f8c1b54b61c3b951b6fb220df250fe09b32;p=linux-2.6.git

diff --git a/mm/readahead.c b/mm/readahead.c
index 54d339321..a30e0dc60 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -13,8 +13,8 @@
 #include 
 #include 
 #include 
+#include 
 #include 
-#include 
 
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
@@ -39,6 +39,7 @@ file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
 	ra->ra_pages = mapping->backing_dev_info->ra_pages;
 	ra->prev_page = -1;
 }
+EXPORT_SYMBOL_GPL(file_ra_state_init);
 
 /*
  * Return max readahead size for this inode in number-of-pages.
@@ -118,26 +119,6 @@ static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
 
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
-/*
- * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private before calling,
- *   such as the NFS fs marking pages that are cached locally on disk, thus we
- *   need to give the fs a chance to clean up in the event of an error
- */
-static void read_cache_pages_release_page(struct address_space *mapping,
-					   struct page *page)
-{
-	if (PagePrivate(page)) {
-		if (TestSetPageLocked(page))
-			BUG();
-		page->mapping = mapping;
-		try_to_release_page(page, GFP_KERNEL);
-		page->mapping = NULL;
-		unlock_page(page);
-	}
-	page_cache_release(page);
-}
-
 /**
  * read_cache_pages - populate an address space with some pages & start reads against them
  * @mapping: the address_space
@@ -161,22 +142,17 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 		page = list_to_page(pages);
 		list_del(&page->lru);
 		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
-			read_cache_pages_release_page(mapping, page);
+			page_cache_release(page);
 			continue;
 		}
 		ret = filler(data, page);
 		if (!pagevec_add(&lru_pvec, page))
 			__pagevec_lru_add(&lru_pvec);
 		if (ret) {
-			while (!list_empty(pages)) {
-				struct page *victim;
-
-				victim = list_to_page(pages);
-				list_del(&victim->lru);
-				read_cache_pages_release_page(mapping, victim);
-			}
+			put_pages_list(pages);
 			break;
 		}
+		task_io_account_read(PAGE_CACHE_SIZE);
 	}
 	pagevec_lru_add(&lru_pvec);
 	return ret;
@@ -193,6 +169,8 @@ static int read_pages(struct address_space *mapping, struct file *filp,
 
 	if (mapping->a_ops->readpages) {
 		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
+		/* Clean up the remaining pages */
+		put_pages_list(pages);
 		goto out;
 	}
 
@@ -229,6 +207,8 @@ out:
  * If page_cache_readahead sees that it is again being called for
  * a page which it just looked at, it can return immediately without
  * making any state changes.
+ * offset:      Offset in the prev_page where the last read ended - used for
+ *              detection of sequential file reading.
  * ahead_start,
  * ahead_size:  Together, these form the "ahead window".
  * ra_pages:    The externally controlled max readahead for this fd.
@@ -474,7 +454,7 @@ static int make_ahead_window(struct address_space *mapping, struct file *filp,
  *
  * Note that @filp is purely used for passing on to the ->readpage[s]()
  * handler: it may refer to a different file from @mapping (so we may not use
- * @filp->f_mapping or @filp->f_dentry->d_inode here).
+ * @filp->f_mapping or @filp->f_path.dentry->d_inode here).
  * Also, @ra may not be equal to &@filp->f_ra.
  *
  */
@@ -495,6 +475,7 @@ page_cache_readahead(struct address_space *mapping, struct file_ra_state *ra,
 	/* Note that prev_page == -1 if it is a first read */
 	sequential = (offset == ra->prev_page + 1);
 	ra->prev_page = offset;
+	ra->offset = 0;
 
 	max = get_max_readahead(ra);
 	newsize = min(req_size, max);
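
For context on the read_cache_pages() and read_pages() hunks above: the open-coded victim loop is replaced by a call to put_pages_list(), which drops the reference on every page still queued on the passed-in list. The following is a minimal sketch of what such a helper does, not the verbatim mm/swap.c implementation of that era; the function name put_pages_list_sketch and the exact comments are illustrative only.

/*
 * Sketch of a put_pages_list()-style helper: release every page still
 * on the list.  Pages were queued with list_add(), so they are taken
 * from the tail, mirroring the list_to_page() macro above.
 */
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static void put_pages_list_sketch(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);	/* drop the submitter's reference */
	}
}

Centralising this in one helper lets both error paths (add_to_page_cache() failure handling aside) and the post-->readpages() cleanup in read_pages() share the same release logic instead of duplicating the loop.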