Merge to Fedora kernel-2.6.18-1.2224_FC5 patched with stable patch-2.6.18.1-vs2.0...
diff --git a/mm/readahead.c b/mm/readahead.c
index 0f142a4..54d3393 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -14,6 +14,7 @@
 #include <linux/blkdev.h>
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
+#include <linux/buffer_head.h>
 
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
@@ -117,9 +118,28 @@ static inline unsigned long get_next_ra_size(struct file_ra_state *ra)
 
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
+/*
+ * see if a page needs releasing upon read_cache_pages() failure
+ * - the caller of read_cache_pages() may have set PG_private before calling,
+ *   such as the NFS fs marking pages that are cached locally on disk, thus we
+ *   need to give the fs a chance to clean up in the event of an error
+ */
+static void read_cache_pages_release_page(struct address_space *mapping,
+                                         struct page *page)
+{
+       if (PagePrivate(page)) {
+               if (TestSetPageLocked(page))
+                       BUG();
+               page->mapping = mapping;
+               try_to_release_page(page, GFP_KERNEL);
+               page->mapping = NULL;
+               unlock_page(page);
+       }
+       page_cache_release(page);
+}
+
 /**
- * read_cache_pages - populate an address space with some pages, and
- *                     start reads against them.
+ * read_cache_pages - populate an address space with some pages & start reads against them
  * @mapping: the address_space
  * @pages: The address of a list_head which contains the target pages.  These
  *   pages have their ->index populated and are otherwise uninitialised.
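
For context, not part of the patch: a minimal sketch, under assumed names, of how a filesystem's ->readpages() might hand its page list to read_cache_pages() with a filler callback. The examplefs_* identifiers are hypothetical; the point is that a filesystem can attach private state (PG_private) to these pages, which is why the failure path above must give it a chance to clean up via try_to_release_page().

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical filler: hand each page to the ordinary ->readpage() path.
 * read_cache_pages() has already added the page to the page cache and
 * locked it, so page->mapping is valid here. */
static int examplefs_filler(void *data, struct page *page)
{
	struct file *file = data;

	return page->mapping->a_ops->readpage(file, page);
}

/* Hypothetical ->readpages(): delegate the whole list to read_cache_pages(). */
static int examplefs_readpages(struct file *file, struct address_space *mapping,
			       struct list_head *pages, unsigned nr_pages)
{
	return read_cache_pages(mapping, pages, examplefs_filler, file);
}
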
@@ -141,7 +161,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
-                       page_cache_release(page);
+                       read_cache_pages_release_page(mapping, page);
                        continue;
                }
                ret = filler(data, page);
@@ -153,7 +173,7 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
 
                                victim = list_to_page(pages);
                                list_del(&victim->lru);
-                               page_cache_release(victim);
+                               read_cache_pages_release_page(mapping, victim);
                        }
                        break;
                }
@@ -182,14 +202,11 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
-                       ret = mapping->a_ops->readpage(filp, page);
-                       if (ret != AOP_TRUNCATED_PAGE) {
-                               if (!pagevec_add(&lru_pvec, page))
-                                       __pagevec_lru_add(&lru_pvec);
-                               continue;
-                       } /* else fall through to release */
-               }
-               page_cache_release(page);
+                       mapping->a_ops->readpage(filp, page);
+                       if (!pagevec_add(&lru_pvec, page))
+                               __pagevec_lru_add(&lru_pvec);
+               } else
+                       page_cache_release(page);
        }
        pagevec_lru_add(&lru_pvec);
        ret = 0;
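
As an aside, a minimal sketch (a hypothetical helper, not taken from the kernel) of the pagevec batching idiom the loop above relies on: pages are gathered into a small on-stack vector and pushed onto the LRU in batches, so the zone LRU lock is taken once per batch rather than once per page. It assumes the pages are not already on the LRU.

#include <linux/pagevec.h>
#include <linux/pagemap.h>

/* Hypothetical helper illustrating the pagevec idiom used in read_pages(). */
static void example_lru_add_batch(struct page **pages, int nr)
{
	struct pagevec lru_pvec;
	int i;

	pagevec_init(&lru_pvec, 0);		/* 0 => not a cold-page pagevec */
	for (i = 0; i < nr; i++) {
		page_cache_get(pages[i]);	/* reference owned by the pagevec */
		if (!pagevec_add(&lru_pvec, pages[i]))
			__pagevec_lru_add(&lru_pvec);	/* vector full: drain now */
	}
	pagevec_lru_add(&lru_pvec);		/* drain any remainder */
}
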
@@ -394,8 +411,8 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
  * Read 'nr_to_read' pages starting at page 'offset'. If the flag 'block'
  * is set wait till the read completes.  Otherwise attempt to read without
  * blocking.
- * Returns 1 meaning 'success' if read is succesfull without switching off
- * readhaead mode. Otherwise return failure.
+ * Returns 1 meaning 'success' if read is successful without switching off
+ * readahead mode. Otherwise return failure.
  */
 static int
 blockable_page_cache_readahead(struct address_space *mapping, struct file *filp,