/*
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c,
 * modified for async RPC by okir@monad.swb.de
 *
 * We do an ugly hack here in order to return proper error codes to the
 * user program when a read request fails: since generic_file_read()
 * only checks the return value of inode->i_op->readpage(), which is
 * always 0 for async RPC, we set the error bit of the page when an error
 * occurs, and make nfs_readpage() transmit the request synchronously
 * when it encounters such a page.  This is only a small problem, though,
 * since we now retry all operations within the RPC code when root
 * squashing is suspected.
 */
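/*
 * Illustrative sketch only (not code from this file): the fallback
 * described above amounts to a ->readpage() implementation of roughly
 * this shape, where do_async_read() and do_sync_read() are hypothetical
 * helpers standing in for the asynchronous and synchronous paths:
 *
 *	static int example_readpage(struct file *file, struct page *page)
 *	{
 *		if (!PageError(page))
 *			return do_async_read(file, page);
 *		return do_sync_read(file, page);
 *	}
 *
 * A failed async read sets the page's error bit, so the retry goes
 * through the synchronous path and its error code reaches the caller.
 */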
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/nfs_mount.h>
#include <linux/smp_lock.h>

#include <asm/system.h>
#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

static int nfs_pagein_one(struct list_head *, struct inode *);
static const struct rpc_call_ops nfs_read_partial_ops;
static const struct rpc_call_ops nfs_read_full_ops;

static kmem_cache_t *nfs_rdata_cachep;
static mempool_t *nfs_rdata_mempool;

#define MIN_POOL_READ	(32)
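/*
 * Allocate and initialise an nfs_read_data structure big enough to
 * describe a read of @len bytes.  Small reads use the embedded
 * page_array; larger ones get a separately allocated page vector.
 */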
struct nfs_read_data *nfs_readdata_alloc(size_t len)
{
	unsigned int pagecount = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);

	if (p) {
		memset(p, 0, sizeof(*p));
		INIT_LIST_HEAD(&p->pages);
		p->npages = pagecount;
		if (pagecount <= ARRAY_SIZE(p->page_array))
			p->pagevec = p->page_array;
		else {
			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
			if (!p->pagevec) {
				mempool_free(p, nfs_rdata_mempool);
				p = NULL;
			}
		}
	}
	return p;
}
static void nfs_readdata_free(struct nfs_read_data *p)
{
	if (p && (p->pagevec != &p->page_array[0]))
		kfree(p->pagevec);
	mempool_free(p, nfs_rdata_mempool);
}

void nfs_readdata_release(void *data)
{
	nfs_readdata_free(data);
}
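/*
 * Determine how many bytes of @page lie below the cached i_size of
 * @inode.  Returns 0 for a page that is entirely beyond end-of-file.
 */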
unsigned int nfs_page_length(struct inode *inode, struct page *page)
{
	loff_t i_size = i_size_read(inode);
	unsigned long idx;

	if (i_size <= 0)
		return 0;
	idx = (i_size - 1) >> PAGE_CACHE_SHIFT;
	if (page->index > idx)
		return 0;
	if (page->index != idx)
		return PAGE_CACHE_SIZE;
	return 1 + ((i_size - 1) & (PAGE_CACHE_SIZE - 1));
}
int nfs_return_empty_page(struct page *page)
{
	memclear_highpage_flush(page, 0, PAGE_CACHE_SIZE);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;
}
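/*
 * Zero the part of the requested range that the server returned no data
 * for (a short read that hit end-of-file), so that uninitialised page
 * contents are never exposed to user space.
 */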
static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
{
	unsigned int remainder = data->args.count - data->res.count;
	unsigned int base = data->args.pgbase + data->res.count;
	unsigned int pglen;
	struct page **pages;

	if (data->res.eof == 0 || remainder == 0)
		return;
	/*
	 * Note: "remainder" can never be negative, since we check for
	 * this in the XDR code.
	 */
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	pglen = PAGE_CACHE_SIZE - base;
	for (;;) {
		if (remainder <= pglen) {
			memclear_highpage_flush(*pages, base, remainder);
			break;
		}
		memclear_highpage_flush(*pages, base, pglen);
		pages++;
		remainder -= pglen;
		pglen = PAGE_CACHE_SIZE;
		base = 0;
	}
}
/*
 * Read a page synchronously.
 */
static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	unsigned int rsize = NFS_SERVER(inode)->rsize;
	unsigned int count = PAGE_CACHE_SIZE;
	int result;
	struct nfs_read_data *rdata;

	rdata = nfs_readdata_alloc(count);
	if (!rdata)
		return -ENOMEM;

	memset(rdata, 0, sizeof(*rdata));
	rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rdata->cred = ctx->cred;
	rdata->inode = inode;
	INIT_LIST_HEAD(&rdata->pages);
	rdata->args.fh = NFS_FH(inode);
	rdata->args.context = ctx;
	rdata->args.pages = &page;
	rdata->args.pgbase = 0UL;
	rdata->args.count = rsize;
	rdata->res.fattr = &rdata->fattr;

	dprintk("NFS: nfs_readpage_sync(%p)\n", page);

	/*
	 * This works now because the socket layer never tries to DMA
	 * into this buffer directly.
	 */
	do {
		if (count < rsize)
			rdata->args.count = count;
		rdata->res.count = rdata->args.count;
		rdata->args.offset = page_offset(page) + rdata->args.pgbase;

		dprintk("NFS: nfs_proc_read(%s, (%s/%Ld), %Lu, %u)\n",
			NFS_SERVER(inode)->nfs_client->cl_hostname,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			(unsigned long long)rdata->args.pgbase,
			rdata->args.count);

		lock_kernel();
		result = NFS_PROTO(inode)->read(rdata);
		unlock_kernel();

		/*
		 * Even if we had a partial success we can't mark the page
		 * cache valid.
		 */
		if (result < 0) {
			if (result == -EISDIR)
				result = -EINVAL;
			goto io_error;
		}
		count -= result;
		rdata->args.pgbase += result;
		nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, result);

		/* Note: result == 0 should only happen if we're caching
		 * a write that extends the file and punches a hole.
		 */
		if (rdata->res.eof != 0 || result == 0)
			break;
	} while (count);

	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);

	if (rdata->res.eof || rdata->res.count == rdata->args.count) {
		SetPageUptodate(page);
		if (rdata->res.eof && count != 0)
			memclear_highpage_flush(page, rdata->args.pgbase, count);
	}
	result = 0;

	nfs_readpage_to_fscache(inode, page, 1);

io_error:
	unlock_page(page);
	nfs_readdata_free(rdata);
	return result;
}
int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
		struct page *page)
{
	LIST_HEAD(one_request);
	struct nfs_page	*new;
	unsigned int len;

	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);

	nfs_list_add_request(new, &one_request);
	nfs_pagein_one(&one_request, inode);
	return 0;
}
static void nfs_readpage_release(struct nfs_page *req)
{
	struct inode *d_inode = req->wb_context->dentry->d_inode;

	if (PageUptodate(req->wb_page))
		nfs_readpage_to_fscache(d_inode, req->wb_page, 0);

	unlock_page(req->wb_page);

	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
			req->wb_context->dentry->d_inode->i_sb->s_id,
			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
	nfs_clear_request(req);
	nfs_release_request(req);
}
/*
 * Set up the NFS read request struct
 */
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
		const struct rpc_call_ops *call_ops,
		unsigned int count, unsigned int offset)
{
	struct inode *inode;
	int flags;

	data->req = req;
	data->inode = inode = req->wb_context->dentry->d_inode;
	data->cred = req->wb_context->cred;

	data->args.fh = NFS_FH(inode);
	data->args.offset = req_offset(req) + offset;
	data->args.pgbase = req->wb_pgbase + offset;
	data->args.pages = data->pagevec;
	data->args.count = count;
	data->args.context = req->wb_context;

	data->res.fattr = &data->fattr;
	data->res.count = count;
	data->res.eof = 0;
	nfs_fattr_init(&data->fattr);

	/* Set up the initial task struct. */
	flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
	NFS_PROTO(inode)->read_setup(data);

	data->task.tk_cookie = (unsigned long)inode;

	dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
			data->task.tk_pid,
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			count,
			(unsigned long long)data->args.offset);
}
static void
nfs_async_read_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		SetPageError(req->wb_page);
		nfs_readpage_release(req);
	}
}
/*
 * Start an async read operation
 */
static void nfs_execute_read(struct nfs_read_data *data)
{
	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
	sigset_t oldset;

	rpc_clnt_sigmask(clnt, &oldset);
	lock_kernel();
	rpc_execute(&data->task);
	unlock_kernel();
	rpc_clnt_sigunmask(clnt, &oldset);
}
/*
 * Generate multiple requests to fill a single page.
 *
 * We optimize to reduce the number of read operations on the wire.  If we
 * detect that we're reading a page, or an area of a page, that is past the
 * end of file, we do not generate NFS read operations but just clear the
 * parts of the page that would have come back zero from the server anyway.
 *
 * We rely on the cached value of i_size to make this determination; another
 * client can fill pages on the server past our cached end-of-file, but we
 * won't see the new data until our attribute cache is updated.  This is more
 * or less conventional NFS client behavior.
 */
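/*
 * Worked example (illustrative numbers only): with PAGE_CACHE_SIZE = 4096
 * and a mount using rsize=1024, a single page is filled by four RPCs of
 * 1024 bytes at pgbase 0, 1024, 2048 and 3072.  req->wb_complete counts
 * the outstanding sub-requests, so the page is only marked up to date
 * once the last of them completes (see nfs_readpage_result_partial).
 */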
static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req = nfs_list_entry(head->next);
	struct page *page = req->wb_page;
	struct nfs_read_data *data;
	size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
	unsigned int offset;
	int requests = 0;
	LIST_HEAD(list);

	nfs_list_remove_request(req);

	nbytes = req->wb_bytes;
	do {
		size_t len = min(nbytes,rsize);

		data = nfs_readdata_alloc(len);
		if (!data)
			goto out_bad;
		INIT_LIST_HEAD(&data->pages);
		list_add(&data->pages, &list);
		requests++;
		nbytes -= len;
	} while(nbytes != 0);
	atomic_set(&req->wb_complete, requests);

	ClearPageError(page);
	offset = 0;
	nbytes = req->wb_bytes;
	do {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del_init(&data->pages);

		data->pagevec[0] = page;

		if (nbytes > rsize) {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					rsize, offset);
			offset += rsize;
			nbytes -= rsize;
		} else {
			nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
					nbytes, offset);
			nbytes = 0;
		}
		nfs_execute_read(data);
	} while (nbytes != 0);

	return 0;

out_bad:
	while (!list_empty(&list)) {
		data = list_entry(list.next, struct nfs_read_data, pages);
		list_del(&data->pages);
		nfs_readdata_free(data);
	}
	SetPageError(page);
	nfs_readpage_release(req);
	return -ENOMEM;
}
static int nfs_pagein_one(struct list_head *head, struct inode *inode)
{
	struct nfs_page *req;
	struct page **pages;
	struct nfs_read_data *data;
	unsigned int count = 0;

	if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
		return nfs_pagein_multi(head, inode);

	data = nfs_readdata_alloc(NFS_SERVER(inode)->rsize);
	if (!data)
		goto out_bad;

	INIT_LIST_HEAD(&data->pages);
	pages = data->pagevec;
	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_list_add_request(req, &data->pages);
		ClearPageError(req->wb_page);
		*pages++ = req->wb_page;
		count += req->wb_bytes;
	}
	req = nfs_list_entry(data->pages.next);

	nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);

	nfs_execute_read(data);
	return 0;
out_bad:
	nfs_async_read_error(head);
	return -ENOMEM;
}
static int
nfs_pagein_list(struct list_head *head, int rpages)
{
	LIST_HEAD(one_request);
	struct nfs_page	*req;
	int error = 0;
	unsigned int pages = 0;

	while (!list_empty(head)) {
		pages += nfs_coalesce_requests(head, &one_request, rpages);
		req = nfs_list_entry(one_request.next);
		error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
		if (error < 0)
			break;
	}
	if (error >= 0)
		return pages;

	nfs_async_read_error(head);
	return error;
}
/*
 * Handle a read reply that fills part of a page.
 */
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;
	struct nfs_page *req = data->req;
	struct page *page = req->wb_page;

	if (likely(task->tk_status >= 0))
		nfs_readpage_truncate_uninitialised_page(data);
	else
		SetPageError(page);
	if (nfs_readpage_result(task, data) != 0)
		return;
	if (atomic_dec_and_test(&req->wb_complete)) {
		if (!PageError(page))
			SetPageUptodate(page);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_partial_ops = {
	.rpc_call_done = nfs_readpage_result_partial,
	.rpc_release = nfs_readdata_release,
};
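/*
 * Mark the pages covered by a completed read as up to date.  On a short
 * read that hit end-of-file the remaining bytes have already been zeroed,
 * so the whole requested range can be marked up to date.
 */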
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
{
	unsigned int count = data->res.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	if (data->res.eof)
		count = data->args.count;
	if (unlikely(count == 0))
		return;
	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageUptodate(*pages);
	if (count != 0)
		SetPageUptodate(*pages);
}
static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
{
	unsigned int count = data->args.count;
	unsigned int base = data->args.pgbase;
	struct page **pages;

	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
	base &= ~PAGE_CACHE_MASK;
	count += base;
	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
		SetPageError(*pages);
	if (count != 0)
		SetPageError(*pages);
}
/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
{
	struct nfs_read_data *data = calldata;

	/*
	 * Note: nfs_readpage_result() may change the values of
	 * data->args, so in the multi-page case we must mark the
	 * pages up to date *before* calling it.
	 */
	if (likely(task->tk_status >= 0)) {
		nfs_readpage_truncate_uninitialised_page(data);
		nfs_readpage_set_pages_uptodate(data);
	} else
		nfs_readpage_set_pages_error(data);
	if (nfs_readpage_result(task, data) != 0)
		return;
	while (!list_empty(&data->pages)) {
		struct nfs_page *req = nfs_list_entry(data->pages.next);

		nfs_list_remove_request(req);
		nfs_readpage_release(req);
	}
}

static const struct rpc_call_ops nfs_read_full_ops = {
	.rpc_call_done = nfs_readpage_result_full,
	.rpc_release = nfs_readdata_release,
};
/*
 * Helper shared by the full and partial read completion callbacks:
 * check the RPC result and, on a short read, restart the call to fetch
 * the remaining data.
 */
int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
	struct nfs_readargs *argp = &data->args;
	struct nfs_readres *resp = &data->res;
	int status;

	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
		task->tk_pid, task->tk_status);

	status = NFS_PROTO(data->inode)->read_done(task, data);
	if (status != 0)
		return status;

	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);

	/* Is this a short read? */
	if (task->tk_status >= 0 && resp->count < argp->count && !resp->eof) {
		nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
		/* Has the server at least made some progress? */
		if (resp->count != 0) {
			/* Yes, so retry the read at the end of the data */
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
			rpc_restart_call(task);
			return -EAGAIN;
		}
		task->tk_status = -EIO;
	}
	spin_lock(&data->inode->i_lock);
	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&data->inode->i_lock);
	return 0;
}
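/*
 * Illustrative example (numbers invented): a request with argp->count =
 * 16384 that comes back with resp->count = 4096 and no EOF is restarted
 * with argp->offset and argp->pgbase advanced by 4096 and argp->count
 * reduced to 12288, so the remaining data is fetched by the resent RPC.
 */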
/*
 * Read a page over NFS.
 * We read the page synchronously in the following case:
 *  -	The error flag is set for this page. This happens only when a
 *	previous async read operation failed.
 */
int nfs_readpage(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx;
	struct inode *inode = page->mapping->host;
	int error;

	dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
		page, PAGE_CACHE_SIZE, page->index);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	nfs_add_stats(inode, NFSIOS_READPAGES, 1);

	/*
	 * Try to flush any pending writes to the file..
	 *
	 * NOTE! Because we own the page lock, there cannot
	 * be any new pending writes generated at this point
	 * for this page (other pages can be written to).
	 */
	error = nfs_wb_page(inode, page);
	if (error)
		goto out_error;

	if (file == NULL) {
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			return -EBADF;
	} else
		ctx = get_nfs_open_context((struct nfs_open_context *)
				file->private_data);
	if (!IS_SYNC(inode)) {
		error = nfs_readpage_from_fscache(ctx, inode, page);
		if (error == 0)
			goto out;

		error = nfs_readpage_async(ctx, inode, page);
		goto out;
	}

	error = nfs_readpage_sync(ctx, inode, page);
	if (error < 0 && IS_SWAPFILE(inode))
		printk("Aiee.. nfs swap-in of page failed!\n");
out:
	put_nfs_open_context(ctx);
	return error;
out_error:
	unlock_page(page);
	return error;
}
struct nfs_readdesc {
	struct list_head *head;
	struct nfs_open_context *ctx;
};

static int
readpage_async_filler(void *data, struct page *page)
{
	struct nfs_readdesc *desc = (struct nfs_readdesc *)data;
	struct inode *inode = page->mapping->host;
	struct nfs_page *new;
	unsigned int len;

	nfs_wb_page(inode, page);
	len = nfs_page_length(inode, page);
	if (len == 0)
		return nfs_return_empty_page(page);
	new = nfs_create_request(desc->ctx, inode, page, 0, len);
	if (IS_ERR(new)) {
		SetPageError(page);
		unlock_page(page);
		return PTR_ERR(new);
	}
	if (len < PAGE_CACHE_SIZE)
		memclear_highpage_flush(page, len, PAGE_CACHE_SIZE - len);
	nfs_list_add_request(new, desc->head);
	return 0;
}
int nfs_readpages(struct file *filp, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	LIST_HEAD(head);
	struct nfs_readdesc desc = {
		.head = &head,
	};
	struct inode *inode = mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	int ret;

	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
			inode->i_sb->s_id,
			(long long)NFS_FILEID(inode),
			nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);

	if (filp == NULL) {
		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (desc.ctx == NULL)
			return -EBADF;
	} else
		desc.ctx = get_nfs_open_context((struct nfs_open_context *)
				filp->private_data);

	/* attempt to read as many of the pages as possible from the cache
	 * - this returns -ENOBUFS immediately if the cookie is negative
	 */
	ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
					 pages, &nr_pages);
	if (ret == 0) {
		put_nfs_open_context(desc.ctx);
		return ret; /* all read */
	}

	ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
	if (!list_empty(&head)) {
		int err = nfs_pagein_list(&head, server->rpages);
		if (!ret)
			nfs_add_stats(inode, NFSIOS_READPAGES, err);
		ret = err;
	}
	put_nfs_open_context(desc.ctx);
	return ret;
}
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_read_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL, NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ,
						     nfs_rdata_cachep);
	if (nfs_rdata_mempool == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	mempool_destroy(nfs_rdata_mempool);
	if (kmem_cache_destroy(nfs_rdata_cachep))
		printk(KERN_INFO "nfs_read_data: not all structures were freed\n");
}