/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 *
 */
39 #include <linux/config.h>
40 #include <linux/errno.h>
41 #include <linux/sched.h>
42 #include <linux/kernel.h>
43 #include <linux/smp_lock.h>
44 #include <linux/file.h>
45 #include <linux/pagemap.h>
47 #include <linux/nfs_fs.h>
48 #include <linux/nfs_page.h>
49 #include <linux/sunrpc/clnt.h>
51 #include <asm/system.h>
52 #include <asm/uaccess.h>
54 #define NFSDBG_FACILITY NFSDBG_VFS
55 #define VERF_SIZE (2 * sizeof(__u32))
56 #define MAX_DIRECTIO_SIZE (4096UL << PAGE_SHIFT)
60 * nfs_get_user_pages - find and set up pages underlying user's buffer
61 * rw: direction (read or write)
62 * user_addr: starting address of this segment of user's buffer
63 * count: size of this segment
64 * @pages: returned array of page struct pointers underlying user's buffer
67 nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
71 unsigned long page_count;
74 /* set an arbitrary limit to prevent arithmetic overflow */
75 if (size > MAX_DIRECTIO_SIZE)
78 page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
79 page_count -= user_addr >> PAGE_SHIFT;
81 array_size = (page_count * sizeof(struct page *));
82 *pages = kmalloc(array_size, GFP_KERNEL);
84 down_read(¤t->mm->mmap_sem);
85 result = get_user_pages(current, current->mm, user_addr,
86 page_count, (rw == READ), 0,
88 up_read(¤t->mm->mmap_sem);
/**
 * nfs_free_user_pages - tear down page struct array
 * @pages: array of page struct pointers underlying target buffer
 * @npages: number of pages in the array
 * @do_dirty: non-zero to mark each page dirty before releasing it
 *            (needed after a read filled the user's buffer)
 */
static void
nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
	int i;

	for (i = 0; i < npages; i++) {
		if (do_dirty)
			set_page_dirty_lock(pages[i]);
		page_cache_release(pages[i]);
	}
	kfree(pages);
}
110 * nfs_direct_read_seg - Read in one iov segment. Generate separate
111 * read RPCs for each "rsize" bytes.
112 * @inode: target inode
113 * @file: target file (may be NULL)
114 * user_addr: starting address of this segment of user's buffer
115 * count: size of this segment
116 * file_offset: offset in file to begin the operation
117 * @pages: array of addresses of page structs defining user's buffer
118 * nr_pages: size of pages array
121 nfs_direct_read_seg(struct inode *inode, struct file *file,
122 unsigned long user_addr, size_t count, loff_t file_offset,
123 struct page **pages, int nr_pages)
125 const unsigned int rsize = NFS_SERVER(inode)->rsize;
128 struct nfs_read_data rdata = {
132 .lockowner = current->files,
135 .fattr = &rdata.fattr,
139 rdata.args.pgbase = user_addr & ~PAGE_MASK;
140 rdata.args.offset = file_offset;
144 rdata.args.count = count;
145 if (rdata.args.count > rsize)
146 rdata.args.count = rsize;
147 rdata.args.pages = &pages[curpage];
149 dprintk("NFS: direct read: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
150 rdata.args.count, (long long) rdata.args.offset,
151 user_addr + tot_bytes, rdata.args.pgbase, curpage);
154 result = NFS_PROTO(inode)->read(&rdata, file);
160 if (result == -EISDIR)
169 rdata.args.offset += result;
170 rdata.args.pgbase += result;
171 curpage += rdata.args.pgbase >> PAGE_SHIFT;
172 rdata.args.pgbase &= ~PAGE_MASK;
174 } while (count != 0);
176 /* XXX: should we zero the rest of the user's buffer if we
183 * nfs_direct_read - For each iov segment, map the user's buffer
184 * then generate read RPCs.
185 * @inode: target inode
186 * @file: target file (may be NULL)
187 * @iov: array of vectors that define I/O buffer
188 * file_offset: offset in file to begin the operation
189 * nr_segs: size of iovec array
191 * generic_file_direct_IO has already pushed out any non-direct
192 * writes so that this read will see them when we read from the
196 nfs_direct_read(struct inode *inode, struct file *file,
197 const struct iovec *iov, loff_t file_offset,
198 unsigned long nr_segs)
200 ssize_t tot_bytes = 0;
201 unsigned long seg = 0;
203 while ((seg < nr_segs) && (tot_bytes >= 0)) {
207 const struct iovec *vec = &iov[seg++];
208 unsigned long user_addr = (unsigned long) vec->iov_base;
209 size_t size = vec->iov_len;
211 page_count = nfs_get_user_pages(READ, user_addr, size, &pages);
212 if (page_count < 0) {
213 nfs_free_user_pages(pages, 0, 0);
219 result = nfs_direct_read_seg(inode, file, user_addr, size,
220 file_offset, pages, page_count);
222 nfs_free_user_pages(pages, page_count, 1);
230 file_offset += result;
239 * nfs_direct_write_seg - Write out one iov segment. Generate separate
240 * write RPCs for each "wsize" bytes, then commit.
241 * @inode: target inode
242 * @file: target file (may be NULL)
243 * user_addr: starting address of this segment of user's buffer
244 * count: size of this segment
245 * file_offset: offset in file to begin the operation
246 * @pages: array of addresses of page structs defining user's buffer
247 * nr_pages: size of pages array
250 nfs_direct_write_seg(struct inode *inode, struct file *file,
251 unsigned long user_addr, size_t count, loff_t file_offset,
252 struct page **pages, int nr_pages)
254 const unsigned int wsize = NFS_SERVER(inode)->wsize;
256 int curpage, need_commit, result, tot_bytes;
257 struct nfs_writeverf first_verf;
258 struct nfs_write_data wdata = {
262 .lockowner = current->files,
265 .fattr = &wdata.fattr,
270 wdata.args.stable = NFS_UNSTABLE;
271 if (IS_SYNC(inode) || NFS_PROTO(inode)->version == 2 || count <= wsize)
272 wdata.args.stable = NFS_FILE_SYNC;
274 nfs_begin_data_update(inode);
280 wdata.args.pgbase = user_addr & ~PAGE_MASK;
281 wdata.args.offset = file_offset;
283 wdata.args.count = request;
284 if (wdata.args.count > wsize)
285 wdata.args.count = wsize;
286 wdata.args.pages = &pages[curpage];
288 dprintk("NFS: direct write: c=%u o=%Ld ua=%lu, pb=%u, cp=%u\n",
289 wdata.args.count, (long long) wdata.args.offset,
290 user_addr + tot_bytes, wdata.args.pgbase, curpage);
293 result = NFS_PROTO(inode)->write(&wdata, file);
303 memcpy(&first_verf.verifier, &wdata.verf.verifier,
305 if (wdata.verf.committed != NFS_FILE_SYNC) {
307 if (memcmp(&first_verf.verifier,
308 &wdata.verf.verifier, VERF_SIZE))
313 wdata.args.offset += result;
314 wdata.args.pgbase += result;
315 curpage += wdata.args.pgbase >> PAGE_SHIFT;
316 wdata.args.pgbase &= ~PAGE_MASK;
318 } while (request != 0);
321 * Commit data written so far, even in the event of an error
324 wdata.args.count = tot_bytes;
325 wdata.args.offset = file_offset;
328 result = NFS_PROTO(inode)->commit(&wdata, file);
331 if (result < 0 || memcmp(&first_verf.verifier,
332 &wdata.verf.verifier,
339 nfs_end_data_update_defer(inode);
344 wdata.args.stable = NFS_FILE_SYNC;
349 * nfs_direct_write - For each iov segment, map the user's buffer
350 * then generate write and commit RPCs.
351 * @inode: target inode
352 * @file: target file (may be NULL)
353 * @iov: array of vectors that define I/O buffer
354 * file_offset: offset in file to begin the operation
355 * nr_segs: size of iovec array
357 * Upon return, generic_file_direct_IO invalidates any cached pages
358 * that non-direct readers might access, so they will pick up these
359 * writes immediately.
362 nfs_direct_write(struct inode *inode, struct file *file,
363 const struct iovec *iov, loff_t file_offset,
364 unsigned long nr_segs)
366 ssize_t tot_bytes = 0;
367 unsigned long seg = 0;
369 while ((seg < nr_segs) && (tot_bytes >= 0)) {
373 const struct iovec *vec = &iov[seg++];
374 unsigned long user_addr = (unsigned long) vec->iov_base;
375 size_t size = vec->iov_len;
377 page_count = nfs_get_user_pages(WRITE, user_addr, size, &pages);
378 if (page_count < 0) {
379 nfs_free_user_pages(pages, 0, 0);
385 result = nfs_direct_write_seg(inode, file, user_addr, size,
386 file_offset, pages, page_count);
387 nfs_free_user_pages(pages, page_count, 0);
395 file_offset += result;
403 * nfs_direct_IO - NFS address space operation for direct I/O
404 * rw: direction (read or write)
405 * @iocb: target I/O control block
406 * @iov: array of vectors that define I/O buffer
407 * file_offset: offset in file to begin the operation
408 * nr_segs: size of iovec array
412 nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
413 loff_t file_offset, unsigned long nr_segs)
415 ssize_t result = -EINVAL;
416 struct file *file = iocb->ki_filp;
417 struct dentry *dentry = file->f_dentry;
418 struct inode *inode = dentry->d_inode;
421 * No support for async yet
423 if (!is_sync_kiocb(iocb))
428 dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
429 dentry->d_name.name, file_offset, nr_segs);
431 result = nfs_direct_read(inode, file, iov,
432 file_offset, nr_segs);
435 dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
436 dentry->d_name.name, file_offset, nr_segs);
438 result = nfs_direct_write(inode, file, iov,
439 file_offset, nr_segs);
448 * nfs_file_direct_read - file direct read operation for NFS files
449 * @iocb: target I/O control block
450 * @buf: user's buffer into which to read data
451 * count: number of bytes to read
452 * pos: byte offset in file where reading starts
454 * We use this function for direct reads instead of calling
455 * generic_file_aio_read() in order to avoid gfar's check to see if
456 * the request starts before the end of the file. For that check
457 * to work, we must generate a GETATTR before each direct read, and
458 * even then there is a window between the GETATTR and the subsequent
459 * READ where the file size could change. So our preference is simply
460 * to do all reads the application wants, and the server will take
461 * care of managing the end of file boundary.
463 * This function also eliminates unnecessarily updating the file's
464 * atime locally, as the NFS server sets the file's atime, and this
465 * client must read the updated atime from the server back into its
469 nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
471 ssize_t retval = -EINVAL;
472 loff_t *ppos = &iocb->ki_pos;
473 struct file *file = iocb->ki_filp;
474 struct dentry *dentry = file->f_dentry;
475 struct address_space *mapping = file->f_mapping;
476 struct inode *inode = mapping->host;
478 .iov_base = (char *)buf,
482 dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
483 dentry->d_parent->d_name.name, dentry->d_name.name,
484 (unsigned long) count, (unsigned long) pos);
486 if (!is_sync_kiocb(iocb))
491 if (!access_ok(VERIFY_WRITE, iov.iov_base, iov.iov_len))
497 if (mapping->nrpages) {
498 retval = filemap_fdatawrite(mapping);
500 retval = filemap_fdatawait(mapping);
505 retval = nfs_direct_read(inode, file, &iov, pos, 1);
507 *ppos = pos + retval;
514 * nfs_file_direct_write - file direct write operation for NFS files
515 * @iocb: target I/O control block
516 * @buf: user's buffer from which to write data
517 * count: number of bytes to write
518 * pos: byte offset in file where writing starts
520 * We use this function for direct writes instead of calling
521 * generic_file_aio_write() in order to avoid taking the inode
522 * semaphore and updating the i_size. The NFS server will set
523 * the new i_size and this client must read the updated size
524 * back into its cache. We let the server do generic write
525 * parameter checking and report problems.
527 * We also avoid an unnecessary invocation of generic_osync_inode(),
528 * as it is fairly meaningless to sync the metadata of an NFS file.
530 * We eliminate local atime updates, see direct read above.
532 * We avoid unnecessary page cache invalidations for normal cached
533 * readers of this file.
535 * Note that O_APPEND is not supported for NFS direct writes, as there
536 * is no atomic O_APPEND write facility in the NFS protocol.
539 nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
541 ssize_t retval = -EINVAL;
542 loff_t *ppos = &iocb->ki_pos;
543 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
544 struct file *file = iocb->ki_filp;
545 struct dentry *dentry = file->f_dentry;
546 struct address_space *mapping = file->f_mapping;
547 struct inode *inode = mapping->host;
549 .iov_base = (char __user *)buf,
553 dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
554 dentry->d_parent->d_name.name, dentry->d_name.name,
555 inode->i_ino, (unsigned long) count, (unsigned long) pos);
557 if (!is_sync_kiocb(iocb))
564 if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
567 retval = file->f_error;
572 if (limit != RLIM_INFINITY) {
574 send_sig(SIGXFSZ, current, 0);
577 if (count > limit - (unsigned long) pos)
578 count = limit - (unsigned long) pos;
584 if (mapping->nrpages) {
585 retval = filemap_fdatawrite(mapping);
587 retval = filemap_fdatawait(mapping);
592 retval = nfs_direct_write(inode, file, &iov, pos, 1);
593 if (mapping->nrpages)
594 invalidate_inode_pages2(mapping);
596 *ppos = pos + retval;