/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>
STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif
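
/*
 * I/O completion handler for buffers flagged as unwritten.  The buffer's
 * b_private carries the pagebuf tracking the whole unwritten region:
 * propagate any error to it, drop this buffer's share of its I/O count,
 * then run the normal async write completion for the buffer.
 */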
STATIC void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_buf_t		*pb = (xfs_buf_t *)bh->b_private;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;
	clear_buffer_unwritten(bh);
	if (!uptodate)
		pagebuf_ioerror(pb, EIO);
	if (atomic_dec_and_test(&pb->pb_io_remaining))
		pagebuf_iodone(pb, 1, 1);
	end_buffer_async_write(bh, uptodate);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
	xfs_buf_t	*bp)
{
	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
	int		error;

	BUG_ON(atomic_read(&bp->pb_hold) < 1);
	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
			BMAPI_UNWRITTEN, NULL, NULL, error);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_UNDATAIO(bp);
	iput(LINVFS_GET_IP(vp));
	pagebuf_iodone(bp, 0, 0);
}
/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
	struct inode	*inode,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lay beneath this IO,
	 * see linvfs_get_block_core.
	 */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	}
}
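
/*
 * Ask the filesystem, via VOP_BMAP, for the extent mapping covering
 * [offset, offset + count).  For a direct write that extends the file,
 * the request size is bumped (presumably so the allocator can do
 * speculative preallocation; the exact meaning of XFS_WRITE_IO_LOG here
 * is an assumption).  A delayed-allocate mapping returned to a direct
 * write is retried with BMAPI_ALLOCATE to force real allocation.
 */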
STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*iomapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, niomaps = 1;

	if (((flags & (BMAPI_DIRECT|BMAPI_SYNC)) == BMAPI_DIRECT) &&
	    (offset >= i_size_read(inode)))
		count = max_t(ssize_t, count, XFS_WRITE_IO_LOG);
retry:
	VOP_BMAP(vp, offset, count, flags, iomapp, &niomaps, error);
	if ((error == EAGAIN) || (error == EIO))
		return -error;
	if (unlikely((flags & (BMAPI_WRITE|BMAPI_DIRECT)) ==
					(BMAPI_WRITE|BMAPI_DIRECT) && niomaps &&
					(iomapp->iomap_flags & IOMAP_DELAY))) {
		flags = BMAPI_ALLOCATE;
		goto retry;
	}
	if (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
		VMODIFY(vp);
	}
	return -error;
}
/*
 * Find the mapping in @iomapp which covers the given @offset within
 * @page, or NULL if the offset lies outside the mapped range.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
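
/*
 * Attach a disk mapping to a buffer head: compute the buffer's sector
 * number from the iomap (iomap_bn is in 512-byte basic blocks, hence
 * the BBSHIFT adjustment) and mark the buffer mapped and not delayed.
 */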
STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}
/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_buf_t		*pb,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = pb;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}
out:
	unlock_page(page);
	return NULL;
}
/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}
out:
	unlock_page(page);
	return ret;
}
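
/*
 * Measure how far an unmapped run extends: first across the remaining
 * buffers of the starting page, then (pruned to 64 pages to avoid
 * pathological scans) across following dirty, unmapped pages, returning
 * the total length in bytes.
 */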
STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				break;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}
/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}
out:
	unlock_page(page);
	return NULL;
}
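
/*
 * Map a run of unwritten buffers for writeback and set up the state
 * needed to convert the underlying extent to written once all the I/O
 * completes.  A single "empty" pagebuf counts the outstanding buffer
 * I/Os across this page and any following pages covered by the same
 * extent; its iodone handler (linvfs_unwritten_convert) issues the
 * conversion transaction.
 */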
STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_buf_t		*pb;
	loff_t			offset, size;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	/* get an "empty" pagebuf to manage IO completion
	 * Proper values will be set before returning */
	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
	if (!pb)
		return -EAGAIN;

	/* Take a reference to the inode to prevent it from
	 * being reclaimed while we have outstanding unwritten
	 * extent IO on it.
	 */
	if ((igrab(inode)) != inode) {
		pagebuf_free(pb);
		return -EAGAIN;
	}

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	 */
	atomic_set(&pb->pb_io_remaining, 1);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, pb,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;		/* NB: using 64bit number here */
	size <<= block_bits;	/* convert fsb's to byte range */

	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

	if (atomic_dec_and_test(&pb->pb_io_remaining))
		pagebuf_iodone(pb, 1, 1);

	return 0;
}
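
/*
 * Mark the page and its selected buffers for writeback and submit the
 * buffer I/Os; if no buffers were handed in, nothing is in flight and
 * writeback on the page is ended immediately.
 */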
STATIC void
xfs_submit_page(
	struct page		*page,
	struct buffer_head	*bh_arr[],
	int			cnt)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	clear_page_dirty(page);
	unlock_page(page);

	if (cnt) {
		for (i = 0; i < cnt; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < cnt; i++)
			submit_bh(WRITE, bh_arr[i]);
	} else {
		end_page_writeback(page);
	}
}
/*
 * Allocate & map buffers for page given the extent map. Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		end, offset;
	pgoff_t			end_index;
	int			i = 0, index = 0;
	int			bbits = inode->i_blkbits;

	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index < end_index) {
		end = PAGE_CACHE_SIZE;
	} else {
		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
	}
	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !buffer_unwritten(bh) && !buffer_delay(bh)) {
			if (startio && (offset < end)) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (!(buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
			}
		}
		if (startio && (offset < end)) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
	} while (i++, (bh = bh->b_this_page) != head);

	if (startio) {
		xfs_submit_page(page, bh_arr, index);
	} else {
		unlock_page(page);
	}
}
/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by mp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct page		*page;
	pgoff_t			tlast;

	tlast = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
	for (; tindex < tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}
/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh states cannot know whether any of the blocks (or which blocks,
 * for that matter) are dirty due to mmap writes, and therefore bh
 * uptodate is only valid if the page itself isn't completely uptodate.
 * Some layers may clear the page dirty flag prior to calling writepage,
 * under the assumption the entire page will be written out; by not
 * writing out the whole page the page can be reused before all valid
 * dirty data is written out.  Note: in the case of a page that has been
 * dirtied by mmap write but only partially set up by block_prepare_write,
 * the bh states will not agree and only the ones set up by BPW/BCW will
 * have valid state; thus the whole page must be written out.
 */
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	loff_t			offset;
	unsigned long		p_offset = 0;
	pgoff_t			end_index;
	unsigned long long	end_offset;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags = startio ? 0 : BMAPI_TRYLOCK;
	int			page_dirty = 1;

	/* Are we off the end of the file ? */
	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	end_offset = min_t(unsigned long long,
			offset + PAGE_CACHE_SIZE, i_size_read(inode));

	bh = head = page_buffers(page);
	iomp = NULL;
	len = bh->b_size;
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp)
			iomp = xfs_offset_to_map(page, &iomap, p_offset);

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err)
					goto error;
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp && startio) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err)
						goto error;
				}
				page_dirty = 0;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err)
					goto error;
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {
			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err)
						goto error;
					iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty = 0;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty = 0;
				}
			}
		}
	} while (offset += len, p_offset += len,
		 ((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_submit_page(page, bh_arr, cnt);

	if (iomp)
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
				startio, unmapped);

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped)
			block_invalidatepage(page, 0);
		ClearPageUptodate(page);
	}
	return err;
}
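
/*
 * Core get_block implementation shared by the buffered, sync and direct
 * paths below: map the request via VOP_BMAP and translate the resulting
 * iomap into buffer head state (mapped/new/delay/unwritten).
 */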
STATIC int
linvfs_get_block_core(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;
	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t	bn;
		loff_t		delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;

			bh_result->b_blocknr = bn;
			bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		if (unlikely(direct))
			BUG();
		if (create) {
			set_buffer_mapped(bh_result);
			set_buffer_uptodate(bh_result);
		}
		bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
		set_buffer_delay(bh_result);
	}

	if (blocks) {
		loff_t	iosize;

		iosize = (iomap.iomap_bsize - iomap.iomap_delta);
		bh_result->b_size =
		    (ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
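
/*
 * The get_block variants below just parameterise linvfs_get_block_core:
 * plain buffered writes, O_SYNC writes, and direct I/O respectively.
 */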
STATIC int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}
STATIC int
linvfs_get_block_sync(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, 0, bh_result,
					create, 0, BMAPI_SYNC|BMAPI_WRITE);
}
STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}
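
/*
 * Direct I/O goes straight to the block device backing this part of the
 * file (found with a BMAPI_DEVICE bmap).  The no_locking variant of
 * blockdev_direct_IO is used, presumably because XFS serialises direct
 * I/O against the inode itself; unwritten extents are converted in the
 * completion callback.
 */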
STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	return blockdev_direct_IO_no_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_unwritten_convert_direct);
}
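
/*
 * FIBMAP-style block lookup.  Flush any cached pages first so the
 * on-disk layout is stable before the generic helper translates the
 * block number.
 */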
STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = (struct inode *)mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}
STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}
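
/*
 * Walk the buffers on a page and flag whether it carries delayed
 * allocation, unmapped or unwritten state; a buffer flagged unwritten
 * but no longer delayed has its stale unwritten flag cleared along
 * the way.
 */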
STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;
	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}
/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page. Typically the page dirty
 *    state is cleared before we get here. In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it. For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate. For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */
STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */
	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	set_page_dirty(page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}
/*
 * Called to move a page into cleanable state - and from there
 * to be released. Possibly the page is already clean. We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the page is OK to release, 0 otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O. buffer heads will be dirty and possibly
 *    delalloc. If no delalloc buffer heads in this case then we
 *    can just return zero.
 *
 * 2. We are called to release a page which has been written via
 *    mmap, all we need to do is ensure there is no delalloc
 *    state in the buffer heads, if not we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	int			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}
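
/*
 * For O_SYNC files use the sync get_block variant, which passes
 * BMAPI_SYNC down to the bmap call (presumably forcing any allocation
 * transaction out with the data); otherwise the plain variant is
 * enough, with generic_commit_write finishing the job.
 */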
STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	if (file && (file->f_flags & O_SYNC)) {
		return block_prepare_write(page, from, to,
						linvfs_get_block_sync);
	}
	return block_prepare_write(page, from, to,
					linvfs_get_block);
}
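
/*
 * Address space operations wiring the above into the VM: reads go
 * through mpage, writes through the state-converting writepage path.
 */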
struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};