/*
 * Copyright (c) 2000-2004 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA  94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */

#include "xfs.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_sb.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_rw.h"
#include "xfs_iomap.h"
#include <linux/mpage.h>
#include <linux/writeback.h>

STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
		struct writeback_control *wbc, void *, int, int);
#if defined(XFS_RW_TRACE)
void
xfs_page_trace(
	int		tag,
	struct inode	*inode,
	struct page	*page,
	int		mask)
{
	xfs_inode_t	*ip;
	bhv_desc_t	*bdp;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	loff_t		isize = i_size_read(inode);
	/* cast before shifting - page->index is 32 bits on 32-bit platforms */
	loff_t		offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	int		delalloc = -1, unmapped = -1, unwritten = -1;

	if (page_has_buffers(page))
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);

	bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
	ip = XFS_BHVTOI(bdp);
	if (!ip->i_rwtrace)
		return;

	ktrace_enter(ip->i_rwtrace,
		(void *)((unsigned long)tag),
		(void *)ip,
		(void *)inode,
		(void *)page,
		(void *)((unsigned long)mask),
		(void *)((unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff)),
		(void *)((unsigned long)(ip->i_d.di_size & 0xffffffff)),
		(void *)((unsigned long)((isize >> 32) & 0xffffffff)),
		(void *)((unsigned long)(isize & 0xffffffff)),
		(void *)((unsigned long)((offset >> 32) & 0xffffffff)),
		(void *)((unsigned long)(offset & 0xffffffff)),
		(void *)((unsigned long)delalloc),
		(void *)((unsigned long)unmapped),
		(void *)((unsigned long)unwritten),
		(void *)NULL,
		(void *)NULL);
}
#else
#define xfs_page_trace(tag, inode, page, mask)
#endif

void
linvfs_unwritten_done(
	struct buffer_head	*bh,
	int			uptodate)
{
	xfs_buf_t		*pb = (xfs_buf_t *)bh->b_private;

	ASSERT(buffer_unwritten(bh));
	bh->b_end_io = NULL;
	clear_buffer_unwritten(bh);
	if (!uptodate)
		pagebuf_ioerror(pb, EIO);
	if (atomic_dec_and_test(&pb->pb_io_remaining))
		pagebuf_iodone(pb, 1, 1);
	end_buffer_async_write(bh, uptodate);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (buffered IO).
 */
STATIC void
linvfs_unwritten_convert(
	xfs_buf_t	*bp)
{
	vnode_t		*vp = XFS_BUF_FSPRIVATE(bp, vnode_t *);
	int		error;

	BUG_ON(atomic_read(&bp->pb_hold) < 1);
	VOP_BMAP(vp, XFS_BUF_OFFSET(bp), XFS_BUF_SIZE(bp),
			BMAPI_UNWRITTEN, NULL, NULL, error);
	XFS_BUF_SET_FSPRIVATE(bp, NULL);
	XFS_BUF_CLR_IODONE_FUNC(bp);
	XFS_BUF_UNDATAIO(bp);
	iput(LINVFS_GET_IP(vp));
	pagebuf_iodone(bp, 0, 0);
}

/*
 * Issue transactions to convert a buffer range from unwritten
 * to written extents (direct IO).
 */
STATIC void
linvfs_unwritten_convert_direct(
	struct inode	*inode,
	loff_t		offset,
	ssize_t		size,
	void		*private)
{
	ASSERT(!private || inode == (struct inode *)private);

	/* private indicates an unwritten extent lies beneath this IO,
	 * see linvfs_get_block_core.
	 */
	if (private && size > 0) {
		vnode_t	*vp = LINVFS_GET_VP(inode);
		int	error;

		VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
	}
}

STATIC int
xfs_map_blocks(
	struct inode		*inode,
	loff_t			offset,
	ssize_t			count,
	xfs_iomap_t		*iomapp,
	int			flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error, niomaps = 1;

	if (((flags & (BMAPI_DIRECT|BMAPI_SYNC)) == BMAPI_DIRECT) &&
	    (offset >= i_size_read(inode)))
		count = max_t(ssize_t, count, XFS_WRITE_IO_LOG);
retry:
	VOP_BMAP(vp, offset, count, flags, iomapp, &niomaps, error);
	if ((error == EAGAIN) || (error == EIO))
		return -error;
	if (unlikely((flags & (BMAPI_WRITE|BMAPI_DIRECT)) ==
					(BMAPI_WRITE|BMAPI_DIRECT) && niomaps &&
					(iomapp->iomap_flags & IOMAP_DELAY))) {
		flags = BMAPI_ALLOCATE;
		goto retry;
	}
	if (flags & (BMAPI_WRITE|BMAPI_ALLOCATE)) {
		VMODIFY(vp);
	}
	return -error;
}

/*
 * Find the mapping in the @iomapp block map that corresponds to the
 * given @offset within @page, or NULL if the offset is not covered.
 */
STATIC xfs_iomap_t *
xfs_offset_to_map(
	struct page		*page,
	xfs_iomap_t		*iomapp,
	unsigned long		offset)
{
	loff_t			full_offset;	/* offset from start of file */

	ASSERT(offset < PAGE_CACHE_SIZE);

	full_offset = page->index;		/* NB: using 64bit number */
	full_offset <<= PAGE_CACHE_SHIFT;	/* offset from file start */
	full_offset += offset;			/* offset from page start */

	if (full_offset < iomapp->iomap_offset)
		return NULL;
	if (iomapp->iomap_offset + (iomapp->iomap_bsize - 1) >= full_offset)
		return iomapp;
	return NULL;
}
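
/*
 * Worked example (illustrative only; assumes 4KiB pages, i.e.
 * PAGE_CACHE_SHIFT == 12): for page->index == 3 and offset == 0x200,
 * full_offset is (3 << 12) + 0x200 = 0x3200.  A mapping with
 * iomap_offset == 0x3000 and iomap_bsize == 0x1000 covers the
 * inclusive byte range [0x3000, 0x3fff], so 0x3200 falls inside it
 * and iomapp is returned; a full_offset of 0x4000 would return NULL.
 */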

STATIC void
xfs_map_at_offset(
	struct page		*page,
	struct buffer_head	*bh,
	unsigned long		offset,
	int			block_bits,
	xfs_iomap_t		*iomapp)
{
	xfs_daddr_t		bn;
	loff_t			delta;
	int			sector_shift;

	ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
	ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
	ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);

	delta = page->index;
	delta <<= PAGE_CACHE_SHIFT;
	delta += offset;
	delta -= iomapp->iomap_offset;
	delta >>= block_bits;

	sector_shift = block_bits - BBSHIFT;
	bn = iomapp->iomap_bn >> sector_shift;
	bn += delta;
	ASSERT((bn << sector_shift) >= iomapp->iomap_bn);

	lock_buffer(bh);
	bh->b_blocknr = bn;
	bh->b_bdev = iomapp->iomap_target->pbr_bdev;
	set_buffer_mapped(bh);
	clear_buffer_delay(bh);
}
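
/*
 * Worked example of the arithmetic above (illustrative only; assumes
 * 4KiB pages and 4KiB filesystem blocks, so block_bits == 12 and
 * sector_shift == 12 - BBSHIFT == 3 with 512-byte basic blocks):
 * iomap_bn is expressed in basic blocks, so iomap_bn == 800 names
 * filesystem block 800 >> 3 == 100.  For a buffer two filesystem
 * blocks past iomap_offset, delta == 2 and b_blocknr becomes 102.
 * The ASSERT is a sanity check that the shift did not throw away
 * low bits of iomap_bn.
 */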

/*
 * Look for a page at index which is unlocked and contains our
 * unwritten extent flagged buffers at its head.  Returns page
 * locked and with an extra reference count, and length of the
 * unwritten extent component on this page that we can write,
 * in units of filesystem blocks.
 */
STATIC struct page *
xfs_probe_unwritten_page(
	struct address_space	*mapping,
	pgoff_t			index,
	xfs_iomap_t		*iomapp,
	xfs_buf_t		*pb,
	unsigned long		max_offset,
	unsigned long		*fsbs,
	unsigned int		bbits)
{
	struct page		*page;

	page = find_trylock_page(mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		unsigned long		p_offset = 0;

		*fsbs = 0;
		bh = head = page_buffers(page);
		do {
			if (!buffer_unwritten(bh))
				break;
			if (!xfs_offset_to_map(page, iomapp, p_offset))
				break;
			if (p_offset >= max_offset)
				break;
			xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
			set_buffer_unwritten_io(bh);
			bh->b_private = pb;
			p_offset += bh->b_size;
			(*fsbs)++;
		} while ((bh = bh->b_this_page) != head);

		if (p_offset)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}

/*
 * Look for a page at index which is unlocked and not mapped
 * yet - clustering for mmap write case.
 */
STATIC unsigned int
xfs_probe_unmapped_page(
	struct address_space	*mapping,
	pgoff_t			index,
	unsigned int		pg_offset)
{
	struct page		*page;
	int			ret = 0;

	page = find_trylock_page(mapping, index);
	if (!page)
		return 0;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && PageDirty(page)) {
		if (page_has_buffers(page)) {
			struct buffer_head	*bh, *head;

			bh = head = page_buffers(page);
			do {
				if (buffer_mapped(bh) || !buffer_uptodate(bh))
					break;
				ret += bh->b_size;
				if (ret >= pg_offset)
					break;
			} while ((bh = bh->b_this_page) != head);
		} else
			ret = PAGE_CACHE_SIZE;
	}

out:
	unlock_page(page);
	return ret;
}

STATIC unsigned int
xfs_probe_unmapped_cluster(
	struct inode		*inode,
	struct page		*startpage,
	struct buffer_head	*bh,
	struct buffer_head	*head)
{
	pgoff_t			tindex, tlast, tloff;
	unsigned int		pg_offset, len, total = 0;
	struct address_space	*mapping = inode->i_mapping;

	/* First sum forwards in this page */
	do {
		if (buffer_mapped(bh))
			break;
		total += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	/* If we reached the end of the page, sum forwards in
	 * following pages.
	 */
	if (bh == head) {
		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		/* Prune this back to avoid pathological behavior */
		tloff = min(tlast, startpage->index + 64);
		for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
			len = xfs_probe_unmapped_page(mapping, tindex,
							PAGE_CACHE_SIZE);
			if (!len)
				return total;
			total += len;
		}
		if (tindex == tlast &&
		    (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			total += xfs_probe_unmapped_page(mapping,
							tindex, pg_offset);
		}
	}
	return total;
}
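
/*
 * Note on the pruning above: the forward probe is capped at 64 pages
 * beyond the starting page, so with 4KiB pages (for example) a single
 * unmapped cluster is bounded at roughly 256KiB.  That keeps one
 * pathological mmap write from being turned into a single enormous
 * allocation request.
 */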

/*
 * Probe for a given page (index) in the inode and test if it is delayed
 * and without unwritten buffers.  Returns page locked and with an extra
 * reference count.
 */
STATIC struct page *
xfs_probe_delalloc_page(
	struct inode		*inode,
	pgoff_t			index)
{
	struct page		*page;

	page = find_trylock_page(inode->i_mapping, index);
	if (!page)
		return NULL;
	if (PageWriteback(page))
		goto out;

	if (page->mapping && page_has_buffers(page)) {
		struct buffer_head	*bh, *head;
		int			acceptable = 0;

		bh = head = page_buffers(page);
		do {
			if (buffer_unwritten(bh)) {
				acceptable = 0;
				break;
			} else if (buffer_delay(bh)) {
				acceptable = 1;
			}
		} while ((bh = bh->b_this_page) != head);

		if (acceptable)
			return page;
	}

out:
	unlock_page(page);
	return NULL;
}

STATIC int
xfs_map_unwritten(
	struct inode		*inode,
	struct page		*start_page,
	struct buffer_head	*head,
	struct buffer_head	*curr,
	unsigned long		p_offset,
	int			block_bits,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh = curr;
	xfs_iomap_t		*tmp;
	xfs_buf_t		*pb;
	loff_t			offset, size;
	unsigned long		nblocks = 0;

	offset = start_page->index;
	offset <<= PAGE_CACHE_SHIFT;
	offset += p_offset;

	/* Get an "empty" pagebuf to manage IO completion.
	 * Proper values will be set before returning.
	 */
	pb = pagebuf_lookup(iomapp->iomap_target, 0, 0, 0);
	if (!pb)
		return -EAGAIN;

	/* Take a reference to the inode to prevent it from
	 * being reclaimed while we have outstanding unwritten
	 * extent IO on it.
	 */
	if ((igrab(inode)) != inode) {
		pagebuf_free(pb);
		return -EAGAIN;
	}

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling pagebuf_iodone too early.
	 */
	atomic_set(&pb->pb_io_remaining, 1);

	/* First map forwards in the page consecutive buffers
	 * covering this unwritten extent
	 */
	do {
		if (!buffer_unwritten(bh))
			break;
		tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
		if (!tmp)
			break;
		xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
		set_buffer_unwritten_io(bh);
		bh->b_private = pb;
		p_offset += bh->b_size;
		nblocks++;
	} while ((bh = bh->b_this_page) != head);

	atomic_add(nblocks, &pb->pb_io_remaining);

	/* If we reached the end of the page, map forwards in any
	 * following pages which are also covered by this extent.
	 */
	if (bh == head) {
		struct address_space	*mapping = inode->i_mapping;
		pgoff_t			tindex, tloff, tlast;
		unsigned long		bs;
		unsigned int		pg_offset, bbits = inode->i_blkbits;
		struct page		*page;

		tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
		tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
		tloff = min(tlast, tloff);
		for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
			page = xfs_probe_unwritten_page(mapping,
						tindex, iomapp, pb,
						PAGE_CACHE_SIZE, &bs, bbits);
			if (!page)
				break;
			nblocks += bs;
			atomic_add(bs, &pb->pb_io_remaining);
			xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
			/* stop if converting the next page might add
			 * enough blocks that the corresponding byte
			 * count won't fit in our ulong page buf length */
			if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
				goto enough;
		}

		if (tindex == tlast &&
		    (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
			page = xfs_probe_unwritten_page(mapping,
							tindex, iomapp, pb,
							pg_offset, &bs, bbits);
			if (page) {
				nblocks += bs;
				atomic_add(bs, &pb->pb_io_remaining);
				xfs_convert_page(inode, page, iomapp, wbc, pb,
							startio, all_bh);
				if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
					goto enough;
			}
		}
	}

enough:
	size = nblocks;		/* NB: using 64bit number here */
	size <<= block_bits;	/* convert fsb's to byte range */

	XFS_BUF_DATAIO(pb);
	XFS_BUF_ASYNC(pb);
	XFS_BUF_SET_SIZE(pb, size);
	XFS_BUF_SET_COUNT(pb, size);
	XFS_BUF_SET_OFFSET(pb, offset);
	XFS_BUF_SET_FSPRIVATE(pb, LINVFS_GET_VP(inode));
	XFS_BUF_SET_IODONE_FUNC(pb, linvfs_unwritten_convert);

	if (atomic_dec_and_test(&pb->pb_io_remaining))
		pagebuf_iodone(pb, 1, 1);

	return 0;
}
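
/*
 * A brief walk-through of the pb_io_remaining protocol used above:
 * the count starts at 1 so that block completions arriving while we
 * are still mapping cannot trigger pagebuf_iodone early.  Mapping N
 * unwritten blocks then adds N, giving N + 1.  Each
 * linvfs_unwritten_done drops one as its block completes, and the
 * final atomic_dec_and_test above drops the initial hold; whichever
 * path takes the count to zero - this function or the last I/O
 * completion - ends up issuing the unwritten extent conversion.
 */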

STATIC void
xfs_submit_page(
	struct page		*page,
	struct buffer_head	*bh_arr[],
	int			cnt)
{
	struct buffer_head	*bh;
	int			i;

	BUG_ON(PageWriteback(page));
	set_page_writeback(page);
	clear_page_dirty(page);
	unlock_page(page);

	if (cnt) {
		for (i = 0; i < cnt; i++) {
			bh = bh_arr[i];
			mark_buffer_async_write(bh);
			if (buffer_unwritten(bh))
				set_buffer_unwritten_io(bh);
			set_buffer_uptodate(bh);
			clear_buffer_dirty(bh);
		}

		for (i = 0; i < cnt; i++)
			submit_bh(WRITE, bh_arr[i]);
	} else
		end_page_writeback(page);
}

/*
 * Allocate & map buffers for page given the extent map.  Write it out.
 * Except for the original page of a writepage, this is called on
 * delalloc/unwritten pages only; for the original page it is possible
 * that the page has no mapping at all.
 */
STATIC void
xfs_convert_page(
	struct inode		*inode,
	struct page		*page,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	void			*private,
	int			startio,
	int			all_bh)
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*mp = iomapp, *tmp;
	unsigned long		end, offset;
	pgoff_t			end_index;
	int			i = 0, index = 0;
	int			bbits = inode->i_blkbits;

	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index < end_index) {
		end = PAGE_CACHE_SIZE;
	} else {
		end = i_size_read(inode) & (PAGE_CACHE_SIZE-1);
	}
	bh = head = page_buffers(page);
	do {
		offset = i << bbits;
		if (!(PageUptodate(page) || buffer_uptodate(bh)))
			continue;
		if (buffer_mapped(bh) && all_bh &&
		    !buffer_unwritten(bh) && !buffer_delay(bh)) {
			if (startio && (offset < end)) {
				lock_buffer(bh);
				bh_arr[index++] = bh;
			}
			continue;
		}
		tmp = xfs_offset_to_map(page, mp, offset);
		if (!tmp)
			continue;
		ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
		ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));

		/* If this is a new unwritten extent buffer (i.e. one
		 * that we haven't passed in private data for), we must
		 * now map this buffer too.
		 */
		if (buffer_unwritten(bh) && !bh->b_end_io) {
			ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
			xfs_map_unwritten(inode, page, head, bh, offset,
					bbits, tmp, wbc, startio, all_bh);
		} else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
			xfs_map_at_offset(page, bh, offset, bbits, tmp);
			if (buffer_unwritten(bh)) {
				set_buffer_unwritten_io(bh);
				bh->b_private = private;
				ASSERT(private);
			}
		}
		if (startio && (offset < end)) {
			bh_arr[index++] = bh;
		} else {
			set_buffer_dirty(bh);
			unlock_buffer(bh);
			mark_buffer_dirty(bh);
		}
	} while (i++, (bh = bh->b_this_page) != head);

	if (startio) {
		wbc->nr_to_write--;
		xfs_submit_page(page, bh_arr, index);
	} else {
		unlock_page(page);
	}
}

/*
 * Convert & write out a cluster of pages in the same extent as defined
 * by iomapp and following the start page.
 */
STATIC void
xfs_cluster_write(
	struct inode		*inode,
	pgoff_t			tindex,
	xfs_iomap_t		*iomapp,
	struct writeback_control *wbc,
	int			startio,
	int			all_bh)
{
	pgoff_t			tlast;
	struct page		*page;

	tlast = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
	for (; tindex < tlast; tindex++) {
		page = xfs_probe_delalloc_page(inode, tindex);
		if (!page)
			break;
		xfs_convert_page(inode, page, iomapp, wbc, NULL,
				startio, all_bh);
	}
}

/*
 * Calling this without startio set means we are being asked to make a dirty
 * page ready for freeing its buffers.  When called with startio set then
 * we are coming from writepage.
 *
 * When called with startio set it is important that we write the WHOLE
 * page if possible.
 * The bh->b_state flags cannot tell us whether any particular block is
 * dirty due to mmap writes, and therefore buffer uptodate state is only
 * valid if the page itself isn't completely uptodate.  Some layers may
 * clear the page dirty flag prior to calling writepage, under the
 * assumption the entire page will be written out; by not writing out
 * the whole page the page can be reused before all valid dirty data is
 * written out.  Note: in the case of a page that has been dirtied by
 * mmap write but only partially set up by block_prepare_write, the
 * bh->b_state flags will not agree and only the buffers set up by
 * block_prepare_write/block_commit_write will have valid state; hence
 * the whole page must be written out.
 */

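/*
 * Per-buffer decision summary for the function below (informal):
 * unwritten buffers are mapped and queued for an extent conversion
 * transaction on I/O completion; delalloc buffers get real space
 * allocated (BMAPI_ALLOCATE); buffers that are uptodate but unmapped
 * get space allocated as well when the whole page must go out
 * (BMAPI_WRITE|BMAPI_MMAP); anything else that is mapped, uptodate
 * and lockable is simply written.
 */
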
STATIC int
xfs_page_state_convert(
	struct inode	*inode,
	struct page	*page,
	struct writeback_control *wbc,
	int		startio,
	int		unmapped) /* also implies page uptodate */
{
	struct buffer_head	*bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
	xfs_iomap_t		*iomp, iomap;
	unsigned long		p_offset = 0;
	pgoff_t			end_index;
	loff_t			offset;
	unsigned long long	end_offset;
	int			len, err, i, cnt = 0, uptodate = 1;
	int			flags = startio ? 0 : BMAPI_TRYLOCK;
	int			page_dirty = 1;

	/* Are we off the end of the file ? */
	end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
	if (page->index >= end_index) {
		if ((page->index >= end_index + 1) ||
		    !(i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
			err = -EIO;
			goto error;
		}
	}

	offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
	end_offset = min_t(unsigned long long,
			offset + PAGE_CACHE_SIZE, i_size_read(inode));

	bh = head = page_buffers(page);
	iomp = NULL;

	len = bh->b_size;
	do {
		if (offset >= end_offset)
			break;
		if (!buffer_uptodate(bh))
			uptodate = 0;
		if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
			continue;

		if (iomp) {
			iomp = xfs_offset_to_map(page, &iomap, p_offset);
		}

		/*
		 * First case, map an unwritten extent and prepare for
		 * extent state conversion transaction on completion.
		 */
		if (buffer_unwritten(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_READ|BMAPI_IGNSTATE);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp && startio) {
				if (!bh->b_end_io) {
					err = xfs_map_unwritten(inode, page,
							head, bh, p_offset,
							inode->i_blkbits, iomp,
							wbc, startio, unmapped);
					if (err) {
						goto error;
					}
				}
				bh_arr[cnt++] = bh;
				page_dirty = 0;
			}
		/*
		 * Second case, allocate space for a delalloc buffer.
		 * We can return EAGAIN here in the release page case.
		 */
		} else if (buffer_delay(bh)) {
			if (!iomp) {
				err = xfs_map_blocks(inode, offset, len, &iomap,
						BMAPI_ALLOCATE | flags);
				if (err) {
					goto error;
				}
				iomp = xfs_offset_to_map(page, &iomap,
								p_offset);
			}
			if (iomp) {
				xfs_map_at_offset(page, bh, p_offset,
						inode->i_blkbits, iomp);
				if (startio) {
					bh_arr[cnt++] = bh;
				} else {
					set_buffer_dirty(bh);
					unlock_buffer(bh);
					mark_buffer_dirty(bh);
				}
				page_dirty = 0;
			}
		} else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
			   (unmapped || startio)) {

			if (!buffer_mapped(bh)) {
				int	size;

				/*
				 * Getting here implies an unmapped buffer
				 * was found, and we are in a path where we
				 * need to write the whole page out.
				 */
				if (!iomp) {
					size = xfs_probe_unmapped_cluster(
							inode, page, bh, head);
					err = xfs_map_blocks(inode, offset,
							size, &iomap,
							BMAPI_WRITE|BMAPI_MMAP);
					if (err) {
						goto error;
					}
					iomp = xfs_offset_to_map(page, &iomap,
								     p_offset);
				}
				if (iomp) {
					xfs_map_at_offset(page,
							bh, p_offset,
							inode->i_blkbits, iomp);
					if (startio) {
						bh_arr[cnt++] = bh;
					} else {
						set_buffer_dirty(bh);
						unlock_buffer(bh);
						mark_buffer_dirty(bh);
					}
					page_dirty = 0;
				}
			} else if (startio) {
				if (buffer_uptodate(bh) &&
				    !test_and_set_bit(BH_Lock, &bh->b_state)) {
					bh_arr[cnt++] = bh;
					page_dirty = 0;
				}
			}
		}
	} while (offset += len, p_offset += len,
		((bh = bh->b_this_page) != head));

	if (uptodate && bh == head)
		SetPageUptodate(page);

	if (startio)
		xfs_submit_page(page, bh_arr, cnt);

	if (iomp) {
		xfs_cluster_write(inode, page->index + 1, iomp, wbc,
				startio, unmapped);
	}

	return page_dirty;

error:
	for (i = 0; i < cnt; i++) {
		unlock_buffer(bh_arr[i]);
	}

	/*
	 * If it's delalloc and we have nowhere to put it,
	 * throw it away, unless the lower layers told
	 * us to try again.
	 */
	if (err != -EAGAIN) {
		if (!unmapped) {
			block_invalidatepage(page, 0);
		}
		ClearPageUptodate(page);
	}
	return err;
}

STATIC int
linvfs_get_block_core(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		blocks,
	struct buffer_head	*bh_result,
	int			create,
	int			direct,
	bmapi_flags_t		flags)
{
	vnode_t			*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t		iomap;
	int			retpbbm = 1;
	int			error;
	ssize_t			size;
	loff_t			offset = (loff_t)iblock << inode->i_blkbits;

	if (blocks)
		size = blocks << inode->i_blkbits;
	else
		size = 1 << inode->i_blkbits;

	VOP_BMAP(vp, offset, size,
		create ? flags : BMAPI_READ, &iomap, &retpbbm, error);
	if (error)
		return -error;

	if (retpbbm == 0)
		return 0;

	if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
		xfs_daddr_t		bn;
		loff_t			delta;

		/* For unwritten extents do not report a disk address on
		 * the read case (treat as if we're reading into a hole).
		 */
		if (create || !(iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			delta = offset - iomap.iomap_offset;
			delta >>= inode->i_blkbits;

			bn = iomap.iomap_bn >> (inode->i_blkbits - BBSHIFT);
			bn += delta;

			bh_result->b_blocknr = bn;
			bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
			set_buffer_mapped(bh_result);
		}
		if (create && (iomap.iomap_flags & IOMAP_UNWRITTEN)) {
			if (direct)
				bh_result->b_private = inode;
			set_buffer_unwritten(bh_result);
			set_buffer_delay(bh_result);
		}
	}

	/* If this is a realtime file, data might be on a new device */
	bh_result->b_bdev = iomap.iomap_target->pbr_bdev;

	/* If we previously allocated a block out beyond eof and
	 * we are now coming back to use it then we will need to
	 * flag it as new even if it has a disk address.
	 */
	if (create &&
	    ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
	     (offset >= i_size_read(inode)) || (iomap.iomap_flags & IOMAP_NEW))) {
		set_buffer_new(bh_result);
	}

	if (iomap.iomap_flags & IOMAP_DELAY) {
		BUG_ON(direct);
		if (create) {
			set_buffer_mapped(bh_result);
			set_buffer_uptodate(bh_result);
		}
		bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
		set_buffer_delay(bh_result);
	}

	if (blocks) {
		loff_t iosize;
		iosize = (iomap.iomap_bsize - iomap.iomap_delta);
		bh_result->b_size =
		    (ssize_t)min(iosize, (loff_t)(blocks << inode->i_blkbits));
	}

	return 0;
}
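
/*
 * Worked example of the multi-block size trimming above (illustrative
 * only; assumes 4KiB blocks): a direct I/O asks for blocks == 8
 * (32KiB) starting 4KiB into a mapping with iomap_bsize == 24KiB, so
 * iomap_delta == 4KiB.  The usable run is 24KiB - 4KiB = 20KiB, less
 * than the 32KiB requested, so b_size is trimmed to 20KiB and the
 * caller must request another mapping for the remainder.
 */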

int
linvfs_get_block(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, 0, bh_result,
					create, 0, BMAPI_WRITE);
}

STATIC int
linvfs_get_block_sync(
	struct inode		*inode,
	sector_t		iblock,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, 0, bh_result,
					create, 0, BMAPI_SYNC|BMAPI_WRITE);
}

STATIC int
linvfs_get_blocks_direct(
	struct inode		*inode,
	sector_t		iblock,
	unsigned long		max_blocks,
	struct buffer_head	*bh_result,
	int			create)
{
	return linvfs_get_block_core(inode, iblock, max_blocks, bh_result,
					create, 1, BMAPI_WRITE|BMAPI_DIRECT);
}

STATIC ssize_t
linvfs_direct_IO(
	int			rw,
	struct kiocb		*iocb,
	const struct iovec	*iov,
	loff_t			offset,
	unsigned long		nr_segs)
{
	struct file	*file = iocb->ki_filp;
	struct inode	*inode = file->f_mapping->host;
	vnode_t		*vp = LINVFS_GET_VP(inode);
	xfs_iomap_t	iomap;
	int		maps = 1;
	int		error;

	VOP_BMAP(vp, offset, 0, BMAPI_DEVICE, &iomap, &maps, error);
	if (error)
		return -error;

	return blockdev_direct_IO_no_locking(rw, iocb, inode,
		iomap.iomap_target->pbr_bdev,
		iov, offset, nr_segs,
		linvfs_get_blocks_direct,
		linvfs_unwritten_convert_direct);
}

STATIC sector_t
linvfs_bmap(
	struct address_space	*mapping,
	sector_t		block)
{
	struct inode		*inode = mapping->host;
	vnode_t			*vp = LINVFS_GET_VP(inode);
	int			error;

	vn_trace_entry(vp, "linvfs_bmap", (inst_t *)__return_address);

	VOP_RWLOCK(vp, VRWLOCK_READ);
	VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
	VOP_RWUNLOCK(vp, VRWLOCK_READ);
	return generic_block_bmap(mapping, block, linvfs_get_block);
}

STATIC int
linvfs_readpage(
	struct file		*unused,
	struct page		*page)
{
	return mpage_readpage(page, linvfs_get_block);
}

STATIC int
linvfs_readpages(
	struct file		*unused,
	struct address_space	*mapping,
	struct list_head	*pages,
	unsigned		nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, linvfs_get_block);
}

STATIC void
xfs_count_page_state(
	struct page		*page,
	int			*delalloc,
	int			*unmapped,
	int			*unwritten)
{
	struct buffer_head	*bh, *head;

	*delalloc = *unmapped = *unwritten = 0;

	bh = head = page_buffers(page);
	do {
		if (buffer_uptodate(bh) && !buffer_mapped(bh))
			(*unmapped) = 1;
		else if (buffer_unwritten(bh) && !buffer_delay(bh))
			clear_buffer_unwritten(bh);
		else if (buffer_unwritten(bh))
			(*unwritten) = 1;
		else if (buffer_delay(bh))
			(*delalloc) = 1;
	} while ((bh = bh->b_this_page) != head);
}

/*
 * writepage: Called from one of two places:
 *
 * 1. we are flushing a delalloc buffer head.
 *
 * 2. we are writing out a dirty page.  Typically the page dirty
 *    state is cleared before we get here.  In this case it is
 *    conceivable we have no buffer heads.
 *
 * For delalloc space on the page we need to allocate space and
 * flush it.  For unmapped buffer heads on the page we should
 * allocate space if the page is uptodate.  For any other dirty
 * buffer heads on the page we should flush them.
 *
 * If we detect that a transaction would be required to flush
 * the page, we have to check the process flags first; if we
 * are already in a transaction or disk I/O during allocations
 * is off, we need to fail the writepage and redirty the page.
 */

STATIC int
linvfs_writepage(
	struct page		*page,
	struct writeback_control *wbc)
{
	int			error;
	int			need_trans;
	int			delalloc, unmapped, unwritten;
	struct inode		*inode = page->mapping->host;

	xfs_page_trace(XFS_WRITEPAGE_ENTER, inode, page, 0);

	/*
	 * We need a transaction if:
	 *  1. There are delalloc buffers on the page
	 *  2. The page is uptodate and we have unmapped buffers
	 *  3. The page is uptodate and we have no buffers
	 *  4. There are unwritten buffers on the page
	 */

	if (!page_has_buffers(page)) {
		unmapped = 1;
		need_trans = 1;
	} else {
		xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
		if (!PageUptodate(page))
			unmapped = 0;
		need_trans = delalloc + unmapped + unwritten;
	}

	/*
	 * If we need a transaction and the process flags say
	 * we are already in a transaction, or no IO is allowed
	 * then mark the page dirty again and leave the page
	 * as is.
	 */
	if (PFLAGS_TEST_FSTRANS() && need_trans)
		goto out_fail;

	/*
	 * Delay hooking up buffer heads until we have
	 * made our go/no-go decision.
	 */
	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << inode->i_blkbits, 0);

	/*
	 * Convert delayed allocate, unwritten or unmapped space
	 * to real space and flush out to disk.
	 */
	error = xfs_page_state_convert(inode, page, wbc, 1, unmapped);
	if (error == -EAGAIN)
		goto out_fail;
	if (unlikely(error < 0))
		goto out_unlock;

	return 0;

out_fail:
	set_page_dirty(page);
	unlock_page(page);
	return 0;
out_unlock:
	unlock_page(page);
	return error;
}

/*
 * Called to move a page into cleanable state - and from there
 * to be released.  Possibly the page is already clean.  We always
 * have buffer heads in this call.
 *
 * Returns non-zero if the buffers were freed and the page can be
 * released, zero otherwise.
 *
 * Possible scenarios are:
 *
 * 1. We are being called to release a page which has been written
 *    to via regular I/O.  Buffer heads will be dirty and possibly
 *    delalloc.  If no delalloc buffer heads in this case then we
 *    can just let try_to_free_buffers decide.
 *
 * 2. We are called to release a page which has been written via
 *    mmap; all we need to do is ensure there is no delalloc
 *    state in the buffer heads.  If not, we can let the caller
 *    free them and we should come back later via writepage.
 */
STATIC int
linvfs_release_page(
	struct page		*page,
	int			gfp_mask)
{
	struct inode		*inode = page->mapping->host;
	int			dirty, delalloc, unmapped, unwritten;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	xfs_page_trace(XFS_RELEASEPAGE_ENTER, inode, page, gfp_mask);

	xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
	if (!delalloc && !unwritten)
		goto free_buffers;

	if (!(gfp_mask & __GFP_FS))
		return 0;

	/* If we are already inside a transaction or the thread cannot
	 * do I/O, we cannot release this page.
	 */
	if (PFLAGS_TEST_FSTRANS())
		return 0;

	/*
	 * Convert delalloc space to real space, do not flush the
	 * data out to disk, that will be done by the caller.
	 * Never need to allocate space here - we will always
	 * come back to writepage in that case.
	 */
	dirty = xfs_page_state_convert(inode, page, &wbc, 0, 0);
	if (dirty == 0 && !unwritten)
		goto free_buffers;
	return 0;

free_buffers:
	return try_to_free_buffers(page);
}

STATIC int
linvfs_prepare_write(
	struct file		*file,
	struct page		*page,
	unsigned int		from,
	unsigned int		to)
{
	if (file && (file->f_flags & O_SYNC)) {
		return block_prepare_write(page, from, to,
						linvfs_get_block_sync);
	} else {
		return block_prepare_write(page, from, to,
						linvfs_get_block);
	}
}

struct address_space_operations linvfs_aops = {
	.readpage		= linvfs_readpage,
	.readpages		= linvfs_readpages,
	.writepage		= linvfs_writepage,
	.sync_page		= block_sync_page,
	.releasepage		= linvfs_release_page,
	.prepare_write		= linvfs_prepare_write,
	.commit_write		= generic_commit_write,
	.bmap			= linvfs_bmap,
	.direct_IO		= linvfs_direct_IO,
};
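
/*
 * These operations take effect once installed on an inode's mapping
 * during inode setup.  Sketch only - the actual assignment lives in
 * the XFS/Linux inode initialisation code, not in this file:
 *
 *	inode->i_mapping->a_ops = &linvfs_aops;
 */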