[linux-2.6.git] fs/ntfs/aops.c
1 /**
2  * aops.c - NTFS kernel address space operations and page cache handling.
3  *          Part of the Linux-NTFS project.
4  *
5  * Copyright (c) 2001-2006 Anton Altaparmakov
6  * Copyright (c) 2002 Richard Russon
7  *
8  * This program/include file is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License as published
10  * by the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program/include file is distributed in the hope that it will be
14  * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  * GNU General Public License for more details.
17  *
18  * You should have received a copy of the GNU General Public License
19  * along with this program (in the main directory of the Linux-NTFS
20  * distribution in the file COPYING); if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
22  */
23
24 #include <linux/errno.h>
25 #include <linux/fs.h>
26 #include <linux/mm.h>
27 #include <linux/pagemap.h>
28 #include <linux/swap.h>
29 #include <linux/buffer_head.h>
30 #include <linux/writeback.h>
31 #include <linux/bit_spinlock.h>
32
33 #include "aops.h"
34 #include "attrib.h"
35 #include "debug.h"
36 #include "inode.h"
37 #include "mft.h"
38 #include "runlist.h"
39 #include "types.h"
40 #include "ntfs.h"
41
42 /**
43  * ntfs_end_buffer_async_read - async io completion for reading attributes
44  * @bh:         buffer head on which io is completed
45  * @uptodate:   whether @bh is now uptodate or not
46  *
47  * Asynchronous I/O completion handler for reading pages belonging to the
48  * attribute address space of an inode.  The inode can be a file, a
49  * directory, or a fake inode describing some attribute.
50  *
51  * If NInoMstProtected(), perform the post read mst fixups when all IO on the
52  * page has been completed and mark the page uptodate or set the error bit on
53  * the page.  To determine the size of the records that need fixing up, we
54  * cheat a little bit by setting the index_block_size in ntfs_inode to the ntfs
55  * record size, and index_block_size_bits to the log base 2 of the ntfs
56  * record size.
57  */
58 static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
59 {
60         unsigned long flags;
61         struct buffer_head *first, *tmp;
62         struct page *page;
63         struct inode *vi;
64         ntfs_inode *ni;
65         int page_uptodate = 1;
66
67         page = bh->b_page;
68         vi = page->mapping->host;
69         ni = NTFS_I(vi);
70
71         if (likely(uptodate)) {
72                 loff_t i_size;
73                 s64 file_ofs, init_size;
74
75                 set_buffer_uptodate(bh);
76
77                 file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) +
78                                 bh_offset(bh);
79                 read_lock_irqsave(&ni->size_lock, flags);
80                 init_size = ni->initialized_size;
81                 i_size = i_size_read(vi);
82                 read_unlock_irqrestore(&ni->size_lock, flags);
83                 if (unlikely(init_size > i_size)) {
84                         /* Race with shrinking truncate. */
85                         init_size = i_size;
86                 }
87                 /* Check for the current buffer head overflowing. */
88                 if (unlikely(file_ofs + bh->b_size > init_size)) {
89                         u8 *kaddr;
90                         int ofs;
91
92                         ofs = 0;
93                         if (file_ofs < init_size)
94                                 ofs = init_size - file_ofs;
95                         local_irq_save(flags);
96                         kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
97                         memset(kaddr + bh_offset(bh) + ofs, 0,
98                                         bh->b_size - ofs);
99                         kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
100                         local_irq_restore(flags);
101                         flush_dcache_page(page);
102                 }
103         } else {
104                 clear_buffer_uptodate(bh);
105                 SetPageError(page);
106                 ntfs_error(ni->vol->sb, "Buffer I/O error, logical block "
107                                 "0x%llx.", (unsigned long long)bh->b_blocknr);
108         }
109         first = page_buffers(page);
110         local_irq_save(flags);
111         bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
112         clear_buffer_async_read(bh);
113         unlock_buffer(bh);
114         tmp = bh;
115         do {
116                 if (!buffer_uptodate(tmp))
117                         page_uptodate = 0;
118                 if (buffer_async_read(tmp)) {
119                         if (likely(buffer_locked(tmp)))
120                                 goto still_busy;
121                         /* Async buffers must be locked. */
122                         BUG();
123                 }
124                 tmp = tmp->b_this_page;
125         } while (tmp != bh);
126         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
127         local_irq_restore(flags);
128         /*
129          * If none of the buffers had errors then we can set the page uptodate,
130          * but we first have to perform the post read mst fixups, if the
131          * attribute is mst protected, i.e. if NInoMstProtected(ni) is true.
132          * Note we ignore fixup errors as those are detected when
133          * map_mft_record() is called which gives us per record granularity
134          * rather than per page granularity.
135          */
136         if (!NInoMstProtected(ni)) {
137                 if (likely(page_uptodate && !PageError(page)))
138                         SetPageUptodate(page);
139         } else {
140                 u8 *kaddr;
141                 unsigned int i, recs;
142                 u32 rec_size;
143
144                 rec_size = ni->itype.index.block_size;
145                 recs = PAGE_CACHE_SIZE / rec_size;
146                 /* Should have been verified before we got here... */
147                 BUG_ON(!recs);
148                 local_irq_save(flags);
149                 kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
150                 for (i = 0; i < recs; i++)
151                         post_read_mst_fixup((NTFS_RECORD*)(kaddr +
152                                         i * rec_size), rec_size);
153                 kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
154                 local_irq_restore(flags);
155                 flush_dcache_page(page);
156                 if (likely(page_uptodate && !PageError(page)))
157                         SetPageUptodate(page);
158         }
159         unlock_page(page);
160         return;
161 still_busy:
162         bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
163         local_irq_restore(flags);
164         return;
165 }
166
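/*
 * A minimal illustrative sketch (not built into the driver) of the two
 * pieces of arithmetic the completion handler above relies on: a buffer's
 * byte offset within the attribute is its page's byte offset plus its
 * offset inside the page, and an mst protected page holds
 * PAGE_CACHE_SIZE / rec_size records, so e.g. a 4096 byte page of 1024
 * byte index records needs four fixup passes.  The helper name is
 * hypothetical.
 */
#if 0
static inline s64 ntfs_example_buffer_file_ofs(struct page *page,
		struct buffer_head *bh)
{
	/* Byte offset of the page plus byte offset of the buffer in it. */
	return ((s64)page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
}
#endif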
167 /**
168  * ntfs_read_block - fill a @page of an address space with data
169  * @page:       page cache page to fill with data
170  *
171  * Fill the page @page of the address space belonging to the @page->host inode.
172  * We read each buffer asynchronously and when all buffers are read in, our io
173  * completion handler ntfs_end_buffer_async_read(), if required, automatically
174  * applies the mst fixups to the page before finally marking it uptodate and
175  * unlocking it.
176  *
177  * We only enforce the allocated_size limit because i_size is checked for in
178  * generic_file_read().
179  *
180  * Return 0 on success and -errno on error.
181  *
182  * Contains an adapted version of fs/buffer.c::block_read_full_page().
183  */
184 static int ntfs_read_block(struct page *page)
185 {
186         loff_t i_size;
187         VCN vcn;
188         LCN lcn;
189         s64 init_size;
190         struct inode *vi;
191         ntfs_inode *ni;
192         ntfs_volume *vol;
193         runlist_element *rl;
194         struct buffer_head *bh, *head, *arr[MAX_BUF_PER_PAGE];
195         sector_t iblock, lblock, zblock;
196         unsigned long flags;
197         unsigned int blocksize, vcn_ofs;
198         int i, nr;
199         unsigned char blocksize_bits;
200
201         vi = page->mapping->host;
202         ni = NTFS_I(vi);
203         vol = ni->vol;
204
205         /* $MFT/$DATA must have its complete runlist in memory at all times. */
206         BUG_ON(!ni->runlist.rl && !ni->mft_no && !NInoAttr(ni));
207
208         blocksize = vol->sb->s_blocksize;
209         blocksize_bits = vol->sb->s_blocksize_bits;
210
211         if (!page_has_buffers(page)) {
212                 create_empty_buffers(page, blocksize, 0);
213                 if (unlikely(!page_has_buffers(page))) {
214                         unlock_page(page);
215                         return -ENOMEM;
216                 }
217         }
218         bh = head = page_buffers(page);
219         BUG_ON(!bh);
220
221         /*
222          * We may be racing with truncate.  To avoid some of the problems we
223          * now take a snapshot of the various sizes and use those for the whole
224          * of the function.  In case of an extending truncate it just means we
225          * may leave some buffers unmapped which are now allocated.  This is
226          * not a problem since these buffers will just get mapped when a write
227          * occurs.  In case of a shrinking truncate, we will detect this later
228          * on due to the runlist being incomplete and if the page is being
229          * fully truncated, truncate will throw it away as soon as we unlock
230          * it so no need to worry what we do with it.
231          */
232         iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
233         read_lock_irqsave(&ni->size_lock, flags);
234         lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits;
235         init_size = ni->initialized_size;
236         i_size = i_size_read(vi);
237         read_unlock_irqrestore(&ni->size_lock, flags);
238         if (unlikely(init_size > i_size)) {
239                 /* Race with shrinking truncate. */
240                 init_size = i_size;
241         }
242         zblock = (init_size + blocksize - 1) >> blocksize_bits;
243
244         /* Loop through all the buffers in the page. */
245         rl = NULL;
246         nr = i = 0;
247         do {
248                 u8 *kaddr;
249                 int err;
250
251                 if (unlikely(buffer_uptodate(bh)))
252                         continue;
253                 if (unlikely(buffer_mapped(bh))) {
254                         arr[nr++] = bh;
255                         continue;
256                 }
257                 err = 0;
258                 bh->b_bdev = vol->sb->s_bdev;
259                 /* Is the block within the allowed limits? */
260                 if (iblock < lblock) {
261                         bool is_retry = false;
262
263                         /* Convert iblock into corresponding vcn and offset. */
264                         vcn = (VCN)iblock << blocksize_bits >>
265                                         vol->cluster_size_bits;
266                         vcn_ofs = ((VCN)iblock << blocksize_bits) &
267                                         vol->cluster_size_mask;
268                         if (!rl) {
269 lock_retry_remap:
270                                 down_read(&ni->runlist.lock);
271                                 rl = ni->runlist.rl;
272                         }
273                         if (likely(rl != NULL)) {
274                                 /* Seek to element containing target vcn. */
275                                 while (rl->length && rl[1].vcn <= vcn)
276                                         rl++;
277                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
278                         } else
279                                 lcn = LCN_RL_NOT_MAPPED;
280                         /* Successful remap. */
281                         if (lcn >= 0) {
282                                 /* Setup buffer head to correct block. */
283                                 bh->b_blocknr = ((lcn << vol->cluster_size_bits)
284                                                 + vcn_ofs) >> blocksize_bits;
285                                 set_buffer_mapped(bh);
286                                 /* Only read initialized data blocks. */
287                                 if (iblock < zblock) {
288                                         arr[nr++] = bh;
289                                         continue;
290                                 }
291                                 /* Fully non-initialized data block, zero it. */
292                                 goto handle_zblock;
293                         }
294                         /* It is a hole, need to zero it. */
295                         if (lcn == LCN_HOLE)
296                                 goto handle_hole;
297                         /* If first try and runlist unmapped, map and retry. */
298                         if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
299                                 is_retry = true;
300                                 /*
301                                  * Attempt to map runlist, dropping lock for
302                                  * the duration.
303                                  */
304                                 up_read(&ni->runlist.lock);
305                                 err = ntfs_map_runlist(ni, vcn);
306                                 if (likely(!err))
307                                         goto lock_retry_remap;
308                                 rl = NULL;
309                         } else if (!rl)
310                                 up_read(&ni->runlist.lock);
311                         /*
312                          * If buffer is outside the runlist, treat it as a
313                          * hole.  This can happen due to concurrent truncate
314                          * for example.
315                          */
316                         if (err == -ENOENT || lcn == LCN_ENOENT) {
317                                 err = 0;
318                                 goto handle_hole;
319                         }
320                         /* Hard error, zero out region. */
321                         if (!err)
322                                 err = -EIO;
323                         bh->b_blocknr = -1;
324                         SetPageError(page);
325                         ntfs_error(vol->sb, "Failed to read from inode 0x%lx, "
326                                         "attribute type 0x%x, vcn 0x%llx, "
327                                         "offset 0x%x because its location on "
328                                         "disk could not be determined%s "
329                                         "(error code %i).", ni->mft_no,
330                                         ni->type, (unsigned long long)vcn,
331                                         vcn_ofs, is_retry ? " even after "
332                                         "retrying" : "", err);
333                 }
334                 /*
335                  * Either iblock was outside lblock limits or
336                  * ntfs_rl_vcn_to_lcn() returned error.  Just zero that portion
337                  * of the page and set the buffer uptodate.
338                  */
339 handle_hole:
340                 bh->b_blocknr = -1UL;
341                 clear_buffer_mapped(bh);
342 handle_zblock:
343                 kaddr = kmap_atomic(page, KM_USER0);
344                 memset(kaddr + i * blocksize, 0, blocksize);
345                 kunmap_atomic(kaddr, KM_USER0);
346                 flush_dcache_page(page);
347                 if (likely(!err))
348                         set_buffer_uptodate(bh);
349         } while (i++, iblock++, (bh = bh->b_this_page) != head);
350
351         /* Release the lock if we took it. */
352         if (rl)
353                 up_read(&ni->runlist.lock);
354
355         /* Check we have at least one buffer ready for i/o. */
356         if (nr) {
357                 struct buffer_head *tbh;
358
359                 /* Lock the buffers. */
360                 for (i = 0; i < nr; i++) {
361                         tbh = arr[i];
362                         lock_buffer(tbh);
363                         tbh->b_end_io = ntfs_end_buffer_async_read;
364                         set_buffer_async_read(tbh);
365                 }
366                 /* Finally, start i/o on the buffers. */
367                 for (i = 0; i < nr; i++) {
368                         tbh = arr[i];
369                         if (likely(!buffer_uptodate(tbh)))
370                                 submit_bh(READ, tbh);
371                         else
372                                 ntfs_end_buffer_async_read(tbh, 1);
373                 }
374                 return 0;
375         }
376         /* No i/o was scheduled on any of the buffers. */
377         if (likely(!PageError(page)))
378                 SetPageUptodate(page);
379         else /* Signal synchronous i/o error. */
380                 nr = -EIO;
381         unlock_page(page);
382         return nr;
383 }
384
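/*
 * A minimal illustrative sketch (not built into the driver) of the block to
 * device block mapping used in ntfs_read_block() above and in the write
 * paths below: the logical block is converted to a vcn plus an offset
 * within the cluster, the runlist maps the vcn to an lcn, and the lcn plus
 * offset gives the device block.  With 512 byte blocks and 4096 byte
 * clusters, block 9 is vcn 1 at offset 512, so lcn 100 maps it to device
 * block (100 * 4096 + 512) / 512 = 801.  The helper name is hypothetical.
 */
#if 0
static inline sector_t ntfs_example_dev_block(ntfs_volume *vol, sector_t block,
		LCN lcn, unsigned char blocksize_bits)
{
	/* Byte offset of the block within its cluster. */
	unsigned int vcn_ofs = ((VCN)block << blocksize_bits) &
			vol->cluster_size_mask;

	/* Cluster start in bytes plus offset, expressed in device blocks. */
	return ((lcn << vol->cluster_size_bits) + vcn_ofs) >> blocksize_bits;
}
#endif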
385 /**
386  * ntfs_readpage - fill a @page of a @file with data from the device
387  * @file:       open file to which the page @page belongs or NULL
388  * @page:       page cache page to fill with data
389  *
390  * For non-resident attributes, ntfs_readpage() fills the @page of the open
391  * file @file by calling the ntfs version of the generic block_read_full_page()
392  * function, ntfs_read_block(), which in turn creates and reads in the buffers
393  * associated with the page asynchronously.
394  *
395  * For resident attributes, OTOH, ntfs_readpage() fills @page by copying the
396  * data from the mft record (which at this stage is most likely in memory) and
397  * fills the remainder with zeroes. Thus, in this case, I/O is synchronous, as
398  * even if the mft record is not cached at this point in time, we need to wait
399  * for it to be read in before we can do the copy.
400  *
401  * Return 0 on success and -errno on error.
402  */
403 static int ntfs_readpage(struct file *file, struct page *page)
404 {
405         loff_t i_size;
406         struct inode *vi;
407         ntfs_inode *ni, *base_ni;
408         u8 *kaddr;
409         ntfs_attr_search_ctx *ctx;
410         MFT_RECORD *mrec;
411         unsigned long flags;
412         u32 attr_len;
413         int err = 0;
414
415 retry_readpage:
416         BUG_ON(!PageLocked(page));
417         /*
418          * This can potentially happen because we clear PageUptodate() during
419          * ntfs_writepage() of MstProtected() attributes.
420          */
421         if (PageUptodate(page)) {
422                 unlock_page(page);
423                 return 0;
424         }
425         vi = page->mapping->host;
426         ni = NTFS_I(vi);
427         /*
428          * Only $DATA attributes can be encrypted and only unnamed $DATA
429          * attributes can be compressed.  Index root can have the flags set but
430          * this means to create compressed/encrypted files, not that the
431          * attribute is compressed/encrypted.  Note we need to check for
432          * AT_INDEX_ALLOCATION since this is the type of both directory and
433          * index inodes.
434          */
435         if (ni->type != AT_INDEX_ALLOCATION) {
436                 /* If attribute is encrypted, deny access, just like NT4. */
437                 if (NInoEncrypted(ni)) {
438                         BUG_ON(ni->type != AT_DATA);
439                         err = -EACCES;
440                         goto err_out;
441                 }
442                 /* Compressed data streams are handled in compress.c. */
443                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
444                         BUG_ON(ni->type != AT_DATA);
445                         BUG_ON(ni->name_len);
446                         return ntfs_read_compressed_block(page);
447                 }
448         }
449         /* NInoNonResident() == NInoIndexAllocPresent() */
450         if (NInoNonResident(ni)) {
451                 /* Normal, non-resident data stream. */
452                 return ntfs_read_block(page);
453         }
454         /*
455          * Attribute is resident, implying it is not compressed or encrypted.
456          * This also means the attribute is smaller than an mft record and
457  * hence smaller than a page, so we can simply zero out any pages with
458          * index above 0.  Note the attribute can actually be marked compressed
459          * but if it is resident the actual data is not compressed so we are
460          * ok to ignore the compressed flag here.
461          */
462         if (unlikely(page->index > 0)) {
463                 kaddr = kmap_atomic(page, KM_USER0);
464                 memset(kaddr, 0, PAGE_CACHE_SIZE);
465                 flush_dcache_page(page);
466                 kunmap_atomic(kaddr, KM_USER0);
467                 goto done;
468         }
469         if (!NInoAttr(ni))
470                 base_ni = ni;
471         else
472                 base_ni = ni->ext.base_ntfs_ino;
473         /* Map, pin, and lock the mft record. */
474         mrec = map_mft_record(base_ni);
475         if (IS_ERR(mrec)) {
476                 err = PTR_ERR(mrec);
477                 goto err_out;
478         }
479         /*
480          * If a parallel write made the attribute non-resident, drop the mft
481          * record and retry the readpage.
482          */
483         if (unlikely(NInoNonResident(ni))) {
484                 unmap_mft_record(base_ni);
485                 goto retry_readpage;
486         }
487         ctx = ntfs_attr_get_search_ctx(base_ni, mrec);
488         if (unlikely(!ctx)) {
489                 err = -ENOMEM;
490                 goto unm_err_out;
491         }
492         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
493                         CASE_SENSITIVE, 0, NULL, 0, ctx);
494         if (unlikely(err))
495                 goto put_unm_err_out;
496         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
497         read_lock_irqsave(&ni->size_lock, flags);
498         if (unlikely(attr_len > ni->initialized_size))
499                 attr_len = ni->initialized_size;
500         i_size = i_size_read(vi);
501         read_unlock_irqrestore(&ni->size_lock, flags);
502         if (unlikely(attr_len > i_size)) {
503                 /* Race with shrinking truncate. */
504                 attr_len = i_size;
505         }
506         kaddr = kmap_atomic(page, KM_USER0);
507         /* Copy the data to the page. */
508         memcpy(kaddr, (u8*)ctx->attr +
509                         le16_to_cpu(ctx->attr->data.resident.value_offset),
510                         attr_len);
511         /* Zero the remainder of the page. */
512         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
513         flush_dcache_page(page);
514         kunmap_atomic(kaddr, KM_USER0);
515 put_unm_err_out:
516         ntfs_attr_put_search_ctx(ctx);
517 unm_err_out:
518         unmap_mft_record(base_ni);
519 done:
520         SetPageUptodate(page);
521 err_out:
522         unlock_page(page);
523         return err;
524 }
525
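/*
 * A minimal illustrative sketch (not built into the driver) of the clamping
 * ntfs_readpage() applies before copying a resident attribute value: the
 * copy length starts as the on-disk value_length and is bounded by both the
 * initialized size and i_size so a racing shrinking truncate cannot expose
 * stale bytes; the remainder of the page is zeroed.  The helper name is
 * hypothetical.
 */
#if 0
static inline u32 ntfs_example_resident_copy_len(u32 value_length,
		s64 initialized_size, loff_t i_size)
{
	u32 len = value_length;

	if (len > initialized_size)
		len = initialized_size;
	if (len > i_size)
		len = i_size;
	return len;
}
#endif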
526 #ifdef NTFS_RW
527
528 /**
529  * ntfs_write_block - write a @page to the backing store
530  * @page:       page cache page to write out
531  * @wbc:        writeback control structure
532  *
533  * This function is for writing pages belonging to non-resident, non-mst
534  * protected attributes to their backing store.
535  *
536  * For a page with buffers, map and write the dirty buffers asynchronously
537  * under page writeback. For a page without buffers, create buffers for the
538  * page, then proceed as above.
539  *
540  * If a page doesn't have buffers the page dirty state is definitive. If a page
541  * does have buffers, the page dirty state is just a hint, and the buffer dirty
542  * state is definitive. (A hint which has rules: dirty buffers against a clean
543  * page is illegal. Other combinations are legal and need to be handled; in
544  * particular, a dirty page may contain clean buffers.)
545  *
546  * Return 0 on success and -errno on error.
547  *
548  * Based on ntfs_read_block() and __block_write_full_page().
549  */
550 static int ntfs_write_block(struct page *page, struct writeback_control *wbc)
551 {
552         VCN vcn;
553         LCN lcn;
554         s64 initialized_size;
555         loff_t i_size;
556         sector_t block, dblock, iblock;
557         struct inode *vi;
558         ntfs_inode *ni;
559         ntfs_volume *vol;
560         runlist_element *rl;
561         struct buffer_head *bh, *head;
562         unsigned long flags;
563         unsigned int blocksize, vcn_ofs;
564         int err;
565         bool need_end_writeback;
566         unsigned char blocksize_bits;
567
568         vi = page->mapping->host;
569         ni = NTFS_I(vi);
570         vol = ni->vol;
571
572         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
573                         "0x%lx.", ni->mft_no, ni->type, page->index);
574
575         BUG_ON(!NInoNonResident(ni));
576         BUG_ON(NInoMstProtected(ni));
577         blocksize = vol->sb->s_blocksize;
578         blocksize_bits = vol->sb->s_blocksize_bits;
579         if (!page_has_buffers(page)) {
580                 BUG_ON(!PageUptodate(page));
581                 create_empty_buffers(page, blocksize,
582                                 (1 << BH_Uptodate) | (1 << BH_Dirty));
583                 if (unlikely(!page_has_buffers(page))) {
584                         ntfs_warning(vol->sb, "Error allocating page "
585                                         "buffers.  Redirtying page so we try "
586                                         "again later.");
587                         /*
588                          * Put the page back on mapping->dirty_pages, but leave
589                          * its buffers' dirty state as-is.
590                          */
591                         redirty_page_for_writepage(wbc, page);
592                         unlock_page(page);
593                         return 0;
594                 }
595         }
596         bh = head = page_buffers(page);
597         BUG_ON(!bh);
598
599         /* NOTE: Different naming scheme to ntfs_read_block()! */
600
601         /* The first block in the page. */
602         block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits);
603
604         read_lock_irqsave(&ni->size_lock, flags);
605         i_size = i_size_read(vi);
606         initialized_size = ni->initialized_size;
607         read_unlock_irqrestore(&ni->size_lock, flags);
608
609         /* The first out of bounds block for the data size. */
610         dblock = (i_size + blocksize - 1) >> blocksize_bits;
611
612         /* The last (fully or partially) initialized block. */
613         iblock = initialized_size >> blocksize_bits;
614
615         /*
616          * Be very careful.  We have no exclusion from __set_page_dirty_buffers
617          * here, and the (potentially unmapped) buffers may become dirty at
618          * any time.  If a buffer becomes dirty here after we've inspected it
619          * then we just miss that fact, and the page stays dirty.
620          *
621          * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
622          * handle that here by just cleaning them.
623          */
624
625         /*
626          * Loop through all the buffers in the page, mapping all the dirty
627          * buffers to disk addresses and handling any aliases from the
628          * underlying block device's mapping.
629          */
630         rl = NULL;
631         err = 0;
632         do {
633                 bool is_retry = false;
634
635                 if (unlikely(block >= dblock)) {
636                         /*
637                          * Mapped buffers outside i_size will occur, because
638                          * this page can be outside i_size when there is a
639                          * truncate in progress. The contents of such buffers
640                          * were zeroed by ntfs_writepage().
641                          *
642                          * FIXME: What about the small race window where
643                          * ntfs_writepage() has not done any clearing because
644                          * the page was within i_size but before we get here,
645                          * vmtruncate() modifies i_size?
646                          */
647                         clear_buffer_dirty(bh);
648                         set_buffer_uptodate(bh);
649                         continue;
650                 }
651
652                 /* Clean buffers are not written out, so no need to map them. */
653                 if (!buffer_dirty(bh))
654                         continue;
655
656                 /* Make sure we have enough initialized size. */
657                 if (unlikely((block >= iblock) &&
658                                 (initialized_size < i_size))) {
659                         /*
660                          * If this page is fully outside initialized size, zero
661                          * out all pages between the current initialized size
662                          * and the current page. Just use ntfs_readpage() to do
663                          * the zeroing transparently.
664                          */
665                         if (block > iblock) {
666                                 // TODO:
667                                 // For each page do:
668                                 // - read_cache_page()
669                                 // Again for each page do:
670                                 // - wait_on_page_locked()
671                                 // - Check (PageUptodate(page) &&
672                                 //                      !PageError(page))
673                                 // Update initialized size in the attribute and
674                                 // in the inode.
675                                 // Again, for each page do:
676                                 //      __set_page_dirty_buffers();
677                                 // page_cache_release()
678                                 // We don't need to wait on the writes.
679                                 // Update iblock.
680                         }
681                         /*
682                          * The current page straddles initialized size. Zero
683                          * all non-uptodate buffers and set them uptodate (and
684                          * dirty?). Note, there aren't any non-uptodate buffers
685                          * if the page is uptodate.
686                          * FIXME: For an uptodate page, the buffers may need to
687                          * be written out because they were not initialized on
688                          * disk before.
689                          */
690                         if (!PageUptodate(page)) {
691                                 // TODO:
692                                 // Zero any non-uptodate buffers up to i_size.
693                                 // Set them uptodate and dirty.
694                         }
695                         // TODO:
696                         // Update initialized size in the attribute and in the
697                         // inode (up to i_size).
698                         // Update iblock.
699                         // FIXME: This is inefficient. Try to batch the two
700                         // size changes to happen in one go.
701                         ntfs_error(vol->sb, "Writing beyond initialized size "
702                                         "is not supported yet. Sorry.");
703                         err = -EOPNOTSUPP;
704                         break;
705                         // Do NOT set_buffer_new() BUT DO clear buffer range
706                         // outside write request range.
707                         // set_buffer_uptodate() on complete buffers as well as
708                         // set_buffer_dirty().
709                 }
710
711                 /* No need to map buffers that are already mapped. */
712                 if (buffer_mapped(bh))
713                         continue;
714
715                 /* Unmapped, dirty buffer. Need to map it. */
716                 bh->b_bdev = vol->sb->s_bdev;
717
718                 /* Convert block into corresponding vcn and offset. */
719                 vcn = (VCN)block << blocksize_bits;
720                 vcn_ofs = vcn & vol->cluster_size_mask;
721                 vcn >>= vol->cluster_size_bits;
722                 if (!rl) {
723 lock_retry_remap:
724                         down_read(&ni->runlist.lock);
725                         rl = ni->runlist.rl;
726                 }
727                 if (likely(rl != NULL)) {
728                         /* Seek to element containing target vcn. */
729                         while (rl->length && rl[1].vcn <= vcn)
730                                 rl++;
731                         lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
732                 } else
733                         lcn = LCN_RL_NOT_MAPPED;
734                 /* Successful remap. */
735                 if (lcn >= 0) {
736                         /* Setup buffer head to point to correct block. */
737                         bh->b_blocknr = ((lcn << vol->cluster_size_bits) +
738                                         vcn_ofs) >> blocksize_bits;
739                         set_buffer_mapped(bh);
740                         continue;
741                 }
742                 /* It is a hole, need to instantiate it. */
743                 if (lcn == LCN_HOLE) {
744                         u8 *kaddr;
745                         unsigned long *bpos, *bend;
746
747                         /* Check if the buffer is zero. */
748                         kaddr = kmap_atomic(page, KM_USER0);
749                         bpos = (unsigned long *)(kaddr + bh_offset(bh));
750                         bend = (unsigned long *)((u8*)bpos + blocksize);
751                         do {
752                                 if (unlikely(*bpos))
753                                         break;
754                         } while (likely(++bpos < bend));
755                         kunmap_atomic(kaddr, KM_USER0);
756                         if (bpos == bend) {
757                                 /*
758                                  * Buffer is zero and sparse, no need to write
759                                  * it.
760                                  */
761                                 bh->b_blocknr = -1;
762                                 clear_buffer_dirty(bh);
763                                 continue;
764                         }
765                         // TODO: Instantiate the hole.
766                         // clear_buffer_new(bh);
767                         // unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
768                         ntfs_error(vol->sb, "Writing into sparse regions is "
769                                         "not supported yet. Sorry.");
770                         err = -EOPNOTSUPP;
771                         break;
772                 }
773                 /* If first try and runlist unmapped, map and retry. */
774                 if (!is_retry && lcn == LCN_RL_NOT_MAPPED) {
775                         is_retry = true;
776                         /*
777                          * Attempt to map runlist, dropping lock for
778                          * the duration.
779                          */
780                         up_read(&ni->runlist.lock);
781                         err = ntfs_map_runlist(ni, vcn);
782                         if (likely(!err))
783                                 goto lock_retry_remap;
784                         rl = NULL;
785                 } else if (!rl)
786                         up_read(&ni->runlist.lock);
787                 /*
788                  * If buffer is outside the runlist, truncate has cut it out
789                  * of the runlist.  Just clean and clear the buffer and set it
790                  * uptodate so it can get discarded by the VM.
791                  */
792                 if (err == -ENOENT || lcn == LCN_ENOENT) {
793                         u8 *kaddr;
794
795                         bh->b_blocknr = -1;
796                         clear_buffer_dirty(bh);
797                         kaddr = kmap_atomic(page, KM_USER0);
798                         memset(kaddr + bh_offset(bh), 0, blocksize);
799                         kunmap_atomic(kaddr, KM_USER0);
800                         flush_dcache_page(page);
801                         set_buffer_uptodate(bh);
802                         err = 0;
803                         continue;
804                 }
805                 /* Failed to map the buffer, even after retrying. */
806                 if (!err)
807                         err = -EIO;
808                 bh->b_blocknr = -1;
809                 ntfs_error(vol->sb, "Failed to write to inode 0x%lx, "
810                                 "attribute type 0x%x, vcn 0x%llx, offset 0x%x "
811                                 "because its location on disk could not be "
812                                 "determined%s (error code %i).", ni->mft_no,
813                                 ni->type, (unsigned long long)vcn,
814                                 vcn_ofs, is_retry ? " even after "
815                                 "retrying" : "", err);
816                 break;
817         } while (block++, (bh = bh->b_this_page) != head);
818
819         /* Release the lock if we took it. */
820         if (rl)
821                 up_read(&ni->runlist.lock);
822
823         /* For the error case, need to reset bh to the beginning. */
824         bh = head;
825
826         /* Just an optimization, so ->readpage() is not called later. */
827         if (unlikely(!PageUptodate(page))) {
828                 int uptodate = 1;
829                 do {
830                         if (!buffer_uptodate(bh)) {
831                                 uptodate = 0;
832                                 bh = head;
833                                 break;
834                         }
835                 } while ((bh = bh->b_this_page) != head);
836                 if (uptodate)
837                         SetPageUptodate(page);
838         }
839
840         /* Setup all mapped, dirty buffers for async write i/o. */
841         do {
842                 if (buffer_mapped(bh) && buffer_dirty(bh)) {
843                         lock_buffer(bh);
844                         if (test_clear_buffer_dirty(bh)) {
845                                 BUG_ON(!buffer_uptodate(bh));
846                                 mark_buffer_async_write(bh);
847                         } else
848                                 unlock_buffer(bh);
849                 } else if (unlikely(err)) {
850                         /*
851                          * For the error case. The buffer may have been set
852                          * dirty during attachment to a dirty page.
853                          */
854                         if (err != -ENOMEM)
855                                 clear_buffer_dirty(bh);
856                 }
857         } while ((bh = bh->b_this_page) != head);
858
859         if (unlikely(err)) {
860                 // TODO: Remove the -EOPNOTSUPP check later on...
861                 if (unlikely(err == -EOPNOTSUPP))
862                         err = 0;
863                 else if (err == -ENOMEM) {
864                         ntfs_warning(vol->sb, "Error allocating memory. "
865                                         "Redirtying page so we try again "
866                                         "later.");
867                         /*
868                          * Put the page back on mapping->dirty_pages, but
869                          * leave its buffers' dirty state as-is.
870                          */
871                         redirty_page_for_writepage(wbc, page);
872                         err = 0;
873                 } else
874                         SetPageError(page);
875         }
876
877         BUG_ON(PageWriteback(page));
878         set_page_writeback(page);       /* Keeps try_to_free_buffers() away. */
879
880         /* Submit the prepared buffers for i/o. */
881         need_end_writeback = true;
882         do {
883                 struct buffer_head *next = bh->b_this_page;
884                 if (buffer_async_write(bh)) {
885                         submit_bh(WRITE, bh);
886                         need_end_writeback = false;
887                 }
888                 bh = next;
889         } while (bh != head);
890         unlock_page(page);
891
892         /* If no i/o was started, need to end_page_writeback(). */
893         if (unlikely(need_end_writeback))
894                 end_page_writeback(page);
895
896         ntfs_debug("Done.");
897         return err;
898 }
899
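/*
 * A minimal illustrative sketch (not built into the driver) of the zero
 * detection idiom ntfs_write_block() above uses on buffers that fall into a
 * sparse region: the buffer is scanned a word at a time and only needs to
 * be written, i.e. the hole only needs to be instantiated, if any word is
 * non zero.  The helper name is hypothetical.
 */
#if 0
static inline bool ntfs_example_buffer_is_zero(const u8 *buf,
		unsigned int blocksize)
{
	const unsigned long *bpos = (const unsigned long *)buf;
	const unsigned long *bend = (const unsigned long *)(buf + blocksize);

	while (bpos < bend)
		if (*bpos++)
			return false;
	return true;
}
#endif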
900 /**
901  * ntfs_write_mst_block - write a @page to the backing store
902  * @page:       page cache page to write out
903  * @wbc:        writeback control structure
904  *
905  * This function is for writing pages belonging to non-resident, mst protected
906  * attributes to their backing store.  The only supported attributes are index
907  * allocation and $MFT/$DATA.  Both directory inodes and index inodes are
908  * supported for the index allocation case.
909  *
910  * The page must remain locked for the duration of the write because we apply
911  * the mst fixups, write, and then undo the fixups, so if we were to unlock the
912  * page before undoing the fixups, any other user of the page will see the
913  * page contents as corrupt.
914  *
915  * We clear the page uptodate flag for the duration of the function to ensure
916  * exclusion for the $MFT/$DATA case against someone mapping an mft record we
917  * are about to apply the mst fixups to.
918  *
919  * Return 0 on success and -errno on error.
920  *
921  * Based on ntfs_write_block(), ntfs_mft_writepage(), and
922  * write_mft_record_nolock().
923  */
924 static int ntfs_write_mst_block(struct page *page,
925                 struct writeback_control *wbc)
926 {
927         sector_t block, dblock, rec_block;
928         struct inode *vi = page->mapping->host;
929         ntfs_inode *ni = NTFS_I(vi);
930         ntfs_volume *vol = ni->vol;
931         u8 *kaddr;
932         unsigned int rec_size = ni->itype.index.block_size;
933         ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size];
934         struct buffer_head *bh, *head, *tbh, *rec_start_bh;
935         struct buffer_head *bhs[MAX_BUF_PER_PAGE];
936         runlist_element *rl;
937         int i, nr_locked_nis, nr_recs, nr_bhs, max_bhs, bhs_per_rec, err, err2;
938         unsigned bh_size, rec_size_bits;
939         bool sync, is_mft, page_is_dirty, rec_is_dirty;
940         unsigned char bh_size_bits;
941
942         ntfs_debug("Entering for inode 0x%lx, attribute type 0x%x, page index "
943                         "0x%lx.", vi->i_ino, ni->type, page->index);
944         BUG_ON(!NInoNonResident(ni));
945         BUG_ON(!NInoMstProtected(ni));
946         is_mft = (S_ISREG(vi->i_mode) && !vi->i_ino);
947         /*
948          * NOTE: ntfs_write_mst_block() would be called for $MFTMirr if a page
949          * in its page cache were to be marked dirty.  However this should
950          * never happen with the current driver and considering we do not
951          * handle this case here we do want to BUG(), at least for now.
952          */
953         BUG_ON(!(is_mft || S_ISDIR(vi->i_mode) ||
954                         (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION)));
955         bh_size = vol->sb->s_blocksize;
956         bh_size_bits = vol->sb->s_blocksize_bits;
957         max_bhs = PAGE_CACHE_SIZE / bh_size;
958         BUG_ON(!max_bhs);
959         BUG_ON(max_bhs > MAX_BUF_PER_PAGE);
960
961         /* Were we called for sync purposes? */
962         sync = (wbc->sync_mode == WB_SYNC_ALL);
963
964         /* Make sure we have mapped buffers. */
965         bh = head = page_buffers(page);
966         BUG_ON(!bh);
967
968         rec_size_bits = ni->itype.index.block_size_bits;
969         BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits));
970         bhs_per_rec = rec_size >> bh_size_bits;
971         BUG_ON(!bhs_per_rec);
972
973         /* The first block in the page. */
974         rec_block = block = (sector_t)page->index <<
975                         (PAGE_CACHE_SHIFT - bh_size_bits);
976
977         /* The first out of bounds block for the data size. */
978         dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits;
979
980         rl = NULL;
981         err = err2 = nr_bhs = nr_recs = nr_locked_nis = 0;
982         page_is_dirty = rec_is_dirty = false;
983         rec_start_bh = NULL;
984         do {
985                 bool is_retry = false;
986
987                 if (likely(block < rec_block)) {
988                         if (unlikely(block >= dblock)) {
989                                 clear_buffer_dirty(bh);
990                                 set_buffer_uptodate(bh);
991                                 continue;
992                         }
993                         /*
994                          * This block is not the first one in the record.  We
995                          * ignore the buffer's dirty state because we could
996                          * have raced with a parallel mark_ntfs_record_dirty().
997                          */
998                         if (!rec_is_dirty)
999                                 continue;
1000                         if (unlikely(err2)) {
1001                                 if (err2 != -ENOMEM)
1002                                         clear_buffer_dirty(bh);
1003                                 continue;
1004                         }
1005                 } else /* if (block == rec_block) */ {
1006                         BUG_ON(block > rec_block);
1007                         /* This block is the first one in the record. */
1008                         rec_block += bhs_per_rec;
1009                         err2 = 0;
1010                         if (unlikely(block >= dblock)) {
1011                                 clear_buffer_dirty(bh);
1012                                 continue;
1013                         }
1014                         if (!buffer_dirty(bh)) {
1015                                 /* Clean records are not written out. */
1016                                 rec_is_dirty = false;
1017                                 continue;
1018                         }
1019                         rec_is_dirty = true;
1020                         rec_start_bh = bh;
1021                 }
1022                 /* Need to map the buffer if it is not mapped already. */
1023                 if (unlikely(!buffer_mapped(bh))) {
1024                         VCN vcn;
1025                         LCN lcn;
1026                         unsigned int vcn_ofs;
1027
1028                         bh->b_bdev = vol->sb->s_bdev;
1029                         /* Obtain the vcn and offset of the current block. */
1030                         vcn = (VCN)block << bh_size_bits;
1031                         vcn_ofs = vcn & vol->cluster_size_mask;
1032                         vcn >>= vol->cluster_size_bits;
1033                         if (!rl) {
1034 lock_retry_remap:
1035                                 down_read(&ni->runlist.lock);
1036                                 rl = ni->runlist.rl;
1037                         }
1038                         if (likely(rl != NULL)) {
1039                                 /* Seek to element containing target vcn. */
1040                                 while (rl->length && rl[1].vcn <= vcn)
1041                                         rl++;
1042                                 lcn = ntfs_rl_vcn_to_lcn(rl, vcn);
1043                         } else
1044                                 lcn = LCN_RL_NOT_MAPPED;
1045                         /* Successful remap. */
1046                         if (likely(lcn >= 0)) {
1047                                 /* Setup buffer head to correct block. */
1048                                 bh->b_blocknr = ((lcn <<
1049                                                 vol->cluster_size_bits) +
1050                                                 vcn_ofs) >> bh_size_bits;
1051                                 set_buffer_mapped(bh);
1052                         } else {
1053                                 /*
1054                                  * Remap failed.  Retry to map the runlist once
1055                                  * unless we are working on $MFT which always
1056                                  * has the whole of its runlist in memory.
1057                                  */
1058                                 if (!is_mft && !is_retry &&
1059                                                 lcn == LCN_RL_NOT_MAPPED) {
1060                                         is_retry = true;
1061                                         /*
1062                                          * Attempt to map runlist, dropping
1063                                          * lock for the duration.
1064                                          */
1065                                         up_read(&ni->runlist.lock);
1066                                         err2 = ntfs_map_runlist(ni, vcn);
1067                                         if (likely(!err2))
1068                                                 goto lock_retry_remap;
1069                                         if (err2 == -ENOMEM)
1070                                                 page_is_dirty = true;
1071                                         lcn = err2;
1072                                 } else {
1073                                         err2 = -EIO;
1074                                         if (!rl)
1075                                                 up_read(&ni->runlist.lock);
1076                                 }
1077                                 /* Hard error.  Abort writing this record. */
1078                                 if (!err || err == -ENOMEM)
1079                                         err = err2;
1080                                 bh->b_blocknr = -1;
1081                                 ntfs_error(vol->sb, "Cannot write ntfs record "
1082                                                 "0x%llx (inode 0x%lx, "
1083                                                 "attribute type 0x%x) because "
1084                                                 "its location on disk could "
1085                                                 "not be determined (error "
1086                                                 "code %lli).",
1087                                                 (long long)block <<
1088                                                 bh_size_bits >>
1089                                                 vol->mft_record_size_bits,
1090                                                 ni->mft_no, ni->type,
1091                                                 (long long)lcn);
1092                                 /*
1093                                  * If this is not the first buffer, remove the
1094                                  * buffers in this record from the list of
1095                                  * buffers to write and clear their dirty bit
1096                                  * if not error -ENOMEM.
1097                                  */
1098                                 if (rec_start_bh != bh) {
1099                                         while (bhs[--nr_bhs] != rec_start_bh)
1100                                                 ;
1101                                         if (err2 != -ENOMEM) {
1102                                                 do {
1103                                                         clear_buffer_dirty(
1104                                                                 rec_start_bh);
1105                                                 } while ((rec_start_bh =
1106                                                                 rec_start_bh->
1107                                                                 b_this_page) !=
1108                                                                 bh);
1109                                         }
1110                                 }
1111                                 continue;
1112                         }
1113                 }
1114                 BUG_ON(!buffer_uptodate(bh));
1115                 BUG_ON(nr_bhs >= max_bhs);
1116                 bhs[nr_bhs++] = bh;
1117         } while (block++, (bh = bh->b_this_page) != head);
1118         if (unlikely(rl))
1119                 up_read(&ni->runlist.lock);
1120         /* If there were no dirty buffers, we are done. */
1121         if (!nr_bhs)
1122                 goto done;
1123         /* Map the page so we can access its contents. */
1124         kaddr = kmap(page);
1125         /* Clear the page uptodate flag whilst the mst fixups are applied. */
1126         BUG_ON(!PageUptodate(page));
1127         ClearPageUptodate(page);
1128         for (i = 0; i < nr_bhs; i++) {
1129                 unsigned int ofs;
1130
1131                 /* Skip buffers which are not at the beginning of records. */
1132                 if (i % bhs_per_rec)
1133                         continue;
1134                 tbh = bhs[i];
1135                 ofs = bh_offset(tbh);
1136                 if (is_mft) {
1137                         ntfs_inode *tni;
1138                         unsigned long mft_no;
1139
1140                         /* Get the mft record number. */
1141                         mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1142                                         >> rec_size_bits;
1143                         /* Check whether to write this mft record. */
1144                         tni = NULL;
1145                         if (!ntfs_may_write_mft_record(vol, mft_no,
1146                                         (MFT_RECORD*)(kaddr + ofs), &tni)) {
1147                                 /*
1148                                  * The record should not be written.  This
1149                                  * means we need to redirty the page before
1150                                  * returning.
1151                                  */
1152                                 page_is_dirty = true;
1153                                 /*
1154                                  * Remove the buffers in this mft record from
1155                                  * the list of buffers to write.
1156                                  */
1157                                 do {
1158                                         bhs[i] = NULL;
1159                                 } while (++i % bhs_per_rec);
1160                                 continue;
1161                         }
1162                         /*
1163                          * The record should be written.  If a locked ntfs
1164                          * inode was returned, add it to the array of locked
1165                          * ntfs inodes.
1166                          */
1167                         if (tni)
1168                                 locked_nis[nr_locked_nis++] = tni;
1169                 }
1170                 /* Apply the mst protection fixups. */
1171                 err2 = pre_write_mst_fixup((NTFS_RECORD*)(kaddr + ofs),
1172                                 rec_size);
1173                 if (unlikely(err2)) {
1174                         if (!err || err == -ENOMEM)
1175                                 err = -EIO;
1176                         ntfs_error(vol->sb, "Failed to apply mst fixups "
1177                                         "(inode 0x%lx, attribute type 0x%x, "
1178                                         "page index 0x%lx, page offset 0x%x)!"
1179                                         "  Unmount and run chkdsk.", vi->i_ino,
1180                                         ni->type, page->index, ofs);
1181                         /*
1182                          * Mark all the buffers in this record clean as we do
1183                          * not want to write corrupt data to disk.
1184                          */
1185                         do {
1186                                 clear_buffer_dirty(bhs[i]);
1187                                 bhs[i] = NULL;
1188                         } while (++i % bhs_per_rec);
1189                         continue;
1190                 }
1191                 nr_recs++;
1192         }
1193         /* If no records are to be written out, we are done. */
1194         if (!nr_recs)
1195                 goto unm_done;
1196         flush_dcache_page(page);
1197         /* Lock buffers and start synchronous write i/o on them. */
1198         for (i = 0; i < nr_bhs; i++) {
1199                 tbh = bhs[i];
1200                 if (!tbh)
1201                         continue;
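                     /*
                      * We hold the page lock, so no one else should have this
                      * buffer locked; failing to take the buffer lock here is
                      * therefore a bug.
                      */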
1202                 if (unlikely(test_set_buffer_locked(tbh)))
1203                         BUG();
1204                 /* The buffer dirty state is now irrelevant, just clean it. */
1205                 clear_buffer_dirty(tbh);
1206                 BUG_ON(!buffer_uptodate(tbh));
1207                 BUG_ON(!buffer_mapped(tbh));
1208                 get_bh(tbh);
1209                 tbh->b_end_io = end_buffer_write_sync;
1210                 submit_bh(WRITE, tbh);
1211         }
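             /*
              * Note the goto interplay below: when @sync is false, we jump to
              * do_mirror first so the mft mirror update is started before we
              * wait on the record buffers at do_wait, whilst when @sync is
              * true the mirror is only brought up to date after the wait
              * below has completed.
              */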
1212         /* Synchronize the mft mirror now if not @sync. */
1213         if (is_mft && !sync)
1214                 goto do_mirror;
1215 do_wait:
1216         /* Wait on i/o completion of buffers. */
1217         for (i = 0; i < nr_bhs; i++) {
1218                 tbh = bhs[i];
1219                 if (!tbh)
1220                         continue;
1221                 wait_on_buffer(tbh);
1222                 if (unlikely(!buffer_uptodate(tbh))) {
1223                         ntfs_error(vol->sb, "I/O error while writing ntfs "
1224                                         "record buffer (inode 0x%lx, "
1225                                         "attribute type 0x%x, page index "
1226                                         "0x%lx, page offset 0x%lx)!  Unmount "
1227                                         "and run chkdsk.", vi->i_ino, ni->type,
1228                                         page->index, bh_offset(tbh));
1229                         if (!err || err == -ENOMEM)
1230                                 err = -EIO;
1231                         /*
1232                          * Set the buffer uptodate so the page and buffer
1233                          * states do not become out of sync.
1234                          */
1235                         set_buffer_uptodate(tbh);
1236                 }
1237         }
1238         /* If @sync, now synchronize the mft mirror. */
1239         if (is_mft && sync) {
1240 do_mirror:
1241                 for (i = 0; i < nr_bhs; i++) {
1242                         unsigned long mft_no;
1243                         unsigned int ofs;
1244
1245                         /*
1246                          * Skip buffers which are not at the beginning of
1247                          * records.
1248                          */
1249                         if (i % bhs_per_rec)
1250                                 continue;
1251                         tbh = bhs[i];
1252                         /* Skip removed buffers (and hence records). */
1253                         if (!tbh)
1254                                 continue;
1255                         ofs = bh_offset(tbh);
1256                         /* Get the mft record number. */
1257                         mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs)
1258                                         >> rec_size_bits;
1259                         if (mft_no < vol->mftmirr_size)
1260                                 ntfs_sync_mft_mirror(vol, mft_no,
1261                                                 (MFT_RECORD*)(kaddr + ofs),
1262                                                 sync);
1263                 }
1264                 if (!sync)
1265                         goto do_wait;
1266         }
1267         /* Remove the mst protection fixups again. */
1268         for (i = 0; i < nr_bhs; i++) {
1269                 if (!(i % bhs_per_rec)) {
1270                         tbh = bhs[i];
1271                         if (!tbh)
1272                                 continue;
1273                         post_write_mst_fixup((NTFS_RECORD*)(kaddr +
1274                                         bh_offset(tbh)));
1275                 }
1276         }
1277         flush_dcache_page(page);
1278 unm_done:
1279         /* Unlock any locked inodes. */
1280         while (nr_locked_nis-- > 0) {
1281                 ntfs_inode *tni, *base_tni;
1282
1283                 tni = locked_nis[nr_locked_nis];
1284                 /* Get the base inode. */
1285                 mutex_lock(&tni->extent_lock);
1286                 if (tni->nr_extents >= 0)
1287                         base_tni = tni;
1288                 else {
1289                         base_tni = tni->ext.base_ntfs_ino;
1290                         BUG_ON(!base_tni);
1291                 }
1292                 mutex_unlock(&tni->extent_lock);
1293                 ntfs_debug("Unlocking %s inode 0x%lx.",
1294                                 tni == base_tni ? "base" : "extent",
1295                                 tni->mft_no);
1296                 mutex_unlock(&tni->mrec_lock);
1297                 atomic_dec(&tni->count);
1298                 iput(VFS_I(base_tni));
1299         }
1300         SetPageUptodate(page);
1301         kunmap(page);
1302 done:
1303         if (unlikely(err && err != -ENOMEM)) {
1304                 /*
1305                  * Set page error if there is only one ntfs record in the page.
1306                  * Otherwise we would loose per-record granularity.
1307                  * Otherwise we would lose per-record granularity.
1308                 if (ni->itype.index.block_size == PAGE_CACHE_SIZE)
1309                         SetPageError(page);
1310                 NVolSetErrors(vol);
1311         }
1312         if (page_is_dirty) {
1313                 ntfs_debug("Page still contains one or more dirty ntfs "
1314                                 "records.  Redirtying the page starting at "
1315                                 "record 0x%lx.", page->index <<
1316                                 (PAGE_CACHE_SHIFT - rec_size_bits));
1317                 redirty_page_for_writepage(wbc, page);
1318                 unlock_page(page);
1319         } else {
1320                 /*
1321                  * Keep the VM happy.  This must be done otherwise the
1322                  * radix-tree tag PAGECACHE_TAG_DIRTY remains set even though
1323                  * the page is clean.
1324                  */
1325                 BUG_ON(PageWriteback(page));
1326                 set_page_writeback(page);
1327                 unlock_page(page);
1328                 end_page_writeback(page);
1329         }
1330         if (likely(!err))
1331                 ntfs_debug("Done.");
1332         return err;
1333 }
1334
1335 /**
1336  * ntfs_writepage - write a @page to the backing store
1337  * @page:       page cache page to write out
1338  * @wbc:        writeback control structure
1339  *
1340  * This is called from the VM when it wants to have a dirty ntfs page cache
1341  * page cleaned.  The VM has already locked the page and marked it clean.
1342  *
1343  * For non-resident attributes, ntfs_writepage() writes the @page by calling
1344  * the ntfs version of the generic block_write_full_page() function,
1345  * ntfs_write_block(), which in turn if necessary creates and writes the
1346  * buffers associated with the page asynchronously.
1347  *
1348  * For resident attributes, OTOH, ntfs_writepage() writes the @page by copying
1349  * the data to the mft record (which at this stage is most likely in memory).
1350  * The mft record is then marked dirty and written out asynchronously via the
1351  * vfs inode dirty code path for the inode the mft record belongs to or via the
1352  * vm page dirty code path for the page the mft record is in.
1353  *
1354  * Based on ntfs_readpage() and fs/buffer.c::block_write_full_page().
1355  *
1356  * Return 0 on success and -errno on error.
1357  */
1358 static int ntfs_writepage(struct page *page, struct writeback_control *wbc)
1359 {
1360         loff_t i_size;
1361         struct inode *vi = page->mapping->host;
1362         ntfs_inode *base_ni = NULL, *ni = NTFS_I(vi);
1363         char *kaddr;
1364         ntfs_attr_search_ctx *ctx = NULL;
1365         MFT_RECORD *m = NULL;
1366         u32 attr_len;
1367         int err;
1368
1369 retry_writepage:
1370         BUG_ON(!PageLocked(page));
1371         i_size = i_size_read(vi);
1372         /* Is the page fully outside i_size? (truncate in progress) */
1373         if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >>
1374                         PAGE_CACHE_SHIFT)) {
1375                 /*
1376                  * The page may have dirty, unmapped buffers.  Make them
1377                  * freeable here, so the page does not leak.
1378                  */
1379                 block_invalidatepage(page, 0);
1380                 unlock_page(page);
1381                 ntfs_debug("Write outside i_size - truncated?");
1382                 return 0;
1383         }
1384         /*
1385          * Only $DATA attributes can be encrypted and only unnamed $DATA
1386          * attributes can be compressed.  Index root can have the flags set,
1387          * but there they only mean that newly created files will be
1388          * compressed/encrypted, not that the attribute itself is.  Note we
1389          * need to check for AT_INDEX_ALLOCATION since this is the type of
1390          * both directory and index inodes.
1391          */
1392         if (ni->type != AT_INDEX_ALLOCATION) {
1393                 /* If file is encrypted, deny access, just like NT4. */
1394                 if (NInoEncrypted(ni)) {
1395                         unlock_page(page);
1396                         BUG_ON(ni->type != AT_DATA);
1397                         ntfs_debug("Denying write access to encrypted file.");
1398                         return -EACCES;
1399                 }
1400                 /* Compressed data streams are handled in compress.c. */
1401                 if (NInoNonResident(ni) && NInoCompressed(ni)) {
1402                         BUG_ON(ni->type != AT_DATA);
1403                         BUG_ON(ni->name_len);
1404                         // TODO: Implement and replace this with
1405                         // return ntfs_write_compressed_block(page);
1406                         unlock_page(page);
1407                         ntfs_error(vi->i_sb, "Writing to compressed files is "
1408                                         "not supported yet.  Sorry.");
1409                         return -EOPNOTSUPP;
1410                 }
1411                 // TODO: Implement and remove this check.
1412                 if (NInoNonResident(ni) && NInoSparse(ni)) {
1413                         unlock_page(page);
1414                         ntfs_error(vi->i_sb, "Writing to sparse files is not "
1415                                         "supported yet.  Sorry.");
1416                         return -EOPNOTSUPP;
1417                 }
1418         }
1419         /* NInoNonResident() == NInoIndexAllocPresent() */
1420         if (NInoNonResident(ni)) {
1421                 /* We have to zero every time due to mmap-at-end-of-file. */
1422                 if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) {
1423                         /* The page straddles i_size. */
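                             /*
                              * i_size & ~PAGE_CACHE_MASK is the byte offset
                              * of i_size within the page; zero from there to
                              * the end of the page.
                              */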
1424                         unsigned int ofs = i_size & ~PAGE_CACHE_MASK;
1425                         kaddr = kmap_atomic(page, KM_USER0);
1426                         memset(kaddr + ofs, 0, PAGE_CACHE_SIZE - ofs);
1427                         kunmap_atomic(kaddr, KM_USER0);
1428                         flush_dcache_page(page);
1429                 }
1430                 /* Handle mst protected attributes. */
1431                 if (NInoMstProtected(ni))
1432                         return ntfs_write_mst_block(page, wbc);
1433                 /* Normal, non-resident data stream. */
1434                 return ntfs_write_block(page, wbc);
1435         }
1436         /*
1437          * Attribute is resident, implying it is not compressed, encrypted, or
1438          * mst protected.  This also means the attribute is smaller than an mft
1439  * record and hence smaller than a page, so we can simply return error on
1440          * any pages with index above 0.  Note the attribute can actually be
1441          * marked compressed but if it is resident the actual data is not
1442          * compressed so we are ok to ignore the compressed flag here.
1443          */
1444         BUG_ON(page_has_buffers(page));
1445         BUG_ON(!PageUptodate(page));
1446         if (unlikely(page->index > 0)) {
1447                 ntfs_error(vi->i_sb, "BUG()! page->index (0x%lx) > 0.  "
1448                                 "Aborting write.", page->index);
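                     /*
                      * As with the "keep the VM happy" comments elsewhere in
                      * this file, go through the writeback cycle so the
                      * radix-tree tag PAGECACHE_TAG_DIRTY does not remain set
                      * even though the page is clean.
                      */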
1449                 BUG_ON(PageWriteback(page));
1450                 set_page_writeback(page);
1451                 unlock_page(page);
1452                 end_page_writeback(page);
1453                 return -EIO;
1454         }
1455         if (!NInoAttr(ni))
1456                 base_ni = ni;
1457         else
1458                 base_ni = ni->ext.base_ntfs_ino;
1459         /* Map, pin, and lock the mft record. */
1460         m = map_mft_record(base_ni);
1461         if (IS_ERR(m)) {
1462                 err = PTR_ERR(m);
1463                 m = NULL;
1464                 ctx = NULL;
1465                 goto err_out;
1466         }
1467         /*
1468          * If a parallel write made the attribute non-resident, drop the mft
1469          * record and retry the writepage.
1470          */
1471         if (unlikely(NInoNonResident(ni))) {
1472                 unmap_mft_record(base_ni);
1473                 goto retry_writepage;
1474         }
1475         ctx = ntfs_attr_get_search_ctx(base_ni, m);
1476         if (unlikely(!ctx)) {
1477                 err = -ENOMEM;
1478                 goto err_out;
1479         }
1480         err = ntfs_attr_lookup(ni->type, ni->name, ni->name_len,
1481                         CASE_SENSITIVE, 0, NULL, 0, ctx);
1482         if (unlikely(err))
1483                 goto err_out;
1484         /*
1485          * Keep the VM happy.  This must be done otherwise the radix-tree tag
1486          * PAGECACHE_TAG_DIRTY remains set even though the page is clean.
1487          */
1488         BUG_ON(PageWriteback(page));
1489         set_page_writeback(page);
1490         unlock_page(page);
1491         attr_len = le32_to_cpu(ctx->attr->data.resident.value_length);
1492         i_size = i_size_read(vi);
1493         if (unlikely(attr_len > i_size)) {
1494                 /* Race with shrinking truncate or a failed truncate. */
1495                 attr_len = i_size;
1496                 /*
1497                  * If the truncate failed, fix it up now.  If a concurrent
1498                  * If the truncate failed, fix it up now.  If a concurrent
1499                  * truncate is in progress, we do its job for it.
1500                 err = ntfs_resident_attr_value_resize(ctx->mrec, ctx->attr,
1501                                 attr_len);
1502                 /* Shrinking cannot fail. */
1503                 BUG_ON(err);
1504         }
1505         kaddr = kmap_atomic(page, KM_USER0);
1506         /* Copy the data from the page to the mft record. */
1507         memcpy((u8*)ctx->attr +
1508                         le16_to_cpu(ctx->attr->data.resident.value_offset),
1509                         kaddr, attr_len);
1510         /* Zero out of bounds area in the page cache page. */
1511         memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1512         kunmap_atomic(kaddr, KM_USER0);
1513         flush_dcache_page(page);
1514         flush_dcache_mft_record_page(ctx->ntfs_ino);
1515         /* We are done with the page. */
1516         end_page_writeback(page);
1517         /* Finally, mark the mft record dirty, so it gets written back. */
1518         mark_mft_record_dirty(ctx->ntfs_ino);
1519         ntfs_attr_put_search_ctx(ctx);
1520         unmap_mft_record(base_ni);
1521         return 0;
1522 err_out:
1523         if (err == -ENOMEM) {
1524                 ntfs_warning(vi->i_sb, "Error allocating memory. Redirtying "
1525                                 "page so we try again later.");
1526                 /*
1527                  * Put the page back on mapping->dirty_pages, but leave its
1528                  * buffers' dirty state as-is.
1529                  */
1530                 redirty_page_for_writepage(wbc, page);
1531                 err = 0;
1532         } else {
1533                 ntfs_error(vi->i_sb, "Resident attribute write failed with "
1534                                 "error %i.", err);
1535                 SetPageError(page);
1536                 NVolSetErrors(ni->vol);
1537         }
1538         unlock_page(page);
1539         if (ctx)
1540                 ntfs_attr_put_search_ctx(ctx);
1541         if (m)
1542                 unmap_mft_record(base_ni);
1543         return err;
1544 }
1545
1546 #endif  /* NTFS_RW */
1547
1548 /**
1549  * ntfs_aops - general address space operations for inodes and attributes
1550  */
1551 const struct address_space_operations ntfs_aops = {
1552         .readpage       = ntfs_readpage,        /* Fill page with data. */
1553         .sync_page      = block_sync_page,      /* Currently, just unplugs the
1554                                                    disk request queue. */
1555 #ifdef NTFS_RW
1556         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
1557 #endif /* NTFS_RW */
1558         .migratepage    = buffer_migrate_page,  /* Move a page cache page from
1559                                                    one physical page to
1560                                                    another. */
1561 };
1562
1563 /**
1564  * ntfs_mst_aops - general address space operations for mst protected inodes
1565  *                 and attributes
1566  */
1567 const struct address_space_operations ntfs_mst_aops = {
1568         .readpage       = ntfs_readpage,        /* Fill page with data. */
1569         .sync_page      = block_sync_page,      /* Currently, just unplugs the
1570                                                    disk request queue. */
1571 #ifdef NTFS_RW
1572         .writepage      = ntfs_writepage,       /* Write dirty page to disk. */
1573         .set_page_dirty = __set_page_dirty_nobuffers,   /* Set the page dirty
1574                                                    without touching the buffers
1575                                                    belonging to the page. */
1576 #endif /* NTFS_RW */
1577         .migratepage    = buffer_migrate_page,  /* Move a page cache page from
1578                                                    one physical page to
1579                                                    another. */
1580 };
1581
1582 #ifdef NTFS_RW
1583
1584 /**
1585  * mark_ntfs_record_dirty - mark an ntfs record dirty
1586  * @page:       page containing the ntfs record to mark dirty
1587  * @ofs:        byte offset within @page at which the ntfs record begins
1588  *
1589  * Set the buffers and the page in which the ntfs record is located dirty.
1590  *
1591  * The latter also marks the vfs inode the ntfs record belongs to dirty
1592  * (I_DIRTY_PAGES only).
1593  *
1594  * If the page does not have buffers, we create them and set them uptodate.
1595  * The page may not be locked which is why we need to handle the buffers under
1596  * the mapping->private_lock.  Once the buffers are marked dirty we no longer
1597  * need the lock since try_to_free_buffers() does not free dirty buffers.
1598  */
1599 void mark_ntfs_record_dirty(struct page *page, const unsigned int ofs)
{
1600         struct address_space *mapping = page->mapping;
1601         ntfs_inode *ni = NTFS_I(mapping->host);
1602         struct buffer_head *bh, *head, *buffers_to_free = NULL;
1603         unsigned int end, bh_size, bh_ofs;
1604
1605         BUG_ON(!PageUptodate(page));
1606         end = ofs + ni->itype.index.block_size;
1607         bh_size = VFS_I(ni)->i_sb->s_blocksize;
1608         spin_lock(&mapping->private_lock);
1609         if (unlikely(!page_has_buffers(page))) {
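                     /*
                      * Drop the lock whilst allocating the buffer heads as
                      * the allocation may sleep, then retake it and check
                      * whether someone else attached buffers to the page in
                      * the meantime.
                      */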
1610                 spin_unlock(&mapping->private_lock);
1611                 bh = head = alloc_page_buffers(page, bh_size, 1);
1612                 spin_lock(&mapping->private_lock);
1613                 if (likely(!page_has_buffers(page))) {
1614                         struct buffer_head *tail;
1615
1616                         do {
1617                                 set_buffer_uptodate(bh);
1618                                 tail = bh;
1619                                 bh = bh->b_this_page;
1620                         } while (bh);
1621                         tail->b_this_page = head;
1622                         attach_page_buffers(page, head);
1623                 } else
1624                         buffers_to_free = bh;
1625         }
1626         bh = head = page_buffers(page);
1627         BUG_ON(!bh);
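             /*
              * Set dirty all buffers which overlap the ntfs record, i.e. the
              * byte range [ofs, end).
              */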
1628         do {
1629                 bh_ofs = bh_offset(bh);
1630                 if (bh_ofs + bh_size <= ofs)
1631                         continue;
1632                 if (unlikely(bh_ofs >= end))
1633                         break;
1634                 set_buffer_dirty(bh);
1635         } while ((bh = bh->b_this_page) != head);
1636         spin_unlock(&mapping->private_lock);
1637         __set_page_dirty_nobuffers(page);
1638         if (unlikely(buffers_to_free)) {
1639                 do {
1640                         bh = buffers_to_free->b_this_page;
1641                         free_buffer_head(buffers_to_free);
1642                         buffers_to_free = bh;
1643                 } while (buffers_to_free);
1644         }
1645 }
1646
1647 #endif /* NTFS_RW */