/*
 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
 */

#include <linux/time.h>
#include <linux/reiserfs_fs.h>
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
/*
** We pack the tails of files on file close, not at the time they are written.
** This implies an unnecessary copy of the tail and an unnecessary indirect item
** insertion/balancing, for files that are written in one write.
** It avoids unnecessary tail packings (balances) for files that are written in
** multiple writes and are small enough to have tails.
**
** file_release is called by the VFS layer when the file is closed. If
** this is the last open file descriptor, and the file is
** small enough to have a tail, and the tail is currently in an
** unformatted node, the tail is converted back into a direct item.
**
** We use reiserfs_truncate_file to pack the tail, since it already has
** all the conditions coded.
*/
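/* Example: a small file written with twenty 100-byte write()s grows through
   its unformatted node on each write, but its tail is packed only once, at
   the final close. The trade-off named above: a file written in a single
   100-byte write pays one extra tail copy plus one indirect item
   insertion/balancing that it would not otherwise have needed. */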
static int reiserfs_file_release (struct inode * inode, struct file * filp)

    struct reiserfs_transaction_handle th ;

    if (!S_ISREG (inode->i_mode))

    /* fast out for when nothing needs to be done */
    if ((atomic_read(&inode->i_count) > 1 ||
         !(REISERFS_I(inode)->i_flags & i_pack_on_close_mask) ||
         !tail_has_to_be_packed(inode)) &&
        REISERFS_I(inode)->i_prealloc_count <= 0) {

    reiserfs_write_lock(inode->i_sb);

    journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;
    reiserfs_update_inode_transaction(inode) ;

#ifdef REISERFS_PREALLOCATE
    reiserfs_discard_prealloc (&th, inode);

    journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3) ;

    if (atomic_read(&inode->i_count) <= 1 &&
        (REISERFS_I(inode)->i_flags & i_pack_on_close_mask) &&
        tail_has_to_be_packed (inode)) {
        /* if regular file is released by last holder and it has been
           appended (we append by unformatted node only) or its direct
           item(s) had to be converted, then it may have to be
           indirect2direct converted */
        reiserfs_truncate_file(inode, 0) ;

    reiserfs_write_unlock(inode->i_sb);

static void reiserfs_vfs_truncate_file(struct inode *inode) {
    reiserfs_truncate_file(inode, 1) ;
/* Sync a reiserfs file. */

/*
 * FIXME: sync_mapping_buffers() never has anything to sync. Can
 * it be removed?
 */
static int reiserfs_sync_file(
                              struct file * p_s_filp,
                              struct dentry * p_s_dentry,

    struct inode * p_s_inode = p_s_dentry->d_inode;
    int n_err;

    reiserfs_write_lock(p_s_inode->i_sb);

    if (!S_ISREG(p_s_inode->i_mode))

    n_err = sync_mapping_buffers(p_s_inode->i_mapping) ;
    reiserfs_commit_for_inode(p_s_inode) ;
    reiserfs_write_unlock(p_s_inode->i_sb);
    return ( n_err < 0 ) ? -EIO : 0;
/* I really do not want to play with memory shortage right now, so
   to simplify the code, we are not going to write more than this many pages at
   a time. This should still considerably improve performance compared to the
   4k-at-a-time case. This is 32 pages of 4k size. */
#define REISERFS_WRITE_PAGES_AT_A_TIME ((128 * 1024) / PAGE_CACHE_SIZE)
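/* Worked example: with PAGE_CACHE_SIZE of 4096 this evaluates to
   131072 / 4096 = 32 pages per batch, matching the comment above; an
   architecture with 8k pages would get 16 pages covering the same 128k. */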
/* Allocates blocks for a file to fulfil a write request.
   Maps all unmapped but prepared pages from the list.
   Updates metadata with newly allocated block numbers as needed */
int reiserfs_allocate_blocks_for_region(
                                struct reiserfs_transaction_handle *th,
                                struct inode *inode, /* Inode we work with */
                                loff_t pos, /* Writing position */
                                int num_pages, /* number of pages the write is
                                                  going to touch */
                                int write_bytes, /* amount of bytes to write */
                                struct page **prepared_pages, /* array of
                                                                 prepared pages */
                                int blocks_to_allocate /* Amount of blocks we
                                                          need to allocate to
                                                          fit the data into the
                                                          file */

    struct cpu_key key; // cpu key of item that we are going to deal with
    struct item_head *ih; // pointer to item head that we are going to deal with
    struct buffer_head *bh; // Buffer head that contains items that we are going to deal with
    __u32 * item; // pointer to item we are going to deal with
    INITIALIZE_PATH(path); // path to item that we are going to deal with.
    b_blocknr_t allocated_blocks[blocks_to_allocate]; // Place where allocated block numbers are stored. Right now a variable-length array on the stack, later that will change.
    reiserfs_blocknr_hint_t hint; // hint structure for block allocator.
    size_t res; // return value of various functions that we call.
    int curr_block; // current block used to keep track of unmapped blocks.
    int i; // loop counter
    int itempos; // position in item
    unsigned int from = (pos & (PAGE_CACHE_SIZE - 1)); // writing position in
                                                       // first page
    unsigned int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1; /* last modified byte offset in last page */
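    /* Worked example (4k pages): pos = 5000, write_bytes = 3000 gives
       from = 5000 & 4095 = 904 (first byte touched in the first page) and
       to = (7999 & 4095) + 1 = 3904 (one past the last byte touched in the
       last page). */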
    __u64 hole_size ; // amount of blocks for a file hole, if it needs to be created.
    int modifying_this_item = 0; // Flag for items traversal code to keep track
                                 // of the fact that we already prepared the
                                 // current block for journal

    RFALSE(!blocks_to_allocate, "green-9004: tried to allocate zero blocks?");

    /* First we compose a key to point at the writing position; we want to do
       that outside of any locking region. */
    make_cpu_key (&key, inode, pos+1, TYPE_ANY, 3/*key length*/);
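    /* Note: reiserfs keys number file bytes starting from 1 (offset 0 is
       taken by the stat data item), hence the key is built for pos+1 rather
       than pos. */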
    /* If we came here, it means we absolutely need to open a transaction,
       since we need to allocate some blocks */
    reiserfs_write_lock(inode->i_sb); // Journaling stuff, and we need that.
    journal_begin(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1); // Wish I knew if this number is enough
    reiserfs_update_inode_transaction(inode) ;

    /* Look for the in-tree position of our write; we need the path for the block allocator */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {

    /* Allocate blocks */
    /* First fill in "hint" structure for block allocator */
    hint.th = th; // transaction handle.
    hint.path = &path; // Path, so that block allocator can determine packing locality or whatever it needs to determine.
    hint.inode = inode; // Inode is needed by block allocator too.
    hint.search_start = 0; // We have no hint on where to search for free blocks for block allocator.
    hint.key = key.on_disk_key; // on disk key of file.
    hint.block = inode->i_blocks>>(inode->i_sb->s_blocksize_bits-9); // Number of disk blocks this file occupies already.
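    /* i_blocks counts 512-byte sectors, so the shift by
       (s_blocksize_bits - 9) converts sectors to filesystem blocks; e.g.
       with a 4k blocksize (s_blocksize_bits = 12), 80 sectors >> 3 = 10
       blocks. */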
    hint.formatted_node = 0; // We are allocating blocks for an unformatted node.

    /* only preallocate if this is a small write */
    if (blocks_to_allocate <
        REISERFS_SB(inode->i_sb)->s_alloc_options.preallocsize)
        hint.preallocate = 1;
    else
        hint.preallocate = 0;

    /* Call block allocator to allocate blocks */
    res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
    if ( res != CARRY_ON ) {
        if ( res == NO_DISK_SPACE ) {
            /* We flush the transaction in case of no space. This way some
               blocks might become free */
            SB_JOURNAL(inode->i_sb)->j_must_wait = 1;
            restart_transaction(th, inode, &path);

            /* We might have scheduled, so search again */
            res = search_for_position_by_key(inode->i_sb, &key, &path);
            if ( res == IO_ERROR ) {

            /* update changed info for hint structure. */
            res = reiserfs_allocate_blocknrs(&hint, allocated_blocks, blocks_to_allocate, blocks_to_allocate);
            if ( res != CARRY_ON ) {

    // Too bad, I have not found any way to convert a given region from
    // cpu format to little endian format
    for ( i = 0; i < blocks_to_allocate ; i++)
        allocated_blocks[i]=cpu_to_le32(allocated_blocks[i]);

    /* Allocating blocks well might have scheduled and the tree might have
       changed, let's search the tree again */
    /* find where in the tree our write should go */
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
        goto error_exit_free_blocks;

    bh = get_last_bh( &path ); // Get a bufferhead for the last element in the path.
    ih = get_ih( &path ); // Get a pointer to the last item head in the path.
    item = get_item( &path ); // Get a pointer to the last item in the path.

    /* Let's see what we have found */
    if ( res != POSITION_FOUND ) { /* position not found, this means that we
                                      might need to append the file with holes
                                      first */
        // Since we are writing past the file's end, we need to find out if
        // there is a hole that needs to be inserted before our writing
        // position, and how many blocks it is going to cover (we need to
        // populate pointers to file blocks representing the hole with zeros)

        /*
         * if ih is stat data, its offset is 0 and we don't want to
         * add 1 to pos in the hole_size calculation
         */
        if (is_statdata_le_ih(ih))
        hole_size = (pos + item_offset -
                     (le_key_k_offset( get_inode_item_key_version(inode),
                                       &(ih->ih_key)) +
                      op_bytes_number(ih, inode->i_sb->s_blocksize))) >>
                    inode->i_sb->s_blocksize_bits;
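        /* Worked example (4k blocksize): if the file's last item covers
           bytes 1..8192 (key offset 1, op_bytes_number = 8192) and the
           write starts at pos = 20480, then, with item_offset = 1,
           hole_size = (20480 + 1 - (1 + 8192)) >> 12 = 3: three whole
           blocks of zero pointers must be inserted before the write. */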
        if ( hole_size > 0 ) {
            int to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE ); // How much data to insert first time.
            /* area filled with zeroes, to supply as the list of zero block numbers.
               We allocate it outside of the loop just in case the loop would spin
               for several iterations. */
            char *zeros = kmalloc(to_paste*UNFM_P_SIZE, GFP_ATOMIC); // We cannot insert more than MAX_ITEM_LEN bytes anyway.
                goto error_exit_free_blocks;
            memset ( zeros, 0, to_paste*UNFM_P_SIZE);

                to_paste = min_t(__u64, hole_size, MAX_ITEM_LEN(inode->i_sb->s_blocksize)/UNFM_P_SIZE );
                if ( is_indirect_le_ih(ih) ) {
                    /* Ok, an indirect item already exists; we need to append to it */
                    /* Calculate position past the inserted item */
                    make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
                    res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)zeros, UNFM_P_SIZE*to_paste);
                        goto error_exit_free_blocks;
                } else if ( is_statdata_le_ih(ih) ) {
                    /* No existing item, create it */
                    /* item head for new item */
                    struct item_head ins_ih;

                    /* create a key for our new item */
                    make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3);

                    /* Create new item head for our new item */
                    make_le_item_head (&ins_ih, &key, key.version, 1,
                                       TYPE_INDIRECT, to_paste*UNFM_P_SIZE,

                    /* Find where such item should live in the tree */
                    res = search_item (inode->i_sb, &key, &path);
                    if ( res != ITEM_NOT_FOUND ) {
                        /* item should not exist, otherwise we have error */
                        if ( res != -ENOSPC ) {
                            reiserfs_warning (inode->i_sb,
                                "green-9008: search_by_key (%K) returned %d",

                        goto error_exit_free_blocks;

                    res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)zeros);

                    reiserfs_panic(inode->i_sb, "green-9011: Unexpected key type %K\n", &key);

                /* Now we want to check if the transaction is too full, and if
                   it is, we restart it. This will also free the path. */
                if (journal_transaction_should_end(th, th->t_blocks_allocated))
                    restart_transaction(th, inode, &path);

                /* Well, need to recalculate path and stuff */
                set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + (to_paste << inode->i_blkbits));
                res = search_for_position_by_key(inode->i_sb, &key, &path);
                if ( res == IO_ERROR ) {
                    goto error_exit_free_blocks;

                bh=get_last_bh(&path);
                item = get_item(&path);
                hole_size -= to_paste;
            } while ( hole_size );
    // Go through existing indirect items first,
    // replacing all zeroes with block numbers from the list.
    // Note that if no corresponding item was found by the previous search,
    // it means there is no existing in-tree representation for the file area
    // we are going to overwrite, so there is nothing to scan through for holes.
    for ( curr_block = 0, itempos = path.pos_in_item ; curr_block < blocks_to_allocate && res == POSITION_FOUND ; ) {

        if ( itempos >= ih_item_len(ih)/UNFM_P_SIZE ) {
            /* We ran out of data in this indirect item, let's look for another
               one. */
            /* First, if we are already modifying the current item, log it */
            if ( modifying_this_item ) {
                journal_mark_dirty (th, inode->i_sb, bh);
                modifying_this_item = 0;

            /* Then set the key to look for a new indirect item (the offset of
               the old item is added to the old item's length) */
            set_cpu_key_k_offset( &key, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize));
            /* Search for the position of the new key in the tree. */
            res = search_for_position_by_key(inode->i_sb, &key, &path);
            if ( res == IO_ERROR) {
                goto error_exit_free_blocks;

            bh=get_last_bh(&path);
            item = get_item(&path);
            itempos = path.pos_in_item;
            continue; // loop to check all kinds of conditions and so on.
        /* Ok, we have the correct position in the item now, so let's see if it
           represents a file hole (block number is zero) and fill it if needed */
        if ( !item[itempos] ) {
            /* Ok, a hole. Now we need to check if we already prepared this
               block to be journaled */
            while ( !modifying_this_item ) { // loop until we succeed
                /* Well, this item is not journaled yet, so we must prepare
                   it for journal first, before we can change it */
                struct item_head tmp_ih; // We copy the item head of the found
                                         // item here to detect if the fs
                                         // changed under us while we were
                                         // preparing for the journal
                int fs_gen; // We store fs generation here to find if someone
                            // changes fs under our feet

                copy_item_head (&tmp_ih, ih); // Remember itemhead
                fs_gen = get_generation (inode->i_sb); // remember fs generation
                reiserfs_prepare_for_journal(inode->i_sb, bh, 1); // Prepare the buffer within which the indirect item is stored for changing.
                if (fs_changed (fs_gen, inode->i_sb) && item_moved (&tmp_ih, &path)) {
                    // Sigh, the fs was changed under us, we need to look for
                    // the new location of the item we are working with

                    /* unmark the prepared area as journaled and search for its
                       new position */
                    reiserfs_restore_prepared_buffer(inode->i_sb, bh);
                    res = search_for_position_by_key(inode->i_sb, &key, &path);
                    if ( res == IO_ERROR) {
                        goto error_exit_free_blocks;

                    bh=get_last_bh(&path);
                    item = get_item(&path);
                    itempos = path.pos_in_item;

                modifying_this_item = 1;

            item[itempos] = allocated_blocks[curr_block]; // Assign new block
    if ( modifying_this_item ) { // We need to log the last-accessed block, if it
                                 // was modified but not logged yet.
        journal_mark_dirty (th, inode->i_sb, bh);

    if ( curr_block < blocks_to_allocate ) {
        // Oh well, we need to append to an indirect item, or to create an
        // indirect item if there wasn't any
        if ( is_indirect_le_ih(ih) ) {
            // Existing indirect item - append. First calculate the key for the
            // append position. We do not need to recalculate the path as it
            // should already point to the correct place.
            make_cpu_key( &key, inode, le_key_k_offset( get_inode_item_key_version(inode), &(ih->ih_key)) + op_bytes_number(ih, inode->i_sb->s_blocksize), TYPE_INDIRECT, 3);
            res = reiserfs_paste_into_item( th, &path, &key, inode, (char *)(allocated_blocks+curr_block), UNFM_P_SIZE*(blocks_to_allocate-curr_block));
                goto error_exit_free_blocks;
        } else if (is_statdata_le_ih(ih) ) {
            // The last found item was statdata. That means we need to create an indirect item.
            struct item_head ins_ih; /* itemhead for new item */

            /* create a key for our new item */
            make_cpu_key( &key, inode, 1, TYPE_INDIRECT, 3); // Position one:
                                                             // the new item
                                                             // covers the file
                                                             // from its first
                                                             // byte.

            /* Create new item head for our new item */
            make_le_item_head (&ins_ih, &key, key.version, 1, TYPE_INDIRECT,
                               (blocks_to_allocate-curr_block)*UNFM_P_SIZE,

            /* Find where such item should live in the tree */
            res = search_item (inode->i_sb, &key, &path);
            if ( res != ITEM_NOT_FOUND ) {
                /* Well, if we have found such an item already, or some error
                   occurred, we need to warn the user and return an error */
                if ( res != -ENOSPC ) {
                    reiserfs_warning (inode->i_sb,
                                      "green-9009: search_by_key (%K) "
                                      "returned %d", &key, res);

                goto error_exit_free_blocks;

            /* Insert item into the tree with the data as its body */
            res = reiserfs_insert_item( th, &path, &key, &ins_ih, inode, (char *)(allocated_blocks+curr_block));

            reiserfs_panic(inode->i_sb, "green-9010: unexpected item type for key %K\n",&key);
    // the caller is responsible for closing the transaction;
    // unless we return an error, they are also responsible for logging

    reiserfs_write_unlock(inode->i_sb);

    // go through all the pages/buffers and map the buffers to the newly
    // allocated blocks (so that the system knows where to write these pages later).
    for ( i = 0; i < num_pages ; i++ ) {
        struct page *page=prepared_pages[i]; //current page
        struct buffer_head *head = page_buffers(page);// first buffer for a page
        int block_start, block_end; // in-page offsets for buffers.

        if (!page_buffers(page))
            reiserfs_panic(inode->i_sb, "green-9005: No buffers for prepared page???");

        /* For each buffer in page */
        for(bh = head, block_start = 0; bh != head || !block_start;
            block_start=block_end, bh = bh->b_this_page) {

                reiserfs_panic(inode->i_sb, "green-9006: Allocated but absent buffer for a page?");
            block_end = block_start+inode->i_sb->s_blocksize;
            if (i == 0 && block_end <= from )
                /* if this buffer is before requested data to map, skip it */

            if (i == num_pages - 1 && block_start >= to)
                /* If this buffer is after requested data to map, abort
                   processing of current page */

            if ( !buffer_mapped(bh) ) { // Ok, unmapped buffer, need to map it
                map_bh( bh, inode->i_sb, le32_to_cpu(allocated_blocks[curr_block]));
                curr_block++; // Move on to the next allocated block number

    RFALSE( curr_block > blocks_to_allocate, "green-9007: Used too many blocks? weird");

    // Need to deal with transaction here.
error_exit_free_blocks:

    for( i = 0; i < blocks_to_allocate; i++ )
        reiserfs_free_block(th, inode, le32_to_cpu(allocated_blocks[i]), 1);

    reiserfs_update_sd(th, inode); // update any changes we made to blk count
    journal_end(th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 3 + 1);
    reiserfs_write_unlock(inode->i_sb);
/* Unlock pages prepared by reiserfs_prepare_file_region_for_write */
void reiserfs_unprepare_pages(struct page **prepared_pages, /* list of locked pages */
                              int num_pages /* amount of pages */) {
    int i; // loop counter

    for (i=0; i < num_pages ; i++) {
        struct page *page = prepared_pages[i];

        try_to_free_buffers(page);

        page_cache_release(page);

/* This function will copy data from userspace to the specified pages within
   the supplied byte range */
int reiserfs_copy_from_user_to_file_region(
                        loff_t pos, /* In-file position */
                        int num_pages, /* Number of pages affected */
                        int write_bytes, /* Amount of bytes to write */
                        struct page **prepared_pages, /* pointer to the array
                                                         of prepared pages */
                        const char __user *buf /* Pointer to user-supplied
                                                  data */

    long page_fault=0; // status of copy_from_user.
    int i; // loop counter.
    int offset; // offset in page

    for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
        int count = min_t(int,PAGE_CACHE_SIZE-offset,write_bytes); // How many bytes to write to this page
        struct page *page=prepared_pages[i]; // Current page we process.

        fault_in_pages_readable( buf, count);
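        /* Pre-faulting the user buffer here is presumably what avoids a
           deadlock: the destination pages are already locked, so taking a
           page fault inside __copy_from_user() below (say, if buf is
           mmapped from one of these very pages) could block forever. */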
        /* Copy data from userspace to the current page */

        page_fault = __copy_from_user(page_address(page)+offset, buf, count); // Copy the data.
        /* Flush processor's dcache for this page */
        flush_dcache_page(page);

            break; // Was there a fault? abort.

    return page_fault?-EFAULT:0;
/* taken from fs/buffer.c:__block_commit_write */
int reiserfs_commit_page(struct inode *inode, struct page *page,
                         unsigned from, unsigned to)

    unsigned block_start, block_end;
    int new;
    unsigned blocksize;
    struct buffer_head *bh, *head;
    unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT;

    blocksize = 1 << inode->i_blkbits;

    for(bh = head = page_buffers(page), block_start = 0;
        bh != head || !block_start;
        block_start=block_end, bh = bh->b_this_page)

        new = buffer_new(bh);
        clear_buffer_new(bh);
        block_end = block_start + blocksize;
        if (block_end <= from || block_start >= to) {
            if (!buffer_uptodate(bh))

            set_buffer_uptodate(bh);
            if (!buffer_dirty(bh)) {
                mark_buffer_dirty(bh);
                /* do data=ordered on any page past the end
                 * of file and any buffer marked BH_New.
                 */
                if (reiserfs_data_ordered(inode->i_sb) &&
                    (new || page->index >= i_size_index)) {
                    reiserfs_add_ordered_list(inode, bh);

    /*
     * If this is a partial write which happened to make all buffers
     * uptodate then we can optimize away a bogus readpage() for
     * the next read(). Here we 'discover' whether the page went
     * uptodate as a result of this (potentially partial) write.
     */
        SetPageUptodate(page);
/* Submit pages for write. This was separated from the actual file copying
   because we might want to allocate block numbers in-between.
   This function assumes that the caller will adjust the file size to the correct value. */
int reiserfs_submit_file_region_for_write(
                                struct reiserfs_transaction_handle *th,
                                struct inode *inode, /* Inode of the file */
                                loff_t pos, /* Writing position offset */
                                int num_pages, /* Number of pages to write */
                                int write_bytes, /* number of bytes to write */
                                struct page **prepared_pages /* list of pages */

    int status; // return status of block_commit_write.
    int retval = 0; // Return value we are going to return.
    int i; // loop counter
    int offset; // Writing offset in page.
    int orig_write_bytes = write_bytes;

    for ( i = 0, offset = (pos & (PAGE_CACHE_SIZE-1)); i < num_pages ; i++,offset=0) {
        int count = min_t(int,PAGE_CACHE_SIZE-offset,write_bytes); // How many bytes to write to this page
        struct page *page=prepared_pages[i]; // Current page we process.

        status = reiserfs_commit_page(inode, page, offset, offset+count);
            retval = status; // To not overcomplicate matters, we are going to
                             // submit all the pages even if there was an error;
                             // we only remember the error status to report it
                             // on exit.

    /* now that we've gotten all the ordered buffers marked dirty,
     * we can safely update i_size and close any running transaction
     */
    if ( pos + orig_write_bytes > inode->i_size) {
        inode->i_size = pos + orig_write_bytes; // Set new size
        /* If the file has grown so much that tail packing is no
         * longer possible, reset the "need to pack" flag */
        if ( (have_large_tails (inode->i_sb) &&
              inode->i_size > i_block_size (inode)*4) ||
             (have_small_tails (inode->i_sb) &&
              inode->i_size > i_block_size(inode)) )
            REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask ;
        else if ( (have_large_tails (inode->i_sb) &&
                   inode->i_size < i_block_size (inode)*4) ||
                  (have_small_tails (inode->i_sb) &&
                   inode->i_size < i_block_size(inode)) )
            REISERFS_I(inode)->i_flags |= i_pack_on_close_mask ;
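        /* Worked example: with a 4k blocksize and large tails enabled, a
           file growing past 4*4096 = 16384 bytes can no longer keep a
           packed tail, so the pack-on-close flag is cleared; with small
           tails the cutoff is a single block, 4096 bytes. */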
    if (th->t_trans_id) {
        reiserfs_write_lock(inode->i_sb);
        reiserfs_update_sd(th, inode); // And update on-disk metadata
        reiserfs_write_unlock(inode->i_sb);

        inode->i_sb->s_op->dirty_inode(inode);

    if (th->t_trans_id) {
        reiserfs_write_lock(inode->i_sb);

        reiserfs_update_sd(th, inode);
        journal_end(th, th->t_super, th->t_blocks_allocated);
        reiserfs_write_unlock(inode->i_sb);

    /*
     * we have to unlock the pages after updating i_size, otherwise
     * we race with writepage
     */
    for ( i = 0; i < num_pages ; i++) {
        struct page *page=prepared_pages[i];

        mark_page_accessed(page);
        page_cache_release(page);
/* See whether the passed write region is going to touch the file's tail
   (if one is present), and if it is, convert the tail to an unformatted node */
int reiserfs_check_for_tail_and_convert( struct inode *inode, /* inode to deal with */
                                         loff_t pos, /* Writing position */
                                         int write_bytes /* amount of bytes to write */

    INITIALIZE_PATH(path); // needed for search_for_position
    struct cpu_key key; // Key that would represent the last touched writing byte.
    struct item_head *ih; // item header of found block;
    int res; // Return value of various functions we call.
    int cont_expand_offset; // We will put the offset for generic_cont_expand here.
                            // This can be an int just because tails are created
                            // only for small files.

    /* this embodies a dependency on a particular tail policy */
    if ( inode->i_size >= inode->i_sb->s_blocksize*4 ) {
        /* such big files do not have tails, so we won't bother
           to look for one; simply return */
    reiserfs_write_lock(inode->i_sb);
    /* find the item containing the last byte to be written, or if
     * writing past the end of the file then the last item of the
     * file (and then we check its type). */
    make_cpu_key (&key, inode, pos+write_bytes+1, TYPE_ANY, 3/*key length*/);
    res = search_for_position_by_key(inode->i_sb, &key, &path);
    if ( res == IO_ERROR ) {
        reiserfs_write_unlock(inode->i_sb);

    ih = get_ih( &path );

    if ( is_direct_le_ih(ih) ) {
        /* Ok, the closest item is the file tail (tails are stored in "direct"
         * items), so we need to unpack it. */
        /* To not overcomplicate matters, we just call generic_cont_expand,
           which will in turn call other stuff and finally will boil down to
           reiserfs_get_block() that will do the necessary conversion. */
        cont_expand_offset = le_key_k_offset(get_inode_item_key_version(inode), &(ih->ih_key));

        res = generic_cont_expand( inode, cont_expand_offset);

    reiserfs_write_unlock(inode->i_sb);
/* This function locks pages starting from @pos for @inode.
   @num_pages pages are locked and stored in the
   @prepared_pages array. Also, buffers are allocated for these pages.
   The first and last page of the region are read in if they are overwritten
   only partially. If the last page did not exist before the write (file hole
   or file append), it is zeroed, then.
   Returns the number of unallocated blocks that should be allocated to cover
   the new file data. */
int reiserfs_prepare_file_region_for_write(
                                struct inode *inode /* Inode of the file */,
                                loff_t pos, /* position in the file */
                                int num_pages, /* number of pages to
                                                  prepare */
                                int write_bytes, /* Amount of bytes to be
                                                    overwritten starting
                                                    at @pos */
                                struct page **prepared_pages /* pointer to array
                                                                where the prepared
                                                                pages are stored */

    int res=0; // Return values of different functions we call.
    unsigned long index = pos >> PAGE_CACHE_SHIFT; // Offset in file in pages.
    int from = (pos & (PAGE_CACHE_SIZE - 1)); // Writing offset in first page
    int to = ((pos + write_bytes - 1) & (PAGE_CACHE_SIZE - 1)) + 1;
                                              /* offset of last modified byte
                                                 in last page */
    struct address_space *mapping = inode->i_mapping; // Pages are mapped here.
    int i; // Simple counter
    int blocks = 0; /* Return value (blocks that should be allocated) */
    struct buffer_head *bh, *head; // Current bufferhead and first bufferhead
                                   // of a page.
    unsigned block_start, block_end; // Starting and ending offsets of current
                                     // buffer in the page.
    struct buffer_head *wait[2], **wait_bh=wait; // Buffers for page, if
                                                 // page appeared to be not up
                                                 // to date. Note how we have
                                                 // at most 2 buffers: this is
                                                 // because we may at most
                                                 // partially overwrite two
                                                 // buffers for one page, one at
                                                 // the beginning of the write
                                                 // area and one at the end.
                                                 // Everything in the middle
                                                 // gets overwritten totally.

    struct cpu_key key; // cpu key of item that we are going to deal with
    struct item_head *ih = NULL; // pointer to item head that we are going to deal with
    struct buffer_head *itembuf=NULL; // Buffer head that contains items that we are going to deal with
    INITIALIZE_PATH(path); // path to item that we are going to deal with.
    __u32 * item=NULL; // pointer to item we are going to deal with
    int item_pos=-1; /* Position in indirect item */

    if ( num_pages < 1 ) {
        reiserfs_warning (inode->i_sb,
                          "green-9001: reiserfs_prepare_file_region_for_write "
                          "called with zero number of pages to process");

    /* We have 2 loops for pages. In the first loop we grab and lock the pages, so
       that nobody would touch these until we release the pages. Then
       we'd start to deal with mapping buffers to blocks. */
    for ( i = 0; i < num_pages; i++) {
        prepared_pages[i] = grab_cache_page(mapping, index + i); // locks the page
        if ( !prepared_pages[i]) {
            goto failed_page_grabbing;

        if (!page_has_buffers(prepared_pages[i]))
            create_empty_buffers(prepared_pages[i], inode->i_sb->s_blocksize, 0);

    /* Let's count the number of blocks for the case where all the blocks
       overwritten are new (we will subtract the already allocated blocks later) */

        /* These are fully overwritten pages, so all the blocks in
           these pages are counted as needing to be allocated */
        blocks = (num_pages - 2) << (PAGE_CACHE_SHIFT - inode->i_blkbits);

    /* count blocks needed for the first page (possibly partially written) */
    blocks += ((PAGE_CACHE_SIZE - from) >> inode->i_blkbits) +
              !!(from & (inode->i_sb->s_blocksize-1)); /* roundup */
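    /* `!!x` is the usual 0-or-1 idiom: if `from` is not block-aligned, the
       partial first block was dropped by the shift above, so one extra block
       is added back; if it is aligned, nothing is added. */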
    /* Now we account for the last page. If the last page == the first page (we
       overwrite only one page), we subtract all the blocks past the
       last writing position in a page out of the already calculated number
       of blocks */
    blocks += ((num_pages > 1) << (PAGE_CACHE_SHIFT-inode->i_blkbits)) -
              ((PAGE_CACHE_SIZE - to) >> inode->i_blkbits);
    /* Note how we do not round up here, since partial blocks still
       should be allocated */
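    /* Worked example (4k pages, 1k blocks): num_pages = 3, from = 1536,
       to = 2560. Middle pages: (3-2) << 2 = 4 blocks. First page:
       ((4096-1536) >> 10) + !!(1536 & 1023) = 2 + 1 = 3 blocks. Last page:
       (1 << 2) - ((4096-2560) >> 10) = 4 - 1 = 3 blocks. Total 10 blocks,
       matching a block-by-block count of the touched region. */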
    /* Now if all the write area lies past the file end, there is no point in
       mapping blocks, since there are none, so we just zero out the remaining
       parts of the first and last pages in the write area (if needed) */
    if ( (pos & ~((loff_t)PAGE_CACHE_SIZE - 1)) > inode->i_size ) {
        if ( from != 0 ) {/* First page needs to be partially zeroed */
            char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
            memset(kaddr, 0, from);
            kunmap_atomic( kaddr, KM_USER0);

        if ( to != PAGE_CACHE_SIZE ) { /* Last page needs to be partially zeroed */
            char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
            memset(kaddr+to, 0, PAGE_CACHE_SIZE - to);
            kunmap_atomic( kaddr, KM_USER0);

        /* Since all blocks are new - use the already calculated value */

    /* Well, since we write somewhere into the middle of a file, there is a
       possibility we are writing over some already allocated blocks, so
       let's map these blocks and subtract the number of such blocks from the
       number we need to allocate (calculated above) */
    /* Mask write position to start on a block boundary; we do it out of the
       loop for performance reasons */
    pos &= ~((loff_t) inode->i_sb->s_blocksize - 1);
    /* Set cpu key to the starting position in the file (on the left block boundary) */
    make_cpu_key (&key, inode, 1 + ((pos) & ~((loff_t) inode->i_sb->s_blocksize - 1)), TYPE_ANY, 3/*key length*/);

    reiserfs_write_lock(inode->i_sb); // We need that for at least search_by_key()
    for ( i = 0; i < num_pages ; i++ ) {

        head = page_buffers(prepared_pages[i]);
        /* For each buffer in the page */
        for(bh = head, block_start = 0; bh != head || !block_start;
            block_start=block_end, bh = bh->b_this_page) {

                reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
            /* Find where this buffer ends */
            block_end = block_start+inode->i_sb->s_blocksize;
            if (i == 0 && block_end <= from )
                /* if this buffer is before requested data to map, skip it */

            if (i == num_pages - 1 && block_start >= to) {
                /* If this buffer is after requested data to map, abort
                   processing of current page */

            if ( buffer_mapped(bh) && bh->b_blocknr !=0 ) {
                /* This is an optimisation for the case where the buffer is
                   mapped and has a block number assigned. If a significant
                   number of such buffers are present, we may avoid some
                   search_by_key calls.
                   Probably it would be possible to move parts of this code
                   out of BKL, but I am afraid that would overcomplicate the
                   code without any noticeable benefit.
                */
                set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);
                blocks--; // Decrease the number of blocks that need to be
                          // allocated
                continue; // Go to the next buffer

            if ( !itembuf || /* if first iteration */
                 item_pos >= ih_item_len(ih)/UNFM_P_SIZE)
            { /* or if we progressed past the
                 current unformatted_item */
                /* Try to find next item */
                res = search_for_position_by_key(inode->i_sb, &key, &path);
                /* Abort if no more items */
                if ( res != POSITION_FOUND ) {
                    /* make sure later loops don't use this item */

                /* Update information about current indirect item */
                itembuf = get_last_bh( &path );
                ih = get_ih( &path );
                item = get_item( &path );
                item_pos = path.pos_in_item;

                RFALSE( !is_indirect_le_ih (ih), "green-9003: indirect item expected");

            /* See if there is some block associated with the file
               at that position; if so, map the buffer to this block */
            if ( get_block_num(item,item_pos) ) {
                map_bh(bh, inode->i_sb, get_block_num(item,item_pos));
                blocks--; // Decrease the number of blocks that need to be
                          // allocated

            item_pos++;
            set_cpu_key_k_offset( &key, cpu_key_k_offset(&key) + inode->i_sb->s_blocksize);

    pathrelse(&path); // Free the path
    reiserfs_write_unlock(inode->i_sb);
    /* Now zero out unmapped buffers for the first and last pages of the
       write area, or issue read requests if the buffer is mapped. */
    /* First page, see if it is not uptodate */
    if ( !PageUptodate(prepared_pages[0]) ) {
        head = page_buffers(prepared_pages[0]);

        /* For each buffer in page */
        for(bh = head, block_start = 0; bh != head || !block_start;
            block_start=block_end, bh = bh->b_this_page) {

                reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
            /* Find where this buffer ends */
            block_end = block_start+inode->i_sb->s_blocksize;
            if ( block_end <= from )
                /* if this buffer is before requested data to map, skip it */

            if ( block_start < from ) { /* Aha, our partial buffer */
                if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
                                              issue a READ request for it, so
                                              that its old contents are not
                                              lost */
                    ll_rw_block(READ, 1, &bh);
                    *wait_bh++ = bh;

                } else { /* Not mapped, zero it */
                    char *kaddr = kmap_atomic(prepared_pages[0], KM_USER0);
                    memset(kaddr+block_start, 0, from-block_start);
                    kunmap_atomic( kaddr, KM_USER0);
                    set_buffer_uptodate(bh);

    /* Last page, see if it is not uptodate, or if the last page is past the end of the file. */
    if ( !PageUptodate(prepared_pages[num_pages-1]) ||
         ((pos+write_bytes)>>PAGE_CACHE_SHIFT) > (inode->i_size>>PAGE_CACHE_SHIFT) ) {
        head = page_buffers(prepared_pages[num_pages-1]);

        /* for each buffer in page */
        for(bh = head, block_start = 0; bh != head || !block_start;
            block_start=block_end, bh = bh->b_this_page) {

                reiserfs_panic(inode->i_sb, "green-9002: Allocated but absent buffer for a page?");
            /* Find where this buffer ends */
            block_end = block_start+inode->i_sb->s_blocksize;
            if ( block_start >= to )
                /* if this buffer is after requested data to map, skip it */

            if ( block_end > to ) { /* Aha, our partial buffer */
                if ( buffer_mapped(bh) ) { /* If it is mapped, we need to
                                              issue a READ request for it, so
                                              that its old contents are not
                                              lost */
                    ll_rw_block(READ, 1, &bh);
                    *wait_bh++ = bh;

                } else { /* Not mapped, zero it */
                    char *kaddr = kmap_atomic(prepared_pages[num_pages-1], KM_USER0);
                    memset(kaddr+to, 0, block_end-to);
                    kunmap_atomic( kaddr, KM_USER0);
                    set_buffer_uptodate(bh);

    /* Wait for read requests we made to happen, if necessary */
    while(wait_bh > wait) {
        wait_on_buffer(*--wait_bh);
        if (!buffer_uptodate(*wait_bh)) {

failed_page_grabbing:

    reiserfs_unprepare_pages(prepared_pages, num_pages);
/* Write @count bytes at position @ppos in a file indicated by @file
   from the buffer @buf.

   generic_file_write() is only appropriate for filesystems that are not
   seeking to optimize performance and want something simple that works. It
   is not for serious use by general purpose filesystems, excepting the one
   that it was written for (ext2/3). This is for several reasons:

   * It has no understanding of any filesystem specific optimizations.

   * It enters the filesystem repeatedly for each page that is written.

   * It depends on the reiserfs_get_block() function, which as implemented by
     reiserfs performs a costly search_by_key operation for each page it is
     supplied with. By contrast, reiserfs_file_write() feeds as much as
     possible at a time to reiserfs, which allows for fewer tree traversals.

   * Each indirect pointer insertion takes a lot of cpu, because it involves
     memory moves inside of blocks.

   * Asking the block allocation code for blocks one at a time is slightly
     less efficient.

   All of these reasons for not using only generic file write were understood
   back when reiserfs was first miscoded to use it, but we were in a hurry to
   make the code freeze, and so it couldn't be revised then. This new code
   should make things right finally.

   Future Features: providing search_by_key with hints.

*/
ssize_t reiserfs_file_write( struct file *file, /* the file we are going to write into */
                             const char __user *buf, /* pointer to user-supplied
                                                        data to be written */
                             size_t count, /* amount of bytes to write */
                             loff_t *ppos /* pointer to position in file that we start writing at. Should be updated to
                                           * new current position before returning. */ )
    size_t already_written = 0; // Number of bytes already written to the file.
    loff_t pos; // Current position in the file.
    size_t res; // return value of various functions that we call.
    struct inode *inode = file->f_dentry->d_inode; // Inode of the file that we are writing to.
    /* To simplify coding at this time, we store
       locked pages in array for now */
    struct page * prepared_pages[REISERFS_WRITE_PAGES_AT_A_TIME];
    struct reiserfs_transaction_handle th;

    if ( file->f_flags & O_DIRECT) { // Direct IO needs treatment
        int result, after_file_end = 0;
        if ( (*ppos + count >= inode->i_size) || (file->f_flags & O_APPEND) ) {
            /* If we are appending a file, we need to put this savelink in here.
               If we will crash while doing direct io, finish_unfinished will
               cut the garbage from the file end. */
            reiserfs_write_lock(inode->i_sb);
            journal_begin(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT );
            reiserfs_update_inode_transaction(inode);
            add_save_link (&th, inode, 1 /* Truncate */);
            journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT );
            reiserfs_write_unlock(inode->i_sb);

        result = generic_file_write(file, buf, count, ppos);

        if ( after_file_end ) { /* Now update i_size and remove the savelink */
            struct reiserfs_transaction_handle th;
            reiserfs_write_lock(inode->i_sb);
            journal_begin(&th, inode->i_sb, 1);
            reiserfs_update_inode_transaction(inode);
            reiserfs_update_sd(&th, inode);
            journal_end(&th, inode->i_sb, 1);
            remove_save_link (inode, 1/* truncate */);
            reiserfs_write_unlock(inode->i_sb);
    if ( unlikely((ssize_t) count < 0 ))

    if (unlikely(!access_ok(VERIFY_READ, buf, count)))

    down(&inode->i_sem); // locks the entire file for just us

    pos = *ppos;

    /* Check if we can write to the specified region of the file, that the
       file is not overly big, and this kind of stuff. Adjust pos and
       count, if needed. */
    res = generic_write_checks(file, &pos, &count, 0);

    res = remove_suid(file->f_dentry);

    inode_update_time(inode, 1); /* Both mtime and ctime */

    // Ok, we are done with all the checks.

    // Now we should start real work

    /* If we are going to write past the file's packed tail, or if we are
       going to overwrite part of the tail, we need that tail to be converted
       into an unformatted node first */
    res = reiserfs_check_for_tail_and_convert( inode, pos, count);

    while ( count > 0) {
        /* This is the main loop, in which we run until some error occurs
           or until we write all of the data. */
        int num_pages; /* number of pages we are going to write in this iteration */
        int write_bytes; /* number of bytes to write during this iteration */
        int blocks_to_allocate; /* how many blocks we need to allocate for
                                   this iteration */

        /* (pos & (PAGE_CACHE_SIZE-1)) is an idiom for the offset into a page of pos */
        num_pages = !!((pos+count) & (PAGE_CACHE_SIZE - 1)) + /* round up partial
                                                                 pages */
                    ((count + (pos & (PAGE_CACHE_SIZE-1))) >> PAGE_CACHE_SHIFT);
                                                              /* convert size to
                                                                 the number of
                                                                 pages */
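        /* Worked example (4k pages): pos = 5000, count = 10000. The write
           covers bytes 5000..14999, i.e. three pages (indices 1..3).
           Indeed: !!(15000 & 4095) = 1, plus (10000 + 904) >> 12 = 2, so
           num_pages = 3. */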
        reiserfs_write_lock(inode->i_sb);
        if ( num_pages > REISERFS_WRITE_PAGES_AT_A_TIME
             || num_pages > reiserfs_can_fit_pages(inode->i_sb) ) {
            /* If we were asked to write more data than we want to, or if there
               is not that much space, then we shorten the amount of data to
               write for this iteration. */
            num_pages = min_t(int, REISERFS_WRITE_PAGES_AT_A_TIME, reiserfs_can_fit_pages(inode->i_sb));
            /* Also we should not forget to set the size in bytes accordingly */
            write_bytes = (num_pages << PAGE_CACHE_SHIFT) -
                          (pos & (PAGE_CACHE_SIZE-1));
                                        /* If the position is not on the
                                           start of the page, we need
                                           to subtract the offset from
                                           the byte count */
        } else
            write_bytes = count;

        /* reserve the blocks to be allocated later, so that later on
           we still have the space to write the blocks to */
        reiserfs_claim_blocks_to_be_allocated(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));
        reiserfs_write_unlock(inode->i_sb);

        if ( !num_pages ) { /* If we do not have enough space even for a */
            res = -ENOSPC;  /* single page, return -ENOSPC */
            if ( pos > (inode->i_size & (inode->i_sb->s_blocksize-1)))
                break; // In case we are writing past the file end, break.
            // Otherwise we are possibly overwriting the file, so
            // let's set the write size to be equal to or less than blocksize.
            // This way we get it correctly for file holes.
            // But overwriting files on absolutely full volumes would not
            // be very efficient. Well, people are not supposed to fill
            // 100% of disk space anyway.
            write_bytes = min_t(int, count, inode->i_sb->s_blocksize - (pos & (inode->i_sb->s_blocksize - 1)));

            // No blocks were claimed before, so do it now.
            reiserfs_claim_blocks_to_be_allocated(inode->i_sb, 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits));
        /* Prepare for writing into the region: read in all the
           partially overwritten pages, if needed, and lock the pages,
           so that nobody else can access these until we are done.
           We get the number of actual blocks needed as a result. */
        blocks_to_allocate = reiserfs_prepare_file_region_for_write(inode, pos, num_pages, write_bytes, prepared_pages);
        if ( blocks_to_allocate < 0 ) {
            res = blocks_to_allocate;
            reiserfs_release_claimed_blocks(inode->i_sb, num_pages << (PAGE_CACHE_SHIFT - inode->i_blkbits));

        /* First we correct our estimate of how many blocks we need */
        reiserfs_release_claimed_blocks(inode->i_sb, (num_pages << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits)) - blocks_to_allocate );

        if ( blocks_to_allocate > 0) {/* We only allocate blocks if we need to */
            /* Fill in all the possible holes and append the file if needed */
            res = reiserfs_allocate_blocks_for_region(&th, inode, pos, num_pages, write_bytes, prepared_pages, blocks_to_allocate);

        /* well, we have allocated the blocks, so it is time to free
           the reservation we made earlier. */
        reiserfs_release_claimed_blocks(inode->i_sb, blocks_to_allocate);

            reiserfs_unprepare_pages(prepared_pages, num_pages);

        /* NOTE that allocating blocks and filling blocks can be done in
           reverse order, and probably we would do that just to get rid of
           garbage in files after a crash */

        /* Copy data from the user-supplied buffer to the file's pages */
        res = reiserfs_copy_from_user_to_file_region(pos, num_pages, write_bytes, prepared_pages, buf);
            reiserfs_unprepare_pages(prepared_pages, num_pages);

        /* Send the pages to disk and unlock them. */
        res = reiserfs_submit_file_region_for_write(&th, inode, pos, num_pages,
                                                    write_bytes, prepared_pages);

        already_written += write_bytes;
        buf += write_bytes;
        *ppos = pos += write_bytes;
        count -= write_bytes;
        balance_dirty_pages_ratelimited(inode->i_mapping);

    /* this is only true on error */
    if (th.t_trans_id) {
        reiserfs_write_lock(inode->i_sb);
        journal_end(&th, th.t_super, th.t_blocks_allocated);
        reiserfs_write_unlock(inode->i_sb);

    if ((file->f_flags & O_SYNC) || IS_SYNC(inode))
        res = generic_osync_inode(inode, file->f_mapping, OSYNC_METADATA|OSYNC_DATA);

    reiserfs_async_progress_wait(inode->i_sb);

    return (already_written != 0)?already_written:res;
    up(&inode->i_sem); // unlock the file on exit.

static ssize_t reiserfs_aio_write(struct kiocb *iocb, const char __user *buf,
                                  size_t count, loff_t pos)
{
    return generic_file_aio_write(iocb, buf, count, pos);
}

struct file_operations reiserfs_file_operations = {
    .read        = generic_file_read,
    .write       = reiserfs_file_write,
    .ioctl       = reiserfs_ioctl,
    .mmap        = generic_file_mmap,
    .release     = reiserfs_file_release,
    .fsync       = reiserfs_sync_file,
    .sendfile    = generic_file_sendfile,
    .aio_read    = generic_file_aio_read,
    .aio_write   = reiserfs_aio_write,
};

struct inode_operations reiserfs_file_inode_operations = {
    .truncate    = reiserfs_vfs_truncate_file,
    .setattr     = reiserfs_setattr,
    .setxattr    = reiserfs_setxattr,
    .getxattr    = reiserfs_getxattr,
    .listxattr   = reiserfs_listxattr,
    .removexattr = reiserfs_removexattr,
    .permission  = reiserfs_permission,
};