/*
** Write ahead logging implementation copyright Chris Mason 2000
**
** The background commits make this code very interrelated, and
** overly complex.  I need to rethink things a bit....The major players:
**
** journal_begin -- call with the number of blocks you expect to log.
**                  If the current transaction is too
**                  old, it will block until the current transaction is
**                  finished, and then start a new one.
**                  Usually, your transaction will get joined in with
**                  previous ones for speed.
**
** journal_join  -- same as journal_begin, but won't block on the current
**                  transaction regardless of age.  Don't ever call
**                  this.  Ever.  There are only two places it should be
**                  called from, and they are both inside this file.
**
** journal_mark_dirty -- adds blocks into this transaction.  clears any flags
**                       that might make them get sent to disk
**                       and then marks them BH_JDirty.  Puts the buffer head
**                       into the current transaction hash.
**
** journal_end -- if the current transaction is batchable, it does nothing;
**                otherwise, it could do an async/synchronous commit, or
**                a full flush of all log and real blocks in the
**                transaction.
**
** flush_old_commits -- if the current transaction is too old, it is ended and
**                      commit blocks are sent to disk.  Forces commit blocks
**                      to disk for all backgrounded commits that have been
**                      around too long.
**                      -- Note, if you call this as an immediate flush from
**                      within kupdate, it will ignore the immediate flag
*/
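/*
** Illustrative only -- a minimal sketch of the call pattern described
** above, for a caller that logs one buffer.  The handle, super block and
** buffer head names here are hypothetical:
**
**	struct reiserfs_transaction_handle th ;
**	journal_begin(&th, sb, 1) ;               // reserve one log block
**	reiserfs_prepare_for_journal(sb, bh, 1) ;
**	journal_mark_dirty(&th, sb, bh) ;         // pin bh in this transaction
**	journal_end(&th, sb, 1) ;                 // batched unless too old/full
*/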
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#include <linux/time.h>
#include <asm/semaphore.h>

#include <linux/vmalloc.h>
#include <linux/reiserfs_fs.h>

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
/* gets a struct reiserfs_journal_list * from a list head */
#define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_list))
#define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \
                               j_working_list))
/* the number of mounted filesystems.  This is used to decide when to
** start and kill the commit workqueue
*/
static int reiserfs_mounted_fs_count;

static struct workqueue_struct *commit_wq;
#define JOURNAL_TRANS_HALF 1018   /* must be correct to keep the desc and commit
                                     structs at 4k */
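/*
** A quick sanity check of that constant (my reading of the on-disk
** layout, for a 4k blocksize): the description block holds three 32-bit
** header fields plus a 12-byte magic, so 4096 - 12 - 12 = 4072 bytes
** remain, which is exactly 1018 four-byte block numbers.
*/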
#define BUFNR 64 /* read ahead */
/* cnode stat bits.  Move these into reiserfs_fs.h */
#define BLOCK_FREED 2		/* this block was freed, and can't be written. */
#define BLOCK_FREED_HOLDER 3	/* this block was freed during this transaction, and can't be written */

#define BLOCK_NEEDS_FLUSH 4	/* used in flush_journal_list */
#define BLOCK_DIRTIED 5
/* journal list state bits */
#define LIST_TOUCHED 1
#define LIST_DIRTY   2
#define LIST_COMMIT_PENDING  4	/* someone will commit this list */
/* flags for do_journal_end */
#define FLUSH_ALL   1		/* flush commit and real blocks */
#define COMMIT_NOW  2		/* end and commit this transaction */
#define WAIT        4		/* wait for the log blocks to hit the disk */
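/*
** Example from later in this file: do_journal_release() forces its final
** flush with do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL).
*/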
static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags) ;
static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int can_dirty(struct reiserfs_journal_cnode *cn) ;
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks);
static int release_journal_dev(struct super_block *super,
                               struct reiserfs_journal *journal);
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
static void queue_log_writer(struct super_block *s);
/* values for join in do_journal_begin_r */
enum {
    JBEGIN_REG = 0, /* regular journal begin */
    JBEGIN_JOIN = 1, /* join the running transaction if at all possible */
    JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
};
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
                              struct super_block * p_s_sb,
                              unsigned long nblocks, int join);
static void init_journal_hash(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
}
/*
** clears BH_Dirty and sticks the buffer on the clean list.  Called because I can't allow refile_buffer to
** make schedule happen after I've freed a block.  Look at remove_from_transaction and journal_mark_freed for
** more details.
*/
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
  if (bh) {
    clear_buffer_dirty(bh);
    clear_buffer_journal_test(bh);
  }
  return 0 ;
}
static void disable_barrier(struct super_block *s)
{
  REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
  printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
}
static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_bitmap_node *bn ;

  bn = reiserfs_kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS, p_s_sb) ;
  if (!bn)
    return NULL ;
  bn->data = reiserfs_kmalloc(p_s_sb->s_blocksize, GFP_NOFS, p_s_sb) ;
  if (!bn->data) {
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    return NULL ;
  }
  memset(bn->data, 0, p_s_sb->s_blocksize) ;
  INIT_LIST_HEAD(&bn->list) ;
  return bn ;
}
static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL;
  struct list_head *entry = journal->j_bitmap_nodes.next ;

  journal->j_used_bitmap_nodes++ ;
repeat:
  if(entry != &journal->j_bitmap_nodes) {
    bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
    list_del(entry) ;
    memset(bn->data, 0, p_s_sb->s_blocksize) ;
    journal->j_free_bitmap_nodes-- ;
    return bn ;
  }
  bn = allocate_bitmap_node(p_s_sb) ;
  if (!bn) {
    yield();
    goto repeat ;
  }
  return bn ;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
                                    struct reiserfs_bitmap_node *bn) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  journal->j_used_bitmap_nodes-- ;
  if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
  } else {
    list_add(&bn->list, &journal->j_bitmap_nodes) ;
    journal->j_free_bitmap_nodes++ ;
  }
}
static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
  int i ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_bitmap_node *bn = NULL ;
  for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
    bn = allocate_bitmap_node(p_s_sb) ;
    if (bn) {
      list_add(&bn->list, &journal->j_bitmap_nodes) ;
      journal->j_free_bitmap_nodes++ ;
    } else {
      break ; /* this is ok, we'll try again when more are needed */
    }
  }
}
static int set_bit_in_list_bitmap(struct super_block *p_s_sb, int block,
                                  struct reiserfs_list_bitmap *jb) {
  int bmap_nr = block / (p_s_sb->s_blocksize << 3) ;
  int bit_nr = block % (p_s_sb->s_blocksize << 3) ;

  if (!jb->bitmaps[bmap_nr]) {
    jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb) ;
  }
  set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data) ;
  return 0 ;
}
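/*
** Worked example (illustrative): with a 4k blocksize each bitmap node
** covers 4096 << 3 = 32768 blocks, so block 40000 lands in bmap_nr 1
** at bit_nr 40000 - 32768 = 7232.
*/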
static void cleanup_bitmap_list(struct super_block *p_s_sb,
                                struct reiserfs_list_bitmap *jb) {
  int i;
  if (jb->bitmaps == NULL)
    return;

  for (i = 0 ; i < SB_BMAP_NR(p_s_sb) ; i++) {
    if (jb->bitmaps[i]) {
      free_bitmap_node(p_s_sb, jb->bitmaps[i]) ;
      jb->bitmaps[i] = NULL ;
    }
  }
}
/* only call this on FS unmount. */
static int free_list_bitmaps(struct super_block *p_s_sb,
                             struct reiserfs_list_bitmap *jb_array) {
  int i ;
  struct reiserfs_list_bitmap *jb ;
  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    cleanup_bitmap_list(p_s_sb, jb) ;
    vfree(jb->bitmaps) ;
    jb->bitmaps = NULL ;
  }
  return 0;
}
static int free_bitmap_nodes(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct list_head *next = journal->j_bitmap_nodes.next ;
  struct reiserfs_bitmap_node *bn ;

  while(next != &journal->j_bitmap_nodes) {
    bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
    list_del(next) ;
    reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
    reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
    next = journal->j_bitmap_nodes.next ;
    journal->j_free_bitmap_nodes-- ;
  }
  return 0 ;
}
/*
** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
** jb_array is the array to be filled in.
*/
int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
                                   struct reiserfs_list_bitmap *jb_array,
                                   int bmap_nr) {
  int i, failed = 0 ;
  struct reiserfs_list_bitmap *jb ;
  int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *) ;

  for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
    jb = jb_array + i ;
    jb->journal_list = NULL ;
    jb->bitmaps = vmalloc( mem ) ;
    if (!jb->bitmaps) {
      reiserfs_warning(p_s_sb, "clm-2000, unable to allocate bitmaps for journal lists") ;
      failed = 1 ;
      break ;
    }
    memset(jb->bitmaps, 0, mem) ;
  }
  if (failed) {
    free_list_bitmaps(p_s_sb, jb_array) ;
    return -1 ;
  }
  return 0 ;
}
/*
** find an available list bitmap.  If you can't find one, flush a commit list
** and try again
*/
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
  int i, j ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_list_bitmap *jb = NULL ;

  for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
    i = journal->j_list_bitmap_index ;
    journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
    jb = journal->j_list_bitmap + i ;
    if (journal->j_list_bitmap[i].journal_list) {
      flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1) ;
      if (!journal->j_list_bitmap[i].journal_list) {
        break ;
      }
    } else {
      break ;
    }
  }
  if (jb->journal_list) { /* double check to make sure it flushed correctly */
    return NULL ;
  }
  jb->journal_list = jl ;
  return jb ;
}
/*
** allocates a new chunk of X nodes, and links them all together as a list.
** Uses the cnode->next and cnode->prev pointers
** returns NULL on failure
*/
static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) {
  struct reiserfs_journal_cnode *head ;
  int i ;
  if (num_cnodes <= 0) {
    return NULL ;
  }
  head = vmalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  if (!head) {
    return NULL ;
  }
  memset(head, 0, num_cnodes * sizeof(struct reiserfs_journal_cnode)) ;
  head[0].prev = NULL ;
  head[0].next = head + 1 ;
  for (i = 1 ; i < num_cnodes; i++) {
    head[i].prev = head + (i - 1) ;
    head[i].next = head + (i + 1) ; /* if last one, overwrite it after the if */
  }
  head[num_cnodes -1].next = NULL ;
  return head ;
}
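/*
** Resulting layout (illustrative), for num_cnodes == 4:
**
**   NULL <- head[0] <-> head[1] <-> head[2] <-> head[3] -> NULL
**
** i.e. a doubly linked free list carved out of a single vmalloc'd array.
*/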
/*
** pulls a cnode off the free list, or returns NULL on failure
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "get_cnode") ;

  if (journal->j_cnode_free <= 0) {
    return NULL ;
  }
  journal->j_cnode_used++ ;
  journal->j_cnode_free-- ;
  cn = journal->j_cnode_free_list ;
  if (!cn) {
    return cn ;
  }
  if (cn->next) {
    cn->next->prev = NULL ;
  }
  journal->j_cnode_free_list = cn->next ;
  memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
  return cn ;
}
/*
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  reiserfs_check_lock_depth(p_s_sb, "free_cnode") ;

  journal->j_cnode_used-- ;
  journal->j_cnode_free++ ;
  /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
  cn->next = journal->j_cnode_free_list ;
  if (journal->j_cnode_free_list) {
    journal->j_cnode_free_list->prev = cn ;
  }
  cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
  journal->j_cnode_free_list = cn ;
}
static void clear_prepared_bits(struct buffer_head *bh) {
  clear_buffer_journal_prepared (bh);
  clear_buffer_journal_restore_dirty (bh);
}
/* utility function to force a BUG if it is called without the big
** kernel lock held.  caller is the string printed just before calling BUG()
*/
void reiserfs_check_lock_depth(struct super_block *sb, char *caller) {
#ifdef CONFIG_SMP
  if (current->lock_depth < 0) {
    reiserfs_panic (sb, "%s called without kernel lock held", caller) ;
  }
#endif
}
/* return a cnode with same dev, block number and size in table, or null if not found */
static inline struct reiserfs_journal_cnode *
get_journal_hash_dev(struct super_block *sb,
                     struct reiserfs_journal_cnode **table,
                     long bl)
{
  struct reiserfs_journal_cnode *cn ;
  cn = journal_hash(table, sb, bl) ;
  while(cn) {
    if (cn->blocknr == bl && cn->sb == sb)
      return cn ;
    cn = cn->hnext;
  }
  return (struct reiserfs_journal_cnode *)0 ;
}
/* returns a cnode with same size, block number and dev as bh in the current transaction hash.  NULL if not found */
static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn ;
  if (bh) {
    cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bh->b_blocknr);
  } else {
    return (struct reiserfs_journal_cnode *)0 ;
  }
  return cn ;
}
/*
** this actually means 'can this block be reallocated yet?'.  If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
** being overwritten by a replay after crashing.
**
** If you don't set search_all, a block can only be allocated if it is not in the current transaction.  Since deleting
** a block removes it from the current transaction, this case should never happen.  If you don't set search_all, make
** sure you never write the block without logging it.
**
** next_zero_bit is a suggestion about the next block to try for find_forward.
** when bl is rejected because it is set in a journal list bitmap, we search
** for the next zero bit in the bitmap that rejected bl.  Then, we return that
** through next_zero_bit for find_forward to try.
**
** Just because we return something in next_zero_bit does not mean we won't
** reject it on the next call to reiserfs_in_journal
*/
int reiserfs_in_journal(struct super_block *p_s_sb,
                        int bmap_nr, int bit_nr, int search_all,
                        b_blocknr_t *next_zero_bit) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn ;
  struct reiserfs_list_bitmap *jb ;
  int i ;
  unsigned long bl;

  *next_zero_bit = 0 ; /* always start this at zero. */

  PROC_INFO_INC( p_s_sb, journal.in_journal );
  /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
  ** if we crash before the transaction that freed it commits,  this transaction won't
  ** have committed either, and the block will never be written
  */
  if (search_all) {
    for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
      PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
      jb = journal->j_list_bitmap + i ;
      if (jb->journal_list && jb->bitmaps[bmap_nr] &&
          test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
        *next_zero_bit = find_next_zero_bit((unsigned long *)
                                     (jb->bitmaps[bmap_nr]->data),
                                     p_s_sb->s_blocksize << 3, bit_nr+1) ;
        return 1 ;
      }
    }
  }

  bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
  /* is it in any old transactions? */
  if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
    return 1;
  }

  /* is it in the current transaction?  This should never happen */
  if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
    BUG();
    return 1;
  }

  PROC_INFO_INC( p_s_sb, journal.in_journal_reusable );
  /* safe for reuse */
  return 0 ;
}
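/*
** Illustrative caller sketch (the real caller is the block allocator;
** names here are simplified/hypothetical):
**
**	b_blocknr_t next ;
**	if (reiserfs_in_journal(sb, bmap_nr, bit_nr, 1, &next)) {
**		// still pinned by the log; retry the search at 'next'
**	}
*/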
/* insert cn into table
*/
inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
  struct reiserfs_journal_cnode *cn_orig ;

  cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
  cn->hnext = cn_orig ;
  cn->hprev = NULL ;
  if (cn_orig) {
    cn_orig->hprev = cn ;
  }
  journal_hash(table, cn->sb, cn->blocknr) = cn ;
}
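/*
** After insertion the new cnode becomes the chain head (illustrative):
**
**   before:  table[h] -> cn_orig -> ...
**   after:   table[h] -> cn -> cn_orig -> ...
*/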
/* lock the current transaction */
inline static void lock_journal(struct super_block *p_s_sb) {
  PROC_INFO_INC( p_s_sb, journal.lock_journal );
  down(&SB_JOURNAL(p_s_sb)->j_lock);
}

/* unlock the current transaction */
inline static void unlock_journal(struct super_block *p_s_sb) {
  up(&SB_JOURNAL(p_s_sb)->j_lock);
}
static inline void get_journal_list(struct reiserfs_journal_list *jl)
{
    jl->j_refcount++;
}

static inline void put_journal_list(struct super_block *s,
                                    struct reiserfs_journal_list *jl)
{
    if (jl->j_refcount < 1) {
        reiserfs_panic (s, "trans id %lu, refcount at %d", jl->j_trans_id,
                        jl->j_refcount);
    }
    if (--jl->j_refcount == 0)
        reiserfs_kfree(jl, sizeof(struct reiserfs_journal_list), s);
}
/*
** this used to be much more involved, and I'm keeping it just in case things get ugly again.
** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
** transaction.
*/
static void cleanup_freed_for_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {

  struct reiserfs_list_bitmap *jb = jl->j_list_bitmap ;
  if (jb) {
    cleanup_bitmap_list(p_s_sb, jb) ;
  }
  jl->j_list_bitmap->journal_list = NULL ;
  jl->j_list_bitmap = NULL ;
}
static int journal_list_still_alive(struct super_block *s,
                                    unsigned long trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct list_head *entry = &journal->j_journal_list;
    struct reiserfs_journal_list *jl;

    if (!list_empty(entry)) {
        jl = JOURNAL_LIST_ENTRY(entry->next);
        if (jl->j_trans_id <= trans_id) {
            return 1;
        }
    }
    return 0;
}
static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
    char b[BDEVNAME_SIZE];

    if (buffer_journaled(bh)) {
        reiserfs_warning(NULL, "clm-2084: pinned buffer %lu:%s sent to disk",
                         bh->b_blocknr, bdevname(bh->b_bdev, b)) ;
    }
    if (uptodate)
        set_buffer_uptodate(bh) ;
    else
        clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}
static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) {
    if (uptodate) set_buffer_uptodate(bh) ;
    else clear_buffer_uptodate(bh) ;
    unlock_buffer(bh) ;
    put_bh(bh) ;
}
static void submit_logged_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_buffer_io_sync ;
    clear_buffer_journal_new (bh);
    clear_buffer_dirty(bh) ;
    if (!test_clear_buffer_journal_test (bh))
        BUG();
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}
static void submit_ordered_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    submit_bh(WRITE, bh) ;
}
static int submit_barrier_buffer(struct buffer_head *bh) {
    get_bh(bh) ;
    bh->b_end_io = reiserfs_end_ordered_io;
    clear_buffer_dirty(bh) ;
    if (!buffer_uptodate(bh))
        BUG();
    return submit_bh(WRITE_BARRIER, bh) ;
}
static void check_barrier_completion(struct super_block *s,
                                     struct buffer_head *bh) {
    if (buffer_eopnotsupp(bh)) {
        clear_buffer_eopnotsupp(bh);
        disable_barrier(s);
        set_buffer_uptodate(bh);
        set_buffer_dirty(bh);
        sync_dirty_buffer(bh);
    }
}
#define CHUNK_SIZE 32
struct buffer_chunk {
    struct buffer_head *bh[CHUNK_SIZE];
    int nr;
};
static void write_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_logged_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}

static void write_ordered_chunk(struct buffer_chunk *chunk) {
    int i;
    for (i = 0; i < chunk->nr ; i++) {
        submit_ordered_buffer(chunk->bh[i]) ;
    }
    chunk->nr = 0;
}
static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh,
                        spinlock_t *lock,
                        void (fn)(struct buffer_chunk *))
{
    int ret = 0;
    if (chunk->nr >= CHUNK_SIZE)
        BUG();
    chunk->bh[chunk->nr++] = bh;
    if (chunk->nr >= CHUNK_SIZE) {
        ret = 1;
        if (lock) spin_unlock(lock);
        fn(chunk);
        if (lock) spin_lock(lock);
    }
    return ret;
}
atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void) {
    struct reiserfs_jh *jh;
    while(1) {
        jh = kmalloc(sizeof(*jh), GFP_NOFS);
        if (jh) {
            atomic_inc(&nr_reiserfs_jh);
            return jh;
        }
        yield();
    }
}
/*
 * we want to free the jh when the buffer has been written
 * and waited on
 */
void reiserfs_free_jh(struct buffer_head *bh) {
    struct reiserfs_jh *jh;

    jh = bh->b_private;
    if (jh) {
        bh->b_private = NULL;
        jh->bh = NULL;
        list_del_init(&jh->list);
        kfree(jh);
        if (atomic_read(&nr_reiserfs_jh) <= 0)
            BUG();
        atomic_dec(&nr_reiserfs_jh);
        put_bh(bh);
    }
}
static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh,
                           int tail)
{
    struct reiserfs_jh *jh;

    if (bh->b_private) {
        spin_lock(&j->j_dirty_buffers_lock);
        if (!bh->b_private) {
            spin_unlock(&j->j_dirty_buffers_lock);
            goto no_jh;
        }
        jh = bh->b_private;
        list_del_init(&jh->list);
    } else {
no_jh:
        get_bh(bh);
        jh = alloc_jh();
        spin_lock(&j->j_dirty_buffers_lock);
        /* buffer must be locked for __add_jh, should be able to have
         * two adds at the same time
         */
        if (bh->b_private)
            BUG();
        jh->bh = bh;
        bh->b_private = jh;
    }
    jh->jl = j->j_current_jl;
    if (tail)
        list_add_tail(&jh->list, &jh->jl->j_tail_bh_list);
    else
        list_add_tail(&jh->list, &jh->jl->j_bh_list);
    spin_unlock(&j->j_dirty_buffers_lock);
    return 0;
}
int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1);
}
int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) {
    return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0);
}
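/*
** A note on the two lists (my summary of the code above): tail == 1
** queues the jh on j_tail_bh_list, tail == 0 on j_bh_list.
** flush_commit_list() below writes j_bh_list out via
** write_ordered_buffers() before the commit block is submitted, which is
** what provides the ordered-data guarantee.
*/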
#define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list)
static int write_ordered_buffers(spinlock_t *lock,
                                 struct reiserfs_journal *j,
                                 struct reiserfs_journal_list *jl,
                                 struct list_head *list)
{
    struct buffer_head *bh;
    struct reiserfs_jh *jh;
    int ret = j->j_errno;
    struct buffer_chunk chunk;
    struct list_head tmp;
    INIT_LIST_HEAD(&tmp);

    chunk.nr = 0;
    spin_lock(lock);
    while(!list_empty(list)) {
        jh = JH_ENTRY(list->next);
        bh = jh->bh;
        get_bh(bh);
        if (test_set_buffer_locked(bh)) {
            if (!buffer_dirty(bh)) {
                list_del_init(&jh->list);
                list_add(&jh->list, &tmp);
                goto loop_next;
            }
            spin_unlock(lock);
            if (chunk.nr)
                write_ordered_chunk(&chunk);
            wait_on_buffer(bh);
            cond_resched();
            spin_lock(lock);
            goto loop_next;
        }
        if (buffer_dirty(bh)) {
            list_del_init(&jh->list);
            list_add(&jh->list, &tmp);
            add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
        } else {
            reiserfs_free_jh(bh);
            unlock_buffer(bh);
        }
loop_next:
        put_bh(bh);
        cond_resched_lock(lock);
    }
    if (chunk.nr) {
        spin_unlock(lock);
        write_ordered_chunk(&chunk);
        spin_lock(lock);
    }
    while(!list_empty(&tmp)) {
        jh = JH_ENTRY(tmp.prev);
        bh = jh->bh;
        get_bh(bh);
        reiserfs_free_jh(bh);

        if (buffer_locked(bh)) {
            spin_unlock(lock);
            wait_on_buffer(bh);
            spin_lock(lock);
        }
        if (!buffer_uptodate(bh)) {
            ret = -EIO;
        }
        put_bh(bh);
        cond_resched_lock(lock);
    }
    spin_unlock(lock);
    return ret;
}
static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct reiserfs_journal_list *other_jl;
    struct reiserfs_journal_list *first_jl;
    struct list_head *entry;
    unsigned long trans_id = jl->j_trans_id;
    unsigned long other_trans_id;
    unsigned long first_trans_id;

find_first:
    /*
     * first we walk backwards to find the oldest uncommitted transaction
     */
    first_jl = jl;
    entry = jl->j_list.prev;
    while(1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        if (entry == &journal->j_journal_list ||
            atomic_read(&other_jl->j_older_commits_done))
            break;

        first_jl = other_jl;
        entry = other_jl->j_list.prev;
    }

    /* if we didn't find any older uncommitted transactions, return now */
    if (first_jl == jl) {
        return 0;
    }

    first_trans_id = first_jl->j_trans_id;

    entry = &first_jl->j_list;
    while(1) {
        other_jl = JOURNAL_LIST_ENTRY(entry);
        other_trans_id = other_jl->j_trans_id;

        if (other_trans_id < trans_id) {
            if (atomic_read(&other_jl->j_commit_left) != 0) {
                flush_commit_list(s, other_jl, 0);

                /* list we were called with is gone, return */
                if (!journal_list_still_alive(s, trans_id))
                    return 1;

                /* the one we just flushed is gone, this means all
                 * older lists are also gone, so first_jl is no longer
                 * valid either.  Go back to the beginning.
                 */
                if (!journal_list_still_alive(s, other_trans_id)) {
                    goto find_first;
                }
            }
            entry = entry->next;
            if (entry == &journal->j_journal_list)
                return 0;
        } else {
            return 0;
        }
    }
    return 0;
}
int reiserfs_async_progress_wait(struct super_block *s) {
    DEFINE_WAIT(wait);
    struct reiserfs_journal *j = SB_JOURNAL(s);
    if (atomic_read(&j->j_async_throttle))
        blk_congestion_wait(WRITE, HZ/10);
    return 0;
}
/*
** if this journal list still has commit blocks unflushed, send them to disk.
**
** log areas must be flushed in order (transaction 2 can't commit before transaction 1)
** Before the commit block can be written, every other log block must be safely on disk
**
*/
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) {
  int i;
  int bn ;
  struct buffer_head *tbh = NULL ;
  unsigned long trans_id = jl->j_trans_id;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int barrier = 0;
  int retval = 0;

  reiserfs_check_lock_depth(s, "flush_commit_list") ;

  if (atomic_read(&jl->j_older_commits_done)) {
    return 0 ;
  }

  /* before we can put our commit blocks on disk, we have to make sure everyone older than
  ** us is on disk too
  */
  BUG_ON (jl->j_len <= 0);
  BUG_ON (trans_id == journal->j_trans_id);

  get_journal_list(jl);
  if (flushall) {
    if (flush_older_commits(s, jl) == 1) {
      /* list disappeared during flush_older_commits.  return */
      goto put_jl;
    }
  }

  /* make sure nobody is trying to flush this one at the same time */
  down(&jl->j_commit_lock);
  if (!journal_list_still_alive(s, trans_id)) {
    up(&jl->j_commit_lock);
    goto put_jl;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* this commit is done, exit */
  if (atomic_read(&(jl->j_commit_left)) <= 0) {
    if (flushall) {
      atomic_set(&(jl->j_older_commits_done), 1) ;
    }
    up(&jl->j_commit_lock);
    goto put_jl;
  }

  if (!list_empty(&jl->j_bh_list)) {
      unlock_kernel();
      write_ordered_buffers(&journal->j_dirty_buffers_lock,
                            journal, jl, &jl->j_bh_list);
      lock_kernel();
  }
  BUG_ON (!list_empty(&jl->j_bh_list));
  /*
   * for the description block and all the log blocks, submit any buffers
   * that haven't already reached the disk
   */
  atomic_inc(&journal->j_async_throttle);
  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %
         SB_ONDISK_JOURNAL_SIZE(s);
    tbh = journal_find_get_block(s, bn) ;
    if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
      ll_rw_block(WRITE, 1, &tbh) ;
    put_bh(tbh) ;
  }
  atomic_dec(&journal->j_async_throttle);

  /* wait on everything written so far before writing the commit
   * if we are in barrier mode, send the commit down now
   */
  barrier = reiserfs_barrier_flush(s);
  if (barrier) {
      int ret;
      lock_buffer(jl->j_commit_bh);
      ret = submit_barrier_buffer(jl->j_commit_bh);
      if (ret == -EOPNOTSUPP) {
          set_buffer_uptodate(jl->j_commit_bh);
          disable_barrier(s);
          barrier = 0;
      }
  }
  for (i = 0 ; i < (jl->j_len + 1) ; i++) {
    bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
         (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
    tbh = journal_find_get_block(s, bn) ;
    wait_on_buffer(tbh) ;
    // since we're using ll_rw_blk above, it might have skipped over
    // a locked buffer.  Double check here
    //
    if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
      sync_dirty_buffer(tbh);
    if (unlikely (!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
      reiserfs_warning(s, "journal-601, buffer write failed") ;
#endif
      retval = -EIO;
    }
    put_bh(tbh) ; /* once for journal_find_get_block */
    put_bh(tbh) ; /* once due to original getblk in do_journal_end */
    atomic_dec(&(jl->j_commit_left)) ;
  }

  BUG_ON (atomic_read(&(jl->j_commit_left)) != 1);

  if (!barrier) {
      if (buffer_dirty(jl->j_commit_bh))
        BUG();
      mark_buffer_dirty(jl->j_commit_bh) ;
      sync_dirty_buffer(jl->j_commit_bh) ;
  } else
      wait_on_buffer(jl->j_commit_bh);

  check_barrier_completion(s, jl->j_commit_bh);

  /* If there was a write error in the journal - we can't commit this
   * transaction - it will be invalid and, if successful, will just end
   * up propagating the write error out to the filesystem. */
  if (unlikely (!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
    reiserfs_warning(s, "journal-615: buffer write failed") ;
#endif
    retval = -EIO;
  }
  bforget(jl->j_commit_bh) ;
  if (journal->j_last_commit_id != 0 &&
      (jl->j_trans_id - journal->j_last_commit_id) != 1) {
      reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
                       journal->j_last_commit_id,
                       jl->j_trans_id);
  }
  journal->j_last_commit_id = jl->j_trans_id;

  /* now, every commit block is on the disk.  It is safe to allow blocks freed during this transaction to be reallocated */
  cleanup_freed_for_journal_list(s, jl) ;

  retval = retval ? retval : journal->j_errno;

  /* mark the metadata dirty */
  if (!retval)
    dirty_one_transaction(s, jl);
  atomic_dec(&(jl->j_commit_left)) ;

  if (flushall) {
    atomic_set(&(jl->j_older_commits_done), 1) ;
  }
  up(&jl->j_commit_lock);
put_jl:
  put_journal_list(s, jl);

  if (retval)
    reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
  return retval;
}
/*
** flush_journal_list frequently needs to find a newer transaction for a given block.  This does that, or
** returns NULL if it can't find anything
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct reiserfs_journal_cnode *cn) {
  struct super_block *sb = cn->sb;
  b_blocknr_t blocknr = cn->blocknr ;

  cn = cn->hprev ;
  while(cn) {
    if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
      return cn->jlist ;
    }
    cn = cn->hprev ;
  }
  return NULL ;
}

void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
struct reiserfs_journal_list *, unsigned long, int);
/*
** once all the real blocks have been flushed, it is safe to remove them from the
** journal list for this transaction.  Aside from freeing the cnode, this also allows the
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_cnode *cn, *last ;
  cn = jl->j_realblock ;

  /* which is better, to lock once around the whole loop, or
  ** to lock for each call to remove_journal_hash?
  */
  while(cn) {
    if (cn->blocknr != 0) {
      if (debug) {
        reiserfs_warning (p_s_sb, "block %u, bh is %d, state %ld", cn->blocknr,
                          cn->bh ? 1: 0, cn->state) ;
      }
      cn->state = 0 ;
      remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1) ;
    }
    last = cn ;
    cn = cn->next ;
    free_cnode(p_s_sb, last) ;
  }
  jl->j_realblock = NULL ;
}
/*
** if this timestamp is greater than the timestamp we wrote last to the header block, write it to the header block.
** once this is done, I can safely say the log area for this transaction won't ever be replayed, and I can start
** releasing blocks in this transaction for reuse as data blocks.
** called by flush_journal_list, before it calls remove_all_from_journal_list
**
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
  struct reiserfs_journal_header *jh ;
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

  if (reiserfs_is_journal_aborted (journal))
    return -EIO;

  if (trans_id >= journal->j_last_flush_trans_id) {
    if (buffer_locked((journal->j_header_bh)))  {
      wait_on_buffer((journal->j_header_bh)) ;
      if (unlikely (!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
        reiserfs_warning (p_s_sb, "journal-699: buffer write failed") ;
#endif
        return -EIO;
      }
    }
    journal->j_last_flush_trans_id = trans_id ;
    journal->j_first_unflushed_offset = offset ;
    jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
    jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
    jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
    jh->j_mount_id = cpu_to_le32(journal->j_mount_id) ;

    if (reiserfs_barrier_flush(p_s_sb)) {
        int ret;
        lock_buffer(journal->j_header_bh);
        ret = submit_barrier_buffer(journal->j_header_bh);
        if (ret == -EOPNOTSUPP) {
            set_buffer_uptodate(journal->j_header_bh);
            disable_barrier(p_s_sb);
            goto sync;
        }
        wait_on_buffer(journal->j_header_bh);
        check_barrier_completion(p_s_sb, journal->j_header_bh);
    } else {
sync:
        set_buffer_dirty(journal->j_header_bh) ;
        sync_dirty_buffer(journal->j_header_bh) ;
    }
    if (!buffer_uptodate(journal->j_header_bh)) {
      reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
      return -EIO ;
    }
  }
  return 0 ;
}
static int update_journal_header_block(struct super_block *p_s_sb,
                                       unsigned long offset,
                                       unsigned long trans_id) {
    return _update_journal_header_block(p_s_sb, offset, trans_id);
}
/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *p_s_sb,
                                     struct reiserfs_journal_list *jl)
{
    struct list_head *entry;
    struct reiserfs_journal_list *other_jl ;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    unsigned long trans_id = jl->j_trans_id;

    /* we know we are the only ones flushing things, no extra race
     * protection is required.
     */
restart:
    entry = journal->j_journal_list.next;
    /* did we wrap? */
    if (entry == &journal->j_journal_list)
        return 0;
    other_jl = JOURNAL_LIST_ENTRY(entry);
    if (other_jl->j_trans_id < trans_id) {
        BUG_ON (other_jl->j_refcount <= 0);
        /* do not flush all */
        flush_journal_list(p_s_sb, other_jl, 0) ;

        /* other_jl is now deleted from the list */
        goto restart;
    }
    return 0 ;
}
static void del_from_work_list(struct super_block *s,
                               struct reiserfs_journal_list *jl) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    if (!list_empty(&jl->j_working_list)) {
        list_del_init(&jl->j_working_list);
        journal->j_num_work_lists--;
    }
}
/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.  That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
                              struct reiserfs_journal_list *jl, int flushall) {
  struct reiserfs_journal_list *pjl ;
  struct reiserfs_journal_cnode *cn, *last ;
  int count ;
  int was_jwait = 0 ;
  int was_dirty = 0 ;
  struct buffer_head *saved_bh ;
  unsigned long j_len_saved = jl->j_len ;
  struct reiserfs_journal *journal = SB_JOURNAL (s);
  int err = 0;

  BUG_ON (j_len_saved <= 0);

  if (atomic_read(&journal->j_wcount) != 0) {
    reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
                     atomic_read(&journal->j_wcount)) ;
  }
  BUG_ON (jl->j_trans_id == 0);

  /* if flushall == 0, the lock is already held */
  if (flushall) {
      down(&journal->j_flush_sem);
  } else if (!down_trylock(&journal->j_flush_sem)) {
      BUG();
  }

  count = 0 ;
  if (j_len_saved > journal->j_trans_max) {
    reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
    return 0 ;
  }

  /* if all the work is already done, get out of here */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* start by putting the commit list on disk.  This will also flush
  ** the commit lists of any older transactions
  */
  flush_commit_list(s, jl, 1) ;

  if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
      BUG();

  /* are we done now? */
  if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
      atomic_read(&(jl->j_commit_left)) <= 0) {
    goto flush_older_and_return ;
  }

  /* loop through each cnode, see if we need to write it,
  ** or wait on a more recent transaction, or just ignore it
  */
  if (atomic_read(&(journal->j_wcount)) != 0) {
    reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
  }
  cn = jl->j_realblock ;
  while(cn) {
    was_jwait = 0 ;
    was_dirty = 0 ;
    saved_bh = NULL ;
    /* blocknr of 0 is no longer in the hash, ignore it */
    if (cn->blocknr == 0) {
      goto free_cnode ;
    }

    /* This transaction failed commit. Don't write out to the disk */
    if (!(jl->j_state & LIST_DIRTY))
        goto free_cnode;

    pjl = find_newer_jl_for_cn(cn) ;
    /* the order is important here.  We check pjl to make sure we
    ** don't clear BH_JDirty_wait if we aren't the one writing this
    ** block to disk
    */
    if (!pjl && cn->bh) {
      saved_bh = cn->bh ;

      /* we do this to make sure nobody releases the buffer while
      ** we are working with it
      */
      get_bh(saved_bh) ;

      if (buffer_journal_dirty(saved_bh)) {
        BUG_ON (!can_dirty (cn));
        was_jwait = 1 ;
        was_dirty = 1 ;
      } else if (can_dirty(cn)) {
        /* everything with !pjl && jwait should be writable */
        BUG();
      }
    }

    /* if someone has this block in a newer transaction, just make
    ** sure they are committed, and don't try writing it to disk
    */
    if (pjl) {
      if (atomic_read(&pjl->j_commit_left))
        flush_commit_list(s, pjl, 1) ;
      goto free_cnode ;
    }

    /* bh == NULL when the block got to disk on its own, OR,
    ** the block got freed in a future transaction
    */
    if (saved_bh == NULL) {
      goto free_cnode ;
    }

    /* this should never happen.  kupdate_one_transaction has this list
    ** locked while it works, so we should never see a buffer here that
    ** is not marked JDirty_wait
    */
    if ((!was_jwait) && !buffer_locked(saved_bh)) {
        reiserfs_warning (s, "journal-813: BAD! buffer %llu %cdirty %cjwait, "
                          "not in a newer transaction",
                          (unsigned long long)saved_bh->b_blocknr,
                          was_dirty ? ' ' : '!', was_jwait ? ' ' : '!') ;
    }
    if (was_dirty) {
      /* we inc again because saved_bh gets decremented at free_cnode */
      get_bh(saved_bh) ;
      set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
      lock_buffer(saved_bh);
      BUG_ON (cn->blocknr != saved_bh->b_blocknr);
      if (buffer_dirty(saved_bh))
        submit_logged_buffer(saved_bh) ;
      else
        unlock_buffer(saved_bh);
      count++ ;
    } else {
      reiserfs_warning (s, "clm-2082: Unable to flush buffer %llu in %s",
                        (unsigned long long)saved_bh->b_blocknr, __FUNCTION__);
    }
free_cnode:
    last = cn ;
    cn = cn->next ;
    if (saved_bh) {
      /* we incremented this to keep others from taking the buffer head away */
      put_bh(saved_bh) ;
      if (atomic_read(&(saved_bh->b_count)) < 0) {
        reiserfs_warning (s, "journal-945: saved_bh->b_count < 0");
      }
    }
  }
  if (count > 0) {
    cn = jl->j_realblock ;
    while(cn) {
      if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1011: cn->bh is NULL\n") ;
        }
        wait_on_buffer(cn->bh) ;
        if (!cn->bh) {
          reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
        }
        if (unlikely (!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
          reiserfs_warning(s, "journal-949: buffer write failed\n") ;
#endif
          err = -EIO;
        }
        /* note, we must clear the JDirty_wait bit after the up to date
        ** check, otherwise we race against our flushpage routine
        */
        BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));

        /* undo the inc from journal_mark_dirty */
        put_bh(cn->bh) ;
        brelse(cn->bh) ;
      }
      cn = cn->next ;
    }
  }

  if (err)
    reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
flush_older_and_return:

  /* before we can update the journal header block, we _must_ flush all
  ** real blocks from all older transactions to disk.  This is because
  ** once the header block is updated, this transaction will not be
  ** replayed after a crash
  */
  if (flushall) {
    flush_older_journal_lists(s, jl);
  }

  err = journal->j_errno;
  /* before we can remove everything from the hash tables for this
  ** transaction, we must make sure it can never be replayed
  **
  ** since we are only called from do_journal_end, we know for sure there
  ** are no allocations going on while we are flushing journal lists.  So,
  ** we only need to update the journal header block for the last list
  ** being flushed
  */
  if (!err && flushall) {
    err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
    if (err)
        reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
  }
  remove_all_from_journal_list(s, jl, 0) ;
  list_del_init(&jl->j_list);
  journal->j_num_lists--;
  del_from_work_list(s, jl);

  if (journal->j_last_flush_id != 0 &&
      (jl->j_trans_id - journal->j_last_flush_id) != 1) {
      reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
                       journal->j_last_flush_id,
                       jl->j_trans_id);
  }
  journal->j_last_flush_id = jl->j_trans_id;

  /* not strictly required since we are freeing the list, but it should
   * help find code using dead lists later on
   */
  jl->j_len = 0 ;
  atomic_set(&(jl->j_nonzerolen), 0) ;
  jl->j_start = 0 ;
  jl->j_realblock = NULL ;
  jl->j_commit_bh = NULL ;
  jl->j_trans_id = 0 ;
  jl->j_state = 0;
  put_journal_list(s, jl);
  if (flushall)
    up(&journal->j_flush_sem);
  return err ;
}
static int write_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl,
                                 struct buffer_chunk *chunk)
{
    struct reiserfs_journal_cnode *cn;
    int ret = 0 ;

    jl->j_state |= LIST_TOUCHED;
    del_from_work_list(s, jl);
    if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
        return 0;
    }

    cn = jl->j_realblock ;
    while(cn) {
        /* if the blocknr == 0, this has been cleared from the hash,
        ** skip it
        */
        if (cn->blocknr == 0) {
            goto next ;
        }
        if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
            struct buffer_head *tmp_bh;
            /* we can race against journal_mark_freed when we try
             * to lock_buffer(cn->bh), so we have to inc the buffer
             * count, and recheck things after locking
             */
            tmp_bh = cn->bh;
            get_bh(tmp_bh);
            lock_buffer(tmp_bh);
            if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
                if (!buffer_journal_dirty(tmp_bh) ||
                    buffer_journal_prepared(tmp_bh))
                    BUG();
                add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
                ret++;
            } else {
                /* note, cn->bh might be null now */
                unlock_buffer(tmp_bh);
            }
            put_bh(tmp_bh);
        }
next:
        cn = cn->next ;
        cond_resched();
    }
    return ret ;
}
/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
                                 struct reiserfs_journal_list *jl)
{
    struct reiserfs_journal_cnode *cn;
    struct reiserfs_journal_list *pjl;
    int ret = 0 ;

    jl->j_state |= LIST_DIRTY;
    cn = jl->j_realblock ;
    while(cn) {
        /* look for a more recent transaction that logged this
        ** buffer.  Only the most recent transaction with a buffer in
        ** it is allowed to send that buffer to disk
        */
        pjl = find_newer_jl_for_cn(cn) ;
        if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
        {
            BUG_ON (!can_dirty(cn));
            /* if the buffer is prepared, it will either be logged
             * or restored.  If restored, we need to make sure
             * it actually gets marked dirty
             */
            clear_buffer_journal_new (cn->bh);
            if (buffer_journal_prepared (cn->bh)) {
                set_buffer_journal_restore_dirty (cn->bh);
            } else {
                set_buffer_journal_test (cn->bh);
                mark_buffer_dirty(cn->bh);
            }
        }
        cn = cn->next ;
    }
    return ret ;
}
static int kupdate_transactions(struct super_block *s,
                                struct reiserfs_journal_list *jl,
                                struct reiserfs_journal_list **next_jl,
                                unsigned long *next_trans_id,
                                int num_blocks,
                                int num_trans) {
    int ret = 0;
    int written = 0 ;
    int transactions_flushed = 0;
    unsigned long orig_trans_id = jl->j_trans_id;
    struct buffer_chunk chunk;
    struct list_head *entry;
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    chunk.nr = 0;

    down(&journal->j_flush_sem);
    if (!journal_list_still_alive(s, orig_trans_id)) {
        goto done;
    }

    /* we've got j_flush_sem held, nobody is going to delete any
     * of these lists out from underneath us
     */
    while((num_trans && transactions_flushed < num_trans) ||
          (!num_trans && written < num_blocks)) {

        if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
            atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
        {
            del_from_work_list(s, jl);
            break;
        }
        ret = write_one_transaction(s, jl, &chunk);

        if (ret < 0)
            goto done;
        transactions_flushed++;
        written += ret;
        entry = jl->j_list.next;

        /* did we wrap? */
        if (entry == &journal->j_journal_list) {
            break;
        }
        jl = JOURNAL_LIST_ENTRY(entry);

        /* don't bother with older transactions */
        if (jl->j_trans_id <= orig_trans_id)
            break;
    }
    if (chunk.nr) {
        write_chunk(&chunk);
    }

done:
    up(&journal->j_flush_sem);
    return ret;
}
/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
                                    struct reiserfs_journal_list *jl) {
    unsigned long len = 0;
    unsigned long cur_len;
    int ret;
    int i;
    int limit = 256;
    struct reiserfs_journal_list *tjl;
    struct reiserfs_journal_list *flush_jl;
    unsigned long trans_id;
    struct reiserfs_journal *journal = SB_JOURNAL (s);

    flush_jl = tjl = jl;

    /* in data logging mode, try harder to flush a lot of blocks */
    if (reiserfs_data_log(s))
        limit = 1024;
    /* flush for 256 transactions or limit blocks, whichever comes first */
    for(i = 0 ; i < 256 && len < limit ; i++) {
        if (atomic_read(&tjl->j_commit_left) ||
            tjl->j_trans_id < jl->j_trans_id) {
            break;
        }
        cur_len = atomic_read(&tjl->j_nonzerolen);
        if (cur_len > 0) {
            tjl->j_state &= ~LIST_TOUCHED;
        }
        len += cur_len;
        flush_jl = tjl;
        if (tjl->j_list.next == &journal->j_journal_list)
            break;
        tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
    }
    /* try to find a group of blocks we can flush across all the
    ** transactions, but only bother if we've actually spanned
    ** across multiple lists
    */
    if (flush_jl != jl) {
        ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
    }
    flush_journal_list(s, flush_jl, 1);
    return 0;
}
/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
                         struct reiserfs_journal_cnode **table,
                         struct reiserfs_journal_list *jl,
                         unsigned long block, int remove_freed)
{
  struct reiserfs_journal_cnode *cur ;
  struct reiserfs_journal_cnode **head ;

  head= &(journal_hash(table, sb, block)) ;
  if (!head) {
    return ;
  }
  cur = *head ;
  while(cur) {
    if (cur->blocknr == block && cur->sb == sb && (jl == NULL || jl == cur->jlist) &&
        (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
      if (cur->hnext) {
        cur->hnext->hprev = cur->hprev ;
      }
      if (cur->hprev) {
        cur->hprev->hnext = cur->hnext ;
      } else {
        *head = cur->hnext ;
      }
      cur->blocknr = 0 ;
      cur->sb = NULL ;
      cur->state = 0 ;
      if (cur->bh && cur->jlist) /* anybody who clears the cur->bh will also dec the nonzerolen */
        atomic_dec(&(cur->jlist->j_nonzerolen)) ;
      cur->bh = NULL ;
      cur->jlist = NULL ;
    }
    cur = cur->hnext ;
  }
}
static void free_journal_ram(struct super_block *p_s_sb) {
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
  reiserfs_kfree(journal->j_current_jl,
                 sizeof(struct reiserfs_journal_list), p_s_sb);
  journal->j_num_lists--;

  vfree(journal->j_cnode_free_orig) ;
  free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
  free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
  if (journal->j_header_bh) {
    brelse(journal->j_header_bh) ;
  }
  /* j_header_bh is on the journal dev, make sure not to release the journal
   * dev until we brelse j_header_bh
   */
  release_journal_dev(p_s_sb, journal);
  vfree(journal) ;
}
/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
  struct reiserfs_transaction_handle myth ;
  int flushed = 0;
  struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);

  /* we only want to flush out transactions if we were called with error == 0
  */
  if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
    /* end the current trans */
    BUG_ON (!th->t_trans_id);
    do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;

    /* make sure something gets logged to force our way into the flush code */
    if (!journal_join(&myth, p_s_sb, 1)) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
        do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
        flushed = 1;
    }
  }

  /* this also catches errors during the do_journal_end above */
  if (!error && reiserfs_is_journal_aborted(journal)) {
      memset(&myth, 0, sizeof(myth));
      if (!journal_join_abort(&myth, p_s_sb, 1)) {
          reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
          journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
          do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
      }
  }

  reiserfs_mounted_fs_count-- ;
  /* wait for all commits to finish */
  cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
  flush_workqueue(commit_wq);
  if (!reiserfs_mounted_fs_count) {
    destroy_workqueue(commit_wq);
    commit_wq = NULL;
  }

  free_journal_ram(p_s_sb) ;

  return 0 ;
}
/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 0) ;
}
/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb) {
  return do_journal_release(th, p_s_sb, 1) ;
}
/* compares description block with commit block.  returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *p_s_sb, struct reiserfs_journal_desc *desc,
                                       struct reiserfs_journal_commit *commit) {
  if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
      get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
      get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
      get_commit_trans_len (commit) <= 0
  ) {
    return 1 ;
  }
  return 0 ;
}
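/*
** The on-disk layout being validated here, as the surrounding code
** computes it: a transaction occupies a description block, then j_len
** log blocks, then the commit block, all modulo the on-disk journal size:
**
**   [ desc | log 0 | log 1 | ... | log j_len-1 | commit ]
*/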
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *p_s_sb, struct buffer_head *d_bh, unsigned long *oldest_invalid_trans_id, unsigned long *newest_mount_id) {
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  struct buffer_head *c_bh ;
  unsigned long offset ;

  if (!d_bh)
      return 0 ;

  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  if (get_desc_trans_len(desc) > 0 && !memcmp(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8)) {
    if (oldest_invalid_trans_id && *oldest_invalid_trans_id && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-986: transaction "
                     "is valid returning because trans_id %d is greater than "
                     "oldest_invalid %lu", get_desc_trans_id(desc),
                     *oldest_invalid_trans_id);
      return 0 ;
    }
    if (newest_mount_id && *newest_mount_id > get_desc_mount_id (desc)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1087: transaction "
                     "is valid returning because mount_id %d is less than "
                     "newest_mount_id %lu", get_desc_mount_id (desc),
                     *newest_mount_id) ;
      return -1 ;
    }
    if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
      reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
      return -1 ;
    }
    offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;

    /* ok, we have a journal description block, lets see if the transaction was valid */
    c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                 ((offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    if (!c_bh)
      return 0 ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
      reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
                     "journal_transaction_is_valid, commit offset %ld had bad "
                     "time %d or length %d",
                     c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                     get_commit_trans_id (commit),
                     get_commit_trans_len(commit));
      brelse(c_bh) ;
      if (oldest_invalid_trans_id) {
        *oldest_invalid_trans_id = get_desc_trans_id(desc) ;
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1004: "
                       "transaction_is_valid setting oldest invalid trans_id "
                       "to %d", get_desc_trans_id(desc)) ;
      }
      return -1;
    }
    brelse(c_bh) ;
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1006: found valid "
                   "transaction start offset %llu, len %d id %d",
                   d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_desc_trans_len(desc), get_desc_trans_id(desc)) ;
    return 1 ;
  } else {
    return 0 ;
  }
}
static void brelse_array(struct buffer_head **heads, int num) {
  int i ;
  for (i = 0 ; i < num ; i++) {
    brelse(heads[i]) ;
  }
}
/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the transaction
** is invalid, or too old.
*/
static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
                                    unsigned long oldest_trans_id, unsigned long newest_mount_id) {
  struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
  struct reiserfs_journal_desc *desc ;
  struct reiserfs_journal_commit *commit ;
  unsigned long trans_id = 0 ;
  struct buffer_head *c_bh ;
  struct buffer_head *d_bh ;
  struct buffer_head **log_blocks = NULL ;
  struct buffer_head **real_blocks = NULL ;
  unsigned long trans_offset ;
  int i;
  int trans_half;

  d_bh = journal_bread(p_s_sb, cur_dblock) ;
  if (!d_bh)
    return 1 ;
  desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
  trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
                 "journal_read_transaction, offset %llu, len %d mount_id %d",
                 d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                 get_desc_trans_len(desc), get_desc_mount_id(desc)) ;
  if (get_desc_trans_id(desc) < oldest_trans_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
                   "journal_read_trans skipping because %lu is too old",
                   cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
    brelse(d_bh) ;
    return 1 ;
  }
  if (get_desc_mount_id(desc) != newest_mount_id) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
                   "journal_read_trans skipping because %d is != "
                   "newest_mount_id %lu", get_desc_mount_id(desc),
                   newest_mount_id) ;
    brelse(d_bh) ;
    return 1 ;
  }
  c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                ((trans_offset + get_desc_trans_len(desc) + 1) %
                 SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
  if (!c_bh) {
    brelse(d_bh) ;
    return 1 ;
  }
  commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
  if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal_read_transaction, "
                   "commit offset %llu had bad time %d or length %d",
                   c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   get_commit_trans_id(commit), get_commit_trans_len(commit));
    brelse(c_bh) ;
    brelse(d_bh) ;
    return 1;
  }
  trans_id = get_desc_trans_id(desc) ;
  /* now we know we've got a good transaction, and it was inside the valid time ranges */
  log_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  real_blocks = reiserfs_kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS, p_s_sb) ;
  if (!log_blocks || !real_blocks) {
    brelse(c_bh) ;
    brelse(d_bh) ;
    reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
    reiserfs_warning(p_s_sb, "journal-1169: kmalloc failed, unable to mount FS") ;
    return -1 ;
  }
  /* get all the buffer heads */
  trans_half = journal_trans_half (p_s_sb->s_blocksize) ;
  for(i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    log_blocks[i] = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (i < trans_half) {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(desc->j_realblock[i])) ;
    } else {
      real_blocks[i] = sb_getblk(p_s_sb, le32_to_cpu(commit->j_realblock[i - trans_half])) ;
    }
    if ( real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb) ) {
      reiserfs_warning(p_s_sb, "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
      goto abort_replay;
    }
    /* make sure we don't try to replay onto log or reserved area */
    if (is_block_in_log_or_reserved_area(p_s_sb, real_blocks[i]->b_blocknr)) {
      reiserfs_warning(p_s_sb, "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block") ;
abort_replay:
      brelse_array(log_blocks, i) ;
      brelse_array(real_blocks, i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
  }
  /* read in the log blocks, memcpy to the corresponding real block */
  ll_rw_block(READ, get_desc_trans_len(desc), log_blocks) ;
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(log_blocks[i]) ;
    if (!buffer_uptodate(log_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1212: REPLAY FAILURE fsck required! buffer write failed") ;
      brelse_array(log_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse_array(real_blocks, get_desc_trans_len(desc)) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
    set_buffer_uptodate(real_blocks[i]) ;
    brelse(log_blocks[i]) ;
  }
  /* flush out the real blocks */
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    set_buffer_dirty(real_blocks[i]) ;
    ll_rw_block(WRITE, 1, real_blocks + i) ;
  }
  for (i = 0 ; i < get_desc_trans_len(desc) ; i++) {
    wait_on_buffer(real_blocks[i]) ;
    if (!buffer_uptodate(real_blocks[i])) {
      reiserfs_warning(p_s_sb, "journal-1226: REPLAY FAILURE, fsck required! buffer write failed") ;
      brelse_array(real_blocks + i, get_desc_trans_len(desc) - i) ;
      brelse(c_bh) ;
      brelse(d_bh) ;
      reiserfs_kfree(log_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      reiserfs_kfree(real_blocks, get_desc_trans_len(desc) * sizeof(struct buffer_head *), p_s_sb) ;
      return -1 ;
    }
    brelse(real_blocks[i]) ;
  }
  cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
  reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal "
                 "start to offset %ld",
                 cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;

  /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
  journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
  journal->j_last_flush_trans_id = trans_id ;
  journal->j_trans_id = trans_id + 1;
  brelse(c_bh) ;
  brelse(d_bh) ;
  reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  reiserfs_kfree(real_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
  return 0 ;
}
/* This function reads blocks starting at block, up to max_block, of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here! */
struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
                                      unsigned int max_block)
{
    struct buffer_head * bhlist[BUFNR];
    unsigned int blocks = BUFNR;
    struct buffer_head * bh;
    int i, j;

    bh = __getblk (dev, block, bufsize);
    if (buffer_uptodate (bh))
        return (bh);

    if (block + BUFNR > max_block) {
        blocks = max_block - block;
    }
    bhlist[0] = bh;
    j = 1;
    for (i = 1; i < blocks; i++) {
        bh = __getblk (dev, block + i, bufsize);
        if (buffer_uptodate (bh)) {
            brelse (bh);
            break;
        }
        else bhlist[j++] = bh;
    }
    ll_rw_block (READ, j, bhlist);
    for (i = 1; i < j; i++)
        brelse (bhlist[i]);
    bh = bhlist[0];
    wait_on_buffer (bh);
    if (buffer_uptodate (bh))
        return bh;
    brelse (bh);
    return NULL;
}
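/* Example (editor's sketch, not part of the driver): how a replay loop can
** walk the log with reiserfs_breada(). The bounds below are the ones
** journal_read() uses; sb and journal stand for the usual super_block and
** reiserfs_journal pointers, and the loop body is elided.
**
**   unsigned long cur = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
**   unsigned long end = cur + SB_ONDISK_JOURNAL_SIZE(sb);
**   while (cur < end) {
**       struct buffer_head *bh = reiserfs_breada(journal->j_dev_bd, cur,
**                                                sb->s_blocksize, end);
**       ...examine the block, advance cur...
**       brelse(bh);
**   }
**
** Because reiserfs_breada() never reads past max_block, callers don't need
** to special-case the last BUFNR-sized window at the end of the log.
*/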
/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will point to an invalid
** transaction. This tests for that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_desc *desc ;
    unsigned long oldest_trans_id = 0;
    unsigned long oldest_invalid_trans_id = 0 ;
    time_t start ;
    unsigned long oldest_start = 0;
    unsigned long cur_dblock = 0 ;
    unsigned long newest_mount_id = 9 ;
    struct buffer_head *d_bh ;
    struct reiserfs_journal_header *jh ;
    int valid_journal_header = 0 ;
    int replay_count = 0 ;
    int continue_replay = 1 ;
    int ret ;
    char b[BDEVNAME_SIZE];

    cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
    reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
                   bdevname(journal->j_dev_bd, b));
    start = get_seconds();

    /* step 1, read in the journal header block. Check the transaction it says
    ** is the first unflushed, and if that transaction is not valid,
    ** replay is done
    */
    journal->j_header_bh = journal_bread(p_s_sb,
                                         SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                         SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (!journal->j_header_bh) {
        return 1 ;
    }
    jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
    if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
        le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
        le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
        oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                       le32_to_cpu(jh->j_first_unflushed_offset) ;
        oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
        newest_mount_id = le32_to_cpu(jh->j_mount_id);
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1153: found in "
                       "header: first_unflushed_offset %d, last_flushed_trans_id "
                       "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
                       le32_to_cpu(jh->j_last_flush_trans_id)) ;
        valid_journal_header = 1 ;

        /* now, we try to read the first unflushed offset. If it is not valid,
        ** there is nothing more we can do, and it makes no sense to read
        ** through the whole log.
        */
        d_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + le32_to_cpu(jh->j_first_unflushed_offset)) ;
        ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL) ;
        if (!ret) {
            continue_replay = 0 ;
        }
        brelse(d_bh) ;
        goto start_log_replay;
    }

    if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
        reiserfs_warning (p_s_sb,
                          "clm-2076: device is readonly, unable to replay log") ;
        return -1 ;
    }

    /* ok, there are transactions that need to be replayed. start with the first log block, find
    ** all the valid transactions, and pick out the oldest.
    */
    while (continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
        /* Note that it is required for the blocksize of the primary fs device and the journal
           device to be the same */
        d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
                               SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
        ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
        if (ret == 1) {
            desc = (struct reiserfs_journal_desc *)d_bh->b_data ;
            if (oldest_start == 0) { /* init all oldest_ values */
                oldest_trans_id = get_desc_trans_id(desc) ;
                oldest_start = d_bh->b_blocknr ;
                newest_mount_id = get_desc_mount_id(desc) ;
                reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1179: Setting "
                               "oldest_start to offset %llu, trans_id %lu",
                               oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                               oldest_trans_id) ;
            } else if (oldest_trans_id > get_desc_trans_id(desc)) {
                /* one we just read was older */
                oldest_trans_id = get_desc_trans_id(desc) ;
                oldest_start = d_bh->b_blocknr ;
                reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting "
                               "oldest_start to offset %lu, trans_id %lu",
                               oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                               oldest_trans_id) ;
            }
            if (newest_mount_id < get_desc_mount_id(desc)) {
                newest_mount_id = get_desc_mount_id(desc) ;
                reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                               "newest_mount_id to %d", get_desc_mount_id(desc));
            }
            cur_dblock += get_desc_trans_len(desc) + 2 ;
        } else {
            cur_dblock++ ;
        }
        brelse(d_bh) ;
    }

start_log_replay:
    cur_dblock = oldest_start ;
    if (oldest_trans_id) {
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1206: Starting replay "
                       "from offset %llu, trans_id %lu",
                       cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                       oldest_trans_id) ;
    }
    replay_count = 0 ;
    while (continue_replay && oldest_trans_id > 0) {
        ret = journal_read_transaction(p_s_sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id) ;
        if (ret < 0) {
            return ret ;
        } else if (ret != 0) {
            break ;
        }
        cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
        replay_count++ ;
        if (cur_dblock == oldest_start)
            break;
    }

    if (oldest_trans_id == 0) {
        reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1225: No valid "
                       "transactions found") ;
    }
    /* j_start does not get set correctly if we don't replay any transactions.
    ** if we had a valid journal_header, set j_start to the first unflushed transaction value,
    ** copy the trans_id from the header
    */
    if (valid_journal_header && replay_count == 0) {
        journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
        journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
        journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
        journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
    } else {
        journal->j_mount_id = newest_mount_id + 1 ;
    }
    reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
                   "newest_mount_id to %lu", journal->j_mount_id) ;
    journal->j_first_unflushed_offset = journal->j_start ;
    if (replay_count > 0) {
        reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
                       replay_count, get_seconds() - start) ;
    }
    if (!bdev_read_only(p_s_sb->s_bdev) &&
        _update_journal_header_block(p_s_sb, journal->j_start,
                                     journal->j_last_flush_trans_id))
    {
        /* replay failed, caller must call free_journal_ram and abort
        ** the mount
        */
        return -1 ;
    }
    return 0 ;
}
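/* A worked example of the log geometry used during replay (editor's note).
** A transaction occupies one descriptor block, get_desc_trans_len(desc)
** data blocks, and one commit block, hence the "+ 2" every time an offset
** is advanced:
**
**   next = (trans_offset + get_desc_trans_len(desc) + 2)
**              % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
**
** e.g. in an 8192-block log, a 100-block transaction whose descriptor sits
** at offset 8150 ends at (8150 + 100 + 2) % 8192 = offset 60, wrapping
** around the end of the log.
*/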
static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s)
{
    struct reiserfs_journal_list *jl;
retry:
    jl = reiserfs_kmalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS, s);
    if (!jl) {
        yield();
        goto retry;
    }
    memset(jl, 0, sizeof(*jl));
    INIT_LIST_HEAD(&jl->j_list);
    INIT_LIST_HEAD(&jl->j_working_list);
    INIT_LIST_HEAD(&jl->j_tail_bh_list);
    INIT_LIST_HEAD(&jl->j_bh_list);
    sema_init(&jl->j_commit_lock, 1);
    SB_JOURNAL(s)->j_num_lists++;
    get_journal_list(jl);
    return jl;
}

static void journal_list_init(struct super_block *p_s_sb) {
    SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
}
static int release_journal_dev( struct super_block *super,
                                struct reiserfs_journal *journal )
{
    int result;

    result = 0;

    if( journal -> j_dev_file != NULL ) {
        result = filp_close( journal -> j_dev_file, NULL );
        journal -> j_dev_file = NULL;
        journal -> j_dev_bd = NULL;
    } else if( journal -> j_dev_bd != NULL ) {
        result = blkdev_put( journal -> j_dev_bd );
        journal -> j_dev_bd = NULL;
    }

    if( result != 0 ) {
        reiserfs_warning(super, "sh-457: release_journal_dev: Cannot release journal device: %i", result );
    }
    return result;
}
static int journal_init_dev( struct super_block *super,
                             struct reiserfs_journal *journal,
                             const char *jdev_name )
{
    int result;
    dev_t jdev;
    int blkdev_mode = FMODE_READ | FMODE_WRITE;
    char b[BDEVNAME_SIZE];

    result = 0;

    journal -> j_dev_bd = NULL;
    journal -> j_dev_file = NULL;
    jdev = SB_ONDISK_JOURNAL_DEVICE( super ) ?
           new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;

    if (bdev_read_only(super->s_bdev))
        blkdev_mode = FMODE_READ;

    /* no "jdev" option: open the journal device recorded in the super block
       by device number (it may be the main device itself) */
    if( ( !jdev_name || !jdev_name[ 0 ] ) ) {
        journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode);
        if (IS_ERR(journal->j_dev_bd)) {
            result = PTR_ERR(journal->j_dev_bd);
            journal->j_dev_bd = NULL;
            reiserfs_warning (super, "sh-458: journal_init_dev: "
                              "cannot init journal device '%s': %i",
                              __bdevname(jdev, b), result );
            return result;
        } else if (jdev != super->s_dev)
            set_blocksize(journal->j_dev_bd, super->s_blocksize);
        return 0;
    }

    journal -> j_dev_file = filp_open( jdev_name, 0, 0 );
    if( !IS_ERR( journal -> j_dev_file ) ) {
        struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
        if( !S_ISBLK( jdev_inode -> i_mode ) ) {
            reiserfs_warning (super, "journal_init_dev: '%s' is "
                              "not a block device", jdev_name );
            result = -ENOTBLK;
        } else {
            /* ok */
            journal->j_dev_bd = I_BDEV(jdev_inode);
            set_blocksize(journal->j_dev_bd, super->s_blocksize);
        }
    } else {
        result = PTR_ERR( journal -> j_dev_file );
        journal -> j_dev_file = NULL;
        reiserfs_warning (super,
                          "journal_init_dev: Cannot open '%s': %i",
                          jdev_name, result );
    }
    if( result != 0 ) {
        release_journal_dev( super, journal );
        return result;
    }
    reiserfs_info(super, "journal_init_dev: journal device: %s\n",
                  bdevname(journal->j_dev_bd, b));
    return result;
}
/*
** must be called once on fs mount. calls journal_read for you
*/
int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_format, unsigned int commit_max_age) {
    int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2 ;
    struct buffer_head *bhjh;
    struct reiserfs_super_block * rs;
    struct reiserfs_journal_header *jh;
    struct reiserfs_journal *journal;
    struct reiserfs_journal_list *jl;
    char b[BDEVNAME_SIZE];

    journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof (struct reiserfs_journal)) ;
    if (!journal) {
        reiserfs_warning (p_s_sb, "journal-1256: unable to get memory for journal structure") ;
        return 1 ;
    }
    memset(journal, 0, sizeof(struct reiserfs_journal)) ;
    INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
    INIT_LIST_HEAD (&journal->j_prealloc_list);
    INIT_LIST_HEAD(&journal->j_working_list);
    INIT_LIST_HEAD(&journal->j_journal_list);
    journal->j_persistent_trans = 0;
    if (reiserfs_allocate_list_bitmaps(p_s_sb,
                                       journal->j_list_bitmap,
                                       SB_BMAP_NR(p_s_sb)))
        goto free_and_return ;
    allocate_bitmap_nodes(p_s_sb) ;

    /* reserved for journal area support */
    SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
                                             REISERFS_OLD_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize +
                                             SB_BMAP_NR(p_s_sb) + 1 :
                                             REISERFS_DISK_OFFSET_IN_BYTES / p_s_sb->s_blocksize + 2);

    /* Sanity check to see if the standard journal fits within the area
       addressed by the first bitmap block (relevant for small blocksizes) */
    if ( !SB_ONDISK_JOURNAL_DEVICE( p_s_sb ) &&
         (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8) ) {
        reiserfs_warning (p_s_sb, "journal-1393: journal does not fit in the area "
                          "addressed by the first bitmap block. It starts at "
                          "%u and its size is %u. Block size %ld",
                          SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
                          SB_ONDISK_JOURNAL_SIZE(p_s_sb), p_s_sb->s_blocksize);
        goto free_and_return;
    }

    if( journal_init_dev( p_s_sb, journal, j_dev_name ) != 0 ) {
        reiserfs_warning (p_s_sb, "sh-462: unable to initialize journal device");
        goto free_and_return;
    }

    rs = SB_DISK_SUPER_BLOCK(p_s_sb);

    /* read journal header */
    bhjh = journal_bread(p_s_sb,
                         SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
    if (!bhjh) {
        reiserfs_warning (p_s_sb, "sh-459: unable to read journal header");
        goto free_and_return;
    }
    jh = (struct reiserfs_journal_header *)(bhjh->b_data);

    /* make sure that the journal matches the super block */
    if (is_reiserfs_jr(rs) && (jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs))) {
        reiserfs_warning (p_s_sb, "sh-460: journal header magic %x "
                          "(device %s) does not match magic found in super "
                          "block %x",
                          jh->jh_journal.jp_journal_magic,
                          bdevname( journal->j_dev_bd, b),
                          sb_jp_journal_magic(rs));
        brelse (bhjh);
        goto free_and_return;
    }

    journal->j_trans_max      = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
    journal->j_max_batch      = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
    journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
    journal->j_max_trans_age  = JOURNAL_MAX_TRANS_AGE;

    if (journal->j_trans_max) {
        /* make sure these parameters are sane, reassign them if they are not */
        __u32 initial = journal->j_trans_max;
        __u32 ratio = 1;

        if (p_s_sb->s_blocksize < 4096)
            ratio = 4096 / p_s_sb->s_blocksize;

        if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
            journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
        if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
            journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
        if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
            journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;

        if (journal->j_trans_max != initial)
            reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
                              initial, journal->j_trans_max);

        journal->j_max_batch = journal->j_trans_max*
                               JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
    }

    if (!journal->j_trans_max) {
        /* the file system was created by an old version of mkreiserfs,
           so this field contains a zero value */
        journal->j_trans_max      = JOURNAL_TRANS_MAX_DEFAULT ;
        journal->j_max_batch      = JOURNAL_MAX_BATCH_DEFAULT ;
        journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;

        /* for blocksize >= 4096 the max transaction size is 1024. For block sizes < 4096
           the trans max size is decreased proportionally */
        if (p_s_sb->s_blocksize < 4096) {
            journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
            journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
        }
    }

    journal->j_default_max_commit_age = journal->j_max_commit_age;

    if (commit_max_age != 0) {
        journal->j_max_commit_age = commit_max_age;
        journal->j_max_trans_age = commit_max_age;
    }

    reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
                   "journal first block %u, max trans len %u, max batch %u, "
                   "max commit age %u, max trans age %u\n",
                   bdevname( journal->j_dev_bd, b),
                   SB_ONDISK_JOURNAL_SIZE(p_s_sb),
                   SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
                   journal->j_trans_max,
                   journal->j_max_batch,
                   journal->j_max_commit_age,
                   journal->j_max_trans_age);

    brelse (bhjh);

    journal->j_list_bitmap_index = 0 ;
    journal_list_init(p_s_sb) ;

    memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;

    INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
    spin_lock_init(&journal->j_dirty_buffers_lock) ;

    journal->j_start = 0 ;
    journal->j_len = 0 ;
    journal->j_len_alloc = 0 ;
    atomic_set(&(journal->j_wcount), 0) ;
    atomic_set(&(journal->j_async_throttle), 0) ;
    journal->j_bcount = 0 ;
    journal->j_trans_start_time = 0 ;
    journal->j_last = NULL ;
    journal->j_first = NULL ;
    init_waitqueue_head(&(journal->j_join_wait)) ;
    sema_init(&journal->j_lock, 1);
    sema_init(&journal->j_flush_sem, 1);

    journal->j_trans_id = 10 ;
    journal->j_mount_id = 10 ;
    journal->j_state = 0 ;
    atomic_set(&(journal->j_jlock), 0) ;
    journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
    journal->j_cnode_free_orig = journal->j_cnode_free_list ;
    journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
    journal->j_cnode_used = 0 ;
    journal->j_must_wait = 0 ;

    init_journal_hash(p_s_sb) ;
    jl = journal->j_current_jl;
    jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
    if (!jl->j_list_bitmap) {
        reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
        goto free_and_return;
    }
    if (journal_read(p_s_sb) < 0) {
        reiserfs_warning(p_s_sb, "Replay Failure, unable to mount") ;
        goto free_and_return;
    }

    reiserfs_mounted_fs_count++ ;
    if (reiserfs_mounted_fs_count <= 1)
        commit_wq = create_workqueue("reiserfs");

    INIT_WORK(&journal->j_work, flush_async_commits, p_s_sb);
    return 0 ;
free_and_return:
    free_journal_ram(p_s_sb);
    return 1;
}
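/* Worked example of the parameter scaling above (editor's note). With a
** 1024-byte blocksize, ratio = 4096 / 1024 = 4, so whatever trans max the
** journal header requests is clamped into
** [JOURNAL_TRANS_MIN_DEFAULT / 4, JOURNAL_TRANS_MAX_DEFAULT / 4], and is
** also forced to keep SB_ONDISK_JOURNAL_SIZE / j_trans_max >=
** JOURNAL_MIN_RATIO. A zeroed header (old mkreiserfs) gets the defaults
** divided the same way, with j_max_batch = j_trans_max * 9 / 10.
*/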
/*
** test for a polite end of the current transaction. Used by file_write, and should
** be used by delete to make sure they don't write more than can fit inside a single
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
    struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
    time_t now = get_seconds() ;
    /* cannot restart while nested */
    BUG_ON (!th->t_trans_id);
    if (th->t_refcount > 1)
        return 0 ;
    if ( journal->j_must_wait > 0 ||
         (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
         atomic_read(&(journal->j_jlock)) ||
         (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
         journal->j_cnode_free < (journal->j_trans_max * 3)) {
        return 1 ;
    }
    return 0 ;
}
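/* Typical caller pattern (editor's sketch; sb, blocks_needed, blocks_done
** and the out label are illustrative names, not symbols from this file).
** When this returns nonzero, the caller politely restarts:
**
**   if (journal_transaction_should_end(th, blocks_needed)) {
**       int err = journal_end(th, sb, blocks_done) ;
**       if (!err)
**           err = journal_begin(th, sb, blocks_needed) ;
**       if (err)
**           goto out ;
**   }
**
** so the running transaction can commit before it grows too old or too big.
*/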
/* this must be called inside a transaction, and requires the
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
    struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
    BUG_ON (!th->t_trans_id);
    journal->j_must_wait = 1 ;
    set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
    return ;
}

/* this must be called without a transaction started, and does not
** require the BKL
*/
void reiserfs_allow_writes(struct super_block *s) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
    wake_up(&journal->j_join_wait) ;
}

/* this must be called without a transaction started, and does not
** require the BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    wait_event(journal->j_join_wait,
               !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
}
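/* How the three helpers above fit together (editor's sketch; th and s are
** illustrative). A caller that needs the filesystem quiescent would do:
**
**   journal_begin(&th, s, 1) ;
**   reiserfs_block_writes(&th) ;     // inside the transaction, BKL held
**   journal_end(&th, s, 1) ;
**   ...filesystem is quiescent here...
**   reiserfs_allow_writes(s) ;       // no transaction may be open here
**
** while any task entering do_journal_begin_r blocks in
** reiserfs_wait_on_write_block() until J_WRITERS_BLOCKED is cleared.
*/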
static void queue_log_writer(struct super_block *s) {
    wait_queue_t wait;
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    set_bit(J_WRITERS_QUEUED, &journal->j_state);

    /*
     * we don't want to use wait_event here because
     * we only want to wait once.
     */
    init_waitqueue_entry(&wait, current);
    add_wait_queue(&journal->j_join_wait, &wait);
    set_current_state(TASK_UNINTERRUPTIBLE);
    if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
        schedule();
    current->state = TASK_RUNNING;
    remove_wait_queue(&journal->j_join_wait, &wait);
}

static void wake_queued_writers(struct super_block *s) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
        wake_up(&journal->j_join_wait);
}

static void let_transaction_grow(struct super_block *sb,
                                 unsigned long trans_id)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    unsigned long bcount = journal->j_bcount;
    while (1) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_timeout(1);
        journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
        while ((atomic_read(&journal->j_wcount) > 0 ||
                atomic_read(&journal->j_jlock)) &&
               journal->j_trans_id == trans_id) {
            queue_log_writer(sb);
        }
        if (journal->j_trans_id != trans_id)
            break;
        if (bcount == journal->j_bcount)
            break;
        bcount = journal->j_bcount;
    }
}
/* join == true if you must join an existing transaction.
** join == false if you can deal with waiting for others to finish
**
** this will block until the transaction is joinable. send the number of blocks you
** expect to use in nblocks.
*/
static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks, int join) {
    time_t now = get_seconds() ;
    int old_trans_id ;
    struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
    struct reiserfs_transaction_handle myth;
    int sched_count = 0;
    int retval;

    reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;

    PROC_INFO_INC( p_s_sb, journal.journal_being );
    /* set here for journal_join */
    th->t_refcount = 1;
    th->t_super = p_s_sb ;

relock:
    lock_journal(p_s_sb) ;
    if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
        unlock_journal (p_s_sb);
        retval = journal->j_errno;
        goto out_fail;
    }
    journal->j_bcount++;

    if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
        unlock_journal(p_s_sb) ;
        reiserfs_wait_on_write_block(p_s_sb) ;
        PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
        goto relock ;
    }
    now = get_seconds();

    /* if there is no room in the journal OR
    ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning.
    ** we don't sleep if there aren't other writers
    */
    if ( (!join && journal->j_must_wait > 0) ||
         ( !join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
         (!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
          (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
         (!join && atomic_read(&journal->j_jlock)) ||
         (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

        old_trans_id = journal->j_trans_id;
        unlock_journal(p_s_sb) ; /* allow others to finish this transaction */

        if (!join && (journal->j_len_alloc + nblocks + 2) >=
            journal->j_max_batch &&
            ((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
        {
            if (atomic_read(&journal->j_wcount) > 10) {
                sched_count++;
                queue_log_writer(p_s_sb);
                goto relock;
            }
        }
        /* don't mess with joining the transaction if all we have to do is
         * wait for someone else to do a commit
         */
        if (atomic_read(&journal->j_jlock)) {
            while (journal->j_trans_id == old_trans_id &&
                   atomic_read(&journal->j_jlock)) {
                queue_log_writer(p_s_sb);
            }
            goto relock;
        }
        retval = journal_join(&myth, p_s_sb, 1) ;
        if (retval)
            goto out_fail;

        /* someone might have ended the transaction while we joined */
        if (old_trans_id != journal->j_trans_id) {
            retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
        } else {
            retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
        }

        if (retval)
            goto out_fail;

        PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
        goto relock ;
    }
    /* we are the first writer, set trans_id */
    if (journal->j_trans_start_time == 0) {
        journal->j_trans_start_time = get_seconds();
    }
    atomic_inc(&(journal->j_wcount)) ;
    journal->j_len_alloc += nblocks ;
    th->t_blocks_logged = 0 ;
    th->t_blocks_allocated = nblocks ;
    th->t_trans_id = journal->j_trans_id ;
    unlock_journal(p_s_sb) ;
    INIT_LIST_HEAD (&th->t_list);
    return 0 ;

out_fail:
    memset (th, 0, sizeof (*th));
    /* Re-set th->t_super, so we can properly keep track of how many
     * persistent transactions there are. We need to do this so if this
     * call is part of a failed restart_transaction, we can free it later */
    th->t_super = p_s_sb;
    return retval;
}
struct reiserfs_transaction_handle *
reiserfs_persistent_transaction(struct super_block *s, int nblocks) {
    int ret ;
    struct reiserfs_transaction_handle *th ;

    /* if we're nesting into an existing transaction, it will be
    ** persistent on its own
    */
    if (reiserfs_transaction_running(s)) {
        th = current->journal_info ;
        th->t_refcount++ ;
        if (th->t_refcount < 2) {
            BUG() ;
        }
        return th ;
    }
    th = reiserfs_kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS, s) ;
    if (!th)
        return NULL;
    ret = journal_begin(th, s, nblocks) ;
    if (ret) {
        reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
        return NULL;
    }

    SB_JOURNAL(s)->j_persistent_trans++;
    return th ;
}

int
reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
    struct super_block *s = th->t_super;
    int ret = 0;
    if (th->t_trans_id)
        ret = journal_end(th, th->t_super, th->t_blocks_allocated);
    else
        ret = -EIO;
    if (th->t_refcount == 0) {
        SB_JOURNAL(s)->j_persistent_trans--;
        reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
    }
    return ret;
}
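/* Usage sketch (editor's note; sb, jbegin_count and err are illustrative
** names). A caller that may or may not already be inside a transaction
** can do:
**
**   struct reiserfs_transaction_handle *th ;
**   th = reiserfs_persistent_transaction(sb, jbegin_count) ;
**   if (!th)
**       return -ENOMEM ;
**   ...journal_mark_dirty() the buffers being changed...
**   err = reiserfs_end_persistent_transaction(th) ;
**
** If a transaction was already running, the handle just nests (t_refcount)
** and the real end is deferred to the outermost owner.
*/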
static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
    ** pointer
    */
    th->t_handle_save = cur_th ;
    if (cur_th && cur_th->t_refcount > 1) {
        BUG() ;
    }
    return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
}

int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info;

    /* this keeps do_journal_end from NULLing out the current->journal_info
    ** pointer
    */
    th->t_handle_save = cur_th ;
    if (cur_th && cur_th->t_refcount > 1) {
        BUG() ;
    }
    return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
}
int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
    struct reiserfs_transaction_handle *cur_th = current->journal_info ;
    int ret ;

    th->t_handle_save = NULL ;
    if (cur_th) {
        /* we are nesting into the current transaction */
        if (cur_th->t_super == p_s_sb) {
            BUG_ON (!cur_th->t_refcount);
            cur_th->t_refcount++ ;
            memcpy(th, cur_th, sizeof(*th));
            if (th->t_refcount <= 1)
                reiserfs_warning (p_s_sb, "BAD: refcount <= 1, but journal_info != 0");
            return 0;
        } else {
            /* we've ended up with a handle from a different filesystem.
            ** save it and restore on journal_end. This should never
            ** really happen...
            */
            reiserfs_warning(p_s_sb, "clm-2100: nesting into a different FS") ;
            th->t_handle_save = current->journal_info ;
            current->journal_info = th;
        }
    } else {
        current->journal_info = th;
    }
    ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
    if (current->journal_info != th)
        BUG() ;

    /* I guess this boils down to being the reciprocal of clm-2100 above.
     * If do_journal_begin_r fails, we need to put it back, since journal_end
     * won't be called to do it. */
    if (ret)
        current->journal_info = th->t_handle_save;
    else
        BUG_ON (!th->t_refcount);

    return ret ;
}
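/* Nesting in pictures (editor's note; outer/inner and sb are illustrative
** names). After the memcpy above, the nested handle is a byte-for-byte
** copy of the outer one, so:
**
**   journal_begin(&outer, sb, 10) ;     // t_refcount == 1
**   journal_begin(&inner, sb, 5) ;      // t_refcount == 2, same trans_id
**   journal_end(&inner, sb, 5) ;        // just drops the refcount
**   journal_end(&outer, sb, 10) ;       // really ends the transaction
**
** journal_end() copies the handle back into current->journal_info before
** dropping the refcount, which is how the two copies stay coherent.
*/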
/*
** puts bh into the current transaction. If it was already there, it removes the
** old pointers from the hash and puts new ones in (to make sure replay happens in the right order).
**
** if it was dirty, it cleans and files it onto the clean list. I can't let it be dirty again until the
** transaction is committed.
**
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn = NULL;
    int count_already_incd = 0 ;
    int prepared = 0 ;
    BUG_ON (!th->t_trans_id);

    PROC_INFO_INC( p_s_sb, journal.mark_dirty );
    if (th->t_trans_id != journal->j_trans_id) {
        reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                       th->t_trans_id, journal->j_trans_id);
    }

    p_s_sb->s_dirt = 1;

    prepared = test_clear_buffer_journal_prepared (bh);
    clear_buffer_journal_restore_dirty (bh);
    /* already in this transaction, we are done */
    if (buffer_journaled(bh)) {
        PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
        return 0 ;
    }

    /* this must be turned into a panic instead of a warning. We can't allow
    ** a dirty or journal_dirty or locked buffer to be logged, as some changes
    ** could get to disk too early. NOT GOOD.
    */
    if (!prepared || buffer_dirty(bh)) {
        reiserfs_warning (p_s_sb, "journal-1777: buffer %llu bad state "
                          "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
                          (unsigned long long)bh->b_blocknr, prepared ? ' ' : '!',
                          buffer_locked(bh) ? ' ' : '!',
                          buffer_dirty(bh) ? ' ' : '!',
                          buffer_journal_dirty(bh) ? ' ' : '!') ;
    }

    if (atomic_read(&(journal->j_wcount)) <= 0) {
        reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
        return 1 ;
    }
    /* this error means I've screwed up, and we've overflowed the transaction.
    ** Nothing can be done here, except make the FS readonly or panic.
    */
    if (journal->j_len >= journal->j_trans_max) {
        reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
    }

    if (buffer_journal_dirty(bh)) {
        count_already_incd = 1 ;
        PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
        clear_buffer_journal_dirty (bh);
    }

    if (journal->j_len > journal->j_len_alloc) {
        journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
    }

    set_buffer_journaled (bh);

    /* now put this guy on the end */
    if (!cn) {
        cn = get_cnode(p_s_sb) ;
        if (!cn) {
            reiserfs_panic(p_s_sb, "get_cnode failed!\n");
        }

        if (th->t_blocks_logged == th->t_blocks_allocated) {
            th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
            journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
        }
        th->t_blocks_logged++ ;
        journal->j_len++ ;

        cn->bh = bh ;
        cn->blocknr = bh->b_blocknr ;
        cn->sb = p_s_sb;
        cn->jlist = NULL ;
        insert_journal_hash(journal->j_hash_table, cn) ;
        if (!count_already_incd) {
            get_bh(bh) ;
        }
    }
    cn->next = NULL ;
    cn->prev = journal->j_last ;
    cn->bh = bh ;
    if (journal->j_last) {
        journal->j_last->next = cn ;
        journal->j_last = cn ;
    } else {
        journal->j_first = cn ;
        journal->j_last = cn ;
    }
    return 0 ;
}
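/* The canonical sequence for logging one metadata buffer (editor's sketch;
** this is the same pattern journal_end_sync and reiserfs_flush_old_commits
** use on the superblock buffer, th being a handle from journal_begin):
**
**   journal_begin(&th, p_s_sb, 1) ;
**   reiserfs_prepare_for_journal(p_s_sb, bh, 1) ;  // lock + clean it first
**   ...modify bh->b_data...
**   journal_mark_dirty(&th, p_s_sb, bh) ;          // now BH_JDirty, hashed
**   journal_end(&th, p_s_sb, 1) ;
**
** Skipping the prepare step trips the journal-1777 bad-state warning above,
** since a dirty, unprepared buffer could reach disk ahead of its commit.
*/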
int journal_end(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    if (!current->journal_info && th->t_refcount > 1)
        reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
                          th->t_refcount);

    if (!th->t_trans_id) {
        WARN_ON (1);
        return -EIO;
    }

    th->t_refcount--;
    if (th->t_refcount > 0) {
        struct reiserfs_transaction_handle *cur_th = current->journal_info ;

        /* we aren't allowed to close a nested transaction on a different
        ** filesystem from the one in the task struct
        */
        if (cur_th->t_super != th->t_super)
            BUG() ;

        if (th != cur_th) {
            memcpy(current->journal_info, th, sizeof(*th));
            th->t_trans_id = 0;
        }
        return 0;
    } else {
        return do_journal_end(th, p_s_sb, nblocks, 0) ;
    }
}
/* removes from the current transaction, releasing and decrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and released the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
    struct buffer_head *bh ;
    struct reiserfs_journal_cnode *cn ;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    int ret = 0;

    cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
    if (!cn || !cn->bh) {
        return ret ;
    }
    bh = cn->bh ;
    if (cn->prev) {
        cn->prev->next = cn->next ;
    }
    if (cn->next) {
        cn->next->prev = cn->prev ;
    }
    if (cn == journal->j_first) {
        journal->j_first = cn->next ;
    }
    if (cn == journal->j_last) {
        journal->j_last = cn->prev ;
    }
    if (bh)
        remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
    clear_buffer_journaled (bh); /* don't log this one */

    if (!already_cleaned) {
        clear_buffer_journal_dirty (bh);
        put_bh(bh) ;
        if (atomic_read(&(bh->b_count)) < 0) {
            reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
        }
        ret = 1 ;
    }
    journal->j_len-- ;
    journal->j_len_alloc-- ;
    free_cnode(p_s_sb, cn) ;
    return ret ;
}
/*
** for any cnode in a journal list, it can only be dirtied once all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
*/
static int can_dirty(struct reiserfs_journal_cnode *cn) {
    struct super_block *sb = cn->sb;
    b_blocknr_t blocknr = cn->blocknr ;
    struct reiserfs_journal_cnode *cur = cn->hprev ;
    int can_dirty = 1 ;

    /* first test hprev. These are all newer than cn, so any node here
    ** with the same block number and dev means this node can't be sent
    ** to disk right now.
    */
    while (cur && can_dirty) {
        if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
            cur->blocknr == blocknr) {
            can_dirty = 0 ;
        }
        cur = cur->hprev ;
    }
    /* then test hnext. These are all older than cn. As long as they
    ** are committed to the log, it is safe to write cn to disk
    */
    cur = cn->hnext ;
    while (cur && can_dirty) {
        if (cur->jlist && cur->jlist->j_len > 0 &&
            atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
            cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
            can_dirty = 0 ;
        }
        cur = cur->hnext ;
    }
    return can_dirty ;
}
/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    BUG_ON (!th->t_trans_id);
    /* you can sync while nested, very, very bad */
    if (th->t_refcount > 1) {
        BUG() ;
    }
    if (journal->j_len == 0) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
    }
    return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT) ;
}
/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(void *p) {
    struct super_block *p_s_sb = p;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_list *jl;
    struct list_head *entry;

    lock_kernel();
    if (!list_empty(&journal->j_journal_list)) {
        /* last entry is the youngest, commit it and you get everything */
        entry = journal->j_journal_list.prev;
        jl = JOURNAL_LIST_ENTRY(entry);
        flush_commit_list(p_s_sb, jl, 1);
    }
    unlock_kernel();
    /*
     * this is a little racy, but there's no harm in missing
     * the filemap_fdata_write
     */
    if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
        atomic_inc(&journal->j_async_throttle);
        filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
        atomic_dec(&journal->j_async_throttle);
    }
}
/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
    time_t now ;
    struct reiserfs_transaction_handle th ;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    now = get_seconds();
    /* safety check so we don't flush while we are replaying the log during
     * mount
     */
    if (list_empty(&journal->j_journal_list)) {
        return 0 ;
    }

    /* check the current transaction. If there are no writers, and it is
     * too old, finish it, and force the commit blocks to disk
     */
    if (atomic_read(&journal->j_wcount) <= 0 &&
        journal->j_trans_start_time > 0 &&
        journal->j_len > 0 &&
        (now - journal->j_trans_start_time) > journal->j_max_trans_age)
    {
        if (!journal_join(&th, p_s_sb, 1)) {
            reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
            journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;

            /* we're only being called from kreiserfsd, it makes no sense to do
            ** an async commit so that kreiserfsd can do it later
            */
            do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT) ;
        }
    }
    return p_s_sb->s_dirt;
}
/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done. By the time it wakes up, the transaction it was called on has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb,
                             unsigned long nblocks, int flags) {

    time_t now ;
    int flush = flags & FLUSH_ALL ;
    int commit_now = flags & COMMIT_NOW ;
    int wait_on_commit = flags & WAIT ;
    struct reiserfs_journal_list *jl;
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);

    BUG_ON (!th->t_trans_id);

    if (th->t_trans_id != journal->j_trans_id) {
        reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
                       th->t_trans_id, journal->j_trans_id);
    }

    journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
    if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
        atomic_dec(&(journal->j_wcount)) ;
    }

    /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
    ** will be dealt with by next transaction that actually writes something, but should be taken
    ** care of in this trans
    */
    if (journal->j_len == 0) {
        BUG();
    }
    /* if wcount > 0, and we are called with flush or commit_now,
    ** we wait on j_join_wait. We will wake up when the last writer has
    ** finished the transaction, and started it on its way to the disk.
    ** Then, we flush the commit or journal list, and just return 0
    ** because the rest of journal end was already done for this transaction.
    */
    if (atomic_read(&(journal->j_wcount)) > 0) {
        if (flush || commit_now) {
            unsigned trans_id ;

            jl = journal->j_current_jl;
            trans_id = jl->j_trans_id;
            if (wait_on_commit)
                jl->j_state |= LIST_COMMIT_PENDING;
            atomic_set(&(journal->j_jlock), 1) ;
            if (flush) {
                journal->j_next_full_flush = 1 ;
            }
            unlock_journal(p_s_sb) ;

            /* sleep while the current transaction is still j_jlocked */
            while (journal->j_trans_id == trans_id) {
                if (atomic_read(&journal->j_jlock)) {
                    queue_log_writer(p_s_sb);
                } else {
                    lock_journal(p_s_sb);
                    if (journal->j_trans_id == trans_id) {
                        atomic_set(&(journal->j_jlock), 1) ;
                    }
                    unlock_journal(p_s_sb);
                }
            }
            if (journal->j_trans_id == trans_id) {
                BUG();
            }
            if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
                wait_on_commit) {
                flush_commit_list(p_s_sb, jl, 1) ;
            }
            return 0 ;
        }
        unlock_journal(p_s_sb) ;
        return 0 ;
    }

    /* deal with old transactions where we are the last writers */
    now = get_seconds();
    if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
        commit_now = 1 ;
        journal->j_next_async_flush = 1 ;
    }
    /* don't batch when someone is waiting on j_join_wait */
    /* don't batch when syncing the commit or flushing the whole trans */
    if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
        (journal->j_len < journal->j_max_batch) &&
        journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
        journal->j_bcount++ ;
        unlock_journal(p_s_sb) ;
        return 0 ;
    }

    if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
        reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
    }
    return 1 ;
}
/*
** Does all the work that makes deleting blocks safe.
** when deleting a block marked BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers. That will prevent any old transactions with
** this block from trying to flush to the real location. Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn = NULL ;
    struct buffer_head *bh = NULL ;
    struct reiserfs_list_bitmap *jb = NULL ;
    int cleaned = 0 ;
    BUG_ON (!th->t_trans_id);

    cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
    if (cn && cn->bh) {
        bh = cn->bh ;
        get_bh(bh) ;
    }
    /* if it is journal new, we just remove it from this transaction */
    if (bh && buffer_journal_new(bh)) {
        clear_buffer_journal_new (bh);
        clear_prepared_bits(bh) ;
        reiserfs_clean_and_file_buffer(bh) ;
        cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
    } else {
        /* set the bit for this block in the journal bitmap for this transaction */
        jb = journal->j_current_jl->j_list_bitmap;
        if (!jb) {
            reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
        }
        set_bit_in_list_bitmap(p_s_sb, blocknr, jb) ;

        /* Note, the entire while loop is not allowed to schedule. */

        if (bh) {
            clear_prepared_bits(bh) ;
            reiserfs_clean_and_file_buffer(bh) ;
        }
        cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;

        /* find all older transactions with this block, make sure they don't try to write it out */
        cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, blocknr) ;
        while (cn) {
            if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
                set_bit(BLOCK_FREED, &cn->state) ;
                if (cn->bh) {
                    if (!cleaned) {
                        /* remove_from_transaction will brelse the buffer if it was
                        ** in the current trans
                        */
                        clear_buffer_journal_dirty (cn->bh);
                        cleaned = 1 ;
                        put_bh(cn->bh) ;
                        if (atomic_read(&(cn->bh->b_count)) < 0) {
                            reiserfs_warning (p_s_sb, "journal-2138: cn->bh->b_count < 0");
                        }
                    }
                    if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
                        atomic_dec(&(cn->jlist->j_nonzerolen)) ;
                    }
                    cn->bh = NULL ;
                }
            }
            cn = cn->hnext ;
        }
    }

    if (bh) {
        put_bh(bh) ; /* get_hash grabs the buffer */
        if (atomic_read(&(bh->b_count)) < 0) {
            reiserfs_warning (p_s_sb, "journal-2165: bh->b_count < 0");
        }
    }
    return 0 ;
}
void reiserfs_update_inode_transaction(struct inode *inode) {
    struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);
    REISERFS_I(inode)->i_jl = journal->j_current_jl;
    REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
}
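/* Editor's note: callers tag an inode with the current transaction here,
** and fsync-style code later uses that tag, e.g.
**
**   reiserfs_update_inode_transaction(inode) ;  // at modification time
**   ...
**   reiserfs_commit_for_inode(inode) ;          // at fsync/O_SYNC time
**
** which commits (or just waits out) exactly the transaction recorded in
** i_trans_id/i_jl instead of forcing a full journal flush.
*/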
/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
                             struct reiserfs_journal_list *jl)
{
    struct reiserfs_transaction_handle th ;
    struct super_block *sb = inode->i_sb ;
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    int ret = 0;

    /* is it from the current transaction, or from an unknown transaction? */
    if (id == journal->j_trans_id) {
        jl = journal->j_current_jl;
        /* try to let other writers come in and grow this transaction */
        let_transaction_grow(sb, id);
        if (journal->j_trans_id != id) {
            goto flush_commit_only;
        }

        ret = journal_begin(&th, sb, 1) ;
        if (ret)
            return ret;

        /* someone might have ended this transaction while we joined */
        if (journal->j_trans_id != id) {
            reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
            journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
            ret = journal_end(&th, sb, 1) ;
            goto flush_commit_only;
        }

        ret = journal_end_sync(&th, sb, 1) ;
        if (!ret)
            ret = 1;

    } else {
        /* this gets tricky, we have to make sure the journal list in
         * the inode still exists. We know the list is still around
         * if we've got a larger transaction id than the oldest list
         */
flush_commit_only:
        if (journal_list_still_alive(inode->i_sb, id)) {
            /*
             * we only set ret to 1 when we know for sure
             * the barrier hasn't been started yet on the commit
             * block.
             */
            if (atomic_read(&jl->j_commit_left) > 1)
                ret = 1;
            flush_commit_list(sb, jl, 1) ;
            if (journal->j_errno)
                ret = journal->j_errno;
        }
    }
    /* otherwise the list is gone, and long since committed */
    return ret;
}

int reiserfs_commit_for_inode(struct inode *inode) {
    unsigned long id = REISERFS_I(inode)->i_trans_id;
    struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

    /* for the whole inode, assume an unset id means it was
     * changed in the current transaction. More conservative
     */
    if (!id || !jl) {
        reiserfs_update_inode_transaction(inode) ;
        id = REISERFS_I(inode)->i_trans_id;
        /* jl will be updated in __commit_trans_jl */
    }

    return __commit_trans_jl(inode, id, jl);
}
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
                                      struct buffer_head *bh) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    PROC_INFO_INC( p_s_sb, journal.restore_prepared );
    if (!bh) {
        return ;
    }
    if (test_clear_buffer_journal_restore_dirty (bh) &&
        buffer_journal_dirty(bh)) {
        struct reiserfs_journal_cnode *cn;
        cn = get_journal_hash_dev(p_s_sb,
                                  journal->j_list_hash_table,
                                  bh->b_blocknr);
        if (cn && can_dirty(cn)) {
            set_buffer_journal_test (bh);
            mark_buffer_dirty(bh);
        }
    }
    clear_buffer_journal_prepared (bh);
}
extern struct tree_balance *cur_tb ;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it. So, we must:
** clean it
** wait on it.
*/
int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
                                 struct buffer_head *bh, int wait) {
    PROC_INFO_INC( p_s_sb, journal.prepare );

    if (test_set_buffer_locked(bh)) {
        if (!wait)
            return 0;
        lock_buffer(bh);
    }
    set_buffer_journal_prepared (bh);
    if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
        clear_buffer_journal_test (bh);
        set_buffer_journal_restore_dirty (bh);
    }
    unlock_buffer(bh);
    return 1;
}
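/* Pairing note (editor's sketch; some_check_fails is a hypothetical
** bail-out condition). A prepare that ends up not logging the buffer must
** be undone with reiserfs_restore_prepared_buffer():
**
**   reiserfs_prepare_for_journal(p_s_sb, bh, 1) ;
**   if (some_check_fails) {
**       reiserfs_restore_prepared_buffer(p_s_sb, bh) ;
**       return -EIO ;
**   }
**   journal_mark_dirty(&th, p_s_sb, bh) ;
**
** restore re-dirties the buffer (when safe, per can_dirty) and clears the
** prepared bit so it can be written again.
*/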
static void flush_old_journal_lists(struct super_block *s) {
    struct reiserfs_journal *journal = SB_JOURNAL (s);
    struct reiserfs_journal_list *jl;
    struct list_head *entry;
    time_t now = get_seconds();

    while (!list_empty(&journal->j_journal_list)) {
        entry = journal->j_journal_list.next;
        jl = JOURNAL_LIST_ENTRY(entry);
        /* this check should always be run, to send old lists to disk */
        if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
            flush_used_journal_lists(s, jl);
        } else {
            break;
        }
    }
}
/*
** long and ugly. If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up. Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
                          int flags) {
    struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
    struct reiserfs_journal_cnode *cn, *next, *jl_cn;
    struct reiserfs_journal_cnode *last_cn = NULL;
    struct reiserfs_journal_desc *desc ;
    struct reiserfs_journal_commit *commit ;
    struct buffer_head *c_bh ; /* commit bh */
    struct buffer_head *d_bh ; /* desc bh */
    int cur_write_start = 0 ; /* start index of current log write */
    int old_start ;
    int i ;
    int flush = flags & FLUSH_ALL ;
    int wait_on_commit = flags & WAIT ;
    struct reiserfs_journal_list *jl, *temp_jl;
    struct list_head *entry, *safe;
    unsigned long jindex;
    unsigned long commit_trans_id;
    int trans_half;

    BUG_ON (th->t_refcount > 1);
    BUG_ON (!th->t_trans_id);

    current->journal_info = th->t_handle_save;
    reiserfs_check_lock_depth(p_s_sb, "journal end");
    if (journal->j_len == 0) {
        reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
        journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
    }

    lock_journal(p_s_sb) ;
    if (journal->j_next_full_flush) {
        flags |= FLUSH_ALL ;
        flush = 1 ;
    }
    if (journal->j_next_async_flush) {
        flags |= COMMIT_NOW | WAIT;
        wait_on_commit = 1;
    }

    /* check_journal_end locks the journal, and unlocks if it does not return 1
    ** it tells us if we should continue with the journal_end, or just return
    */
    if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
        p_s_sb->s_dirt = 1;
        wake_queued_writers(p_s_sb);
        reiserfs_async_progress_wait(p_s_sb);
        goto out ;
    }

    /* check_journal_end might set these, check again */
    if (journal->j_next_full_flush) {
        flush = 1 ;
    }

    /*
    ** j_must_wait means we have to flush the log blocks, and the real blocks for
    ** this transaction
    */
    if (journal->j_must_wait > 0) {
        flush = 1 ;
    }

#ifdef REISERFS_PREALLOCATE
    /* quota ops might need to nest, setup the journal_info pointer for them */
    current->journal_info = th ;
    reiserfs_discard_all_prealloc(th); /* it should not involve new blocks into
                                        * the transaction */
    current->journal_info = th->t_handle_save ;
#endif

    /* setup description block */
    d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start) ;
    set_buffer_uptodate(d_bh);
    desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
    memset(d_bh->b_data, 0, d_bh->b_size) ;
    memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
    set_desc_trans_id(desc, journal->j_trans_id) ;

    /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
    c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                          ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
    commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
    memset(c_bh->b_data, 0, c_bh->b_size) ;
    set_commit_trans_id(commit, journal->j_trans_id) ;
    set_buffer_uptodate(c_bh) ;

    /* init this journal list */
    jl = journal->j_current_jl;

    /* we lock the commit before doing anything because
     * we want to make sure nobody tries to run flush_commit_list until
     * the new transaction is fully setup, and we've already flushed the
     * ordered bh list
     */
    down(&jl->j_commit_lock);

    /* save the transaction id in case we need to commit it later */
    commit_trans_id = jl->j_trans_id;

    atomic_set(&jl->j_older_commits_done, 0) ;
    jl->j_trans_id = journal->j_trans_id ;
    jl->j_timestamp = journal->j_trans_start_time ;
    jl->j_commit_bh = c_bh ;
    jl->j_start = journal->j_start ;
    jl->j_len = journal->j_len ;
    atomic_set(&jl->j_nonzerolen, journal->j_len) ;
    atomic_set(&jl->j_commit_left, journal->j_len + 2);
    jl->j_realblock = NULL ;

    /* The ENTIRE FOR LOOP MUST not cause schedule to occur.
    ** for each real block, add it to the journal list hash,
    ** copy into real block index array in the commit or desc block
    */
    trans_half = journal_trans_half(p_s_sb->s_blocksize);
    for (i = 0, cn = journal->j_first ; cn ; cn = cn->next, i++) {
        if (buffer_journaled (cn->bh)) {
            jl_cn = get_cnode(p_s_sb) ;
            if (!jl_cn) {
                reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
            }
            if (i == 0) {
                jl->j_realblock = jl_cn ;
            }
            jl_cn->prev = last_cn ;
            jl_cn->next = NULL ;
            if (last_cn) {
                last_cn->next = jl_cn ;
            }
            last_cn = jl_cn ;
            /* make sure the block we are trying to log is not a block
               of journal or reserved area */
            if (is_block_in_log_or_reserved_area(p_s_sb, cn->bh->b_blocknr)) {
                reiserfs_panic(p_s_sb, "journal-2332: Trying to log block %lu, which is a log block\n", cn->bh->b_blocknr) ;
            }
            jl_cn->blocknr = cn->bh->b_blocknr ;
            jl_cn->state = 0 ;
            jl_cn->sb = p_s_sb;
            jl_cn->bh = cn->bh ;
            jl_cn->jlist = jl;
            insert_journal_hash(journal->j_list_hash_table, jl_cn) ;
            if (i < trans_half) {
                desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
            } else {
                commit->j_realblock[i - trans_half] = cpu_to_le32(cn->bh->b_blocknr) ;
            }
        } else {
            i-- ;
        }
    }
    set_desc_trans_len(desc, journal->j_len) ;
    set_desc_mount_id(desc, journal->j_mount_id) ;
    set_desc_trans_id(desc, journal->j_trans_id) ;
    set_commit_trans_len(commit, journal->j_len);

    /* special check in case all buffers in the journal were marked for not logging */
    if (journal->j_len == 0) {
        BUG();
    }

    /* we're about to dirty all the log blocks, mark the description block
     * dirty now too. Don't mark the commit block dirty until all the
     * others are on disk
     */
    mark_buffer_dirty(d_bh);

    /* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
    cur_write_start = journal->j_start ;
    cn = journal->j_first ;
    jindex = 1 ; /* start at one so we don't get the desc again */
    while (cn) {
        clear_buffer_journal_new (cn->bh);
        /* copy all the real blocks into the log area. dirty log blocks */
        if (buffer_journaled (cn->bh)) {
            struct buffer_head *tmp_bh ;
            char *addr;
            struct page *page;
            tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
                                    ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
            set_buffer_uptodate(tmp_bh);
            page = cn->bh->b_page;
            addr = kmap(page);
            memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data),
                   cn->bh->b_size);
            kunmap(page);
            mark_buffer_dirty(tmp_bh);
            jindex++ ;
            set_buffer_journal_dirty (cn->bh);
            clear_buffer_journaled (cn->bh);
        } else {
            /* JDirty cleared sometime during transaction. don't log this one */
            reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
            brelse(cn->bh) ;
        }
        next = cn->next ;
        free_cnode(p_s_sb, cn) ;
        cn = next ;
    }

    /* we are done with both the c_bh and d_bh, but
    ** c_bh must be written after all other commit blocks,
    ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
    */

    journal->j_current_jl = alloc_journal_list(p_s_sb);

    /* now it is safe to insert this transaction on the main list */
    list_add_tail(&jl->j_list, &journal->j_journal_list);
    list_add_tail(&jl->j_working_list, &journal->j_working_list);
    journal->j_num_work_lists++;

    /* reset journal values for the next transaction */
    old_start = journal->j_start ;
    journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
    atomic_set(&(journal->j_wcount), 0) ;
    journal->j_bcount = 0 ;
    journal->j_last = NULL ;
    journal->j_first = NULL ;
    journal->j_len = 0 ;
    journal->j_trans_start_time = 0 ;
    journal->j_trans_id++ ;
    journal->j_current_jl->j_trans_id = journal->j_trans_id;
    journal->j_must_wait = 0 ;
    journal->j_len_alloc = 0 ;
    journal->j_next_full_flush = 0 ;
    journal->j_next_async_flush = 0 ;
    init_journal_hash(p_s_sb) ;

    // make sure reiserfs_add_jh sees the new current_jl before we
    // write out the tails
    smp_mb();

    /* tail conversion targets have to hit the disk before we end the
     * transaction. Otherwise a later transaction might repack the tail
     * before this transaction commits, leaving the data block unflushed and
     * clean; if we crash before the later transaction commits, the data block
     * is lost.
     */
    if (!list_empty(&jl->j_tail_bh_list)) {
        unlock_kernel();
        write_ordered_buffers(&journal->j_dirty_buffers_lock,
                              journal, jl, &jl->j_tail_bh_list);
        lock_kernel();
    }
    if (!list_empty(&jl->j_tail_bh_list))
        BUG();
    up(&jl->j_commit_lock);

    /* honor the flush wishes from the caller, simple commits can
    ** be done outside the journal lock, they are done below
    **
    ** if we don't flush the commit list right now, we put it into
    ** the work queue so the people waiting on the async progress work
    ** queue don't wait for this proc to flush journal lists and such.
    */
    if (flush) {
        flush_commit_list(p_s_sb, jl, 1) ;
        flush_journal_list(p_s_sb, jl, 1) ;
    } else if (!(jl->j_state & LIST_COMMIT_PENDING))
        queue_delayed_work(commit_wq, &journal->j_work, HZ/10);

    /* if the next transaction has any chance of wrapping, flush
    ** transactions that might get overwritten. If any journal lists are very
    ** old, flush them as well.
    */
first_jl:
    list_for_each_safe(entry, safe, &journal->j_journal_list) {
        temp_jl = JOURNAL_LIST_ENTRY(entry);
        if (journal->j_start <= temp_jl->j_start) {
            if ((journal->j_start + journal->j_trans_max + 1) >=
                temp_jl->j_start)
            {
                flush_used_journal_lists(p_s_sb, temp_jl);
                goto first_jl;
            } else if ((journal->j_start +
                        journal->j_trans_max + 1) <
                        SB_ONDISK_JOURNAL_SIZE(p_s_sb))
            {
                /* if we don't cross into the next transaction and we don't
                 * wrap, there is no way we can overlap any later transactions
                 * break now
                 */
                break;
            }
        } else if ((journal->j_start +
                    journal->j_trans_max + 1) >
                    SB_ONDISK_JOURNAL_SIZE(p_s_sb))
        {
            if (((journal->j_start + journal->j_trans_max + 1) %
                  SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
            {
                flush_used_journal_lists(p_s_sb, temp_jl);
                goto first_jl;
            } else {
                /* we don't overlap anything from our start to the end of the
                 * log, and our wrapped portion doesn't overlap anything at
                 * the start of the log. We can break
                 */
                break;
            }
        }
    }
    flush_old_journal_lists(p_s_sb);

    journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;

    if (!(journal->j_current_jl->j_list_bitmap)) {
        reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
    }

    atomic_set(&(journal->j_jlock), 0) ;
    unlock_journal(p_s_sb) ;
    /* wake up anybody waiting to join. */
    clear_bit(J_WRITERS_QUEUED, &journal->j_state);
    wake_up(&(journal->j_join_wait)) ;

    if (!flush && wait_on_commit &&
        journal_list_still_alive(p_s_sb, commit_trans_id)) {
        flush_commit_list(p_s_sb, jl, 1) ;
    }
out:
    reiserfs_check_lock_depth(p_s_sb, "journal end2");

    memset (th, 0, sizeof (*th));
    /* Re-set th->t_super, so we can properly keep track of how many
     * persistent transactions there are. We need to do this so if this
     * call is part of a failed restart_transaction, we can free it later */
    th->t_super = p_s_sb;

    return journal->j_errno;
}
static void
__reiserfs_journal_abort_hard (struct super_block *sb)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
            reiserfs_bdevname (sb));

    sb->s_flags |= MS_RDONLY;
    set_bit (J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
    dump_stack();
#endif
}

static void
__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
{
    struct reiserfs_journal *journal = SB_JOURNAL (sb);
    if (test_bit (J_ABORTED, &journal->j_state))
        return;

    if (!journal->j_errno)
        journal->j_errno = errno;

    __reiserfs_journal_abort_hard (sb);
}

void
reiserfs_journal_abort (struct super_block *sb, int errno)
{
    __reiserfs_journal_abort_soft (sb, errno);
}