#include <linux/stat.h>
#include <linux/string.h>
#include <linux/smp_lock.h>
-#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/writeback.h>
#define COMMIT_NOW 2 /* end and commit this transaction */
#define WAIT 4 /* wait for the log blocks to hit the disk*/
-/* state bits for the journal */
-#define WRITERS_BLOCKED 1 /* set when new writers not allowed */
-#define WRITERS_QUEUED 2 /* set when log is full due to too many
- * writers
- */
-
static int do_journal_end(struct reiserfs_transaction_handle *,struct super_block *,unsigned long nblocks,int flags) ;
static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) ;
static int dirty_one_transaction(struct super_block *s,
struct reiserfs_journal_list *jl);
static void flush_async_commits(void *p);
+static void queue_log_writer(struct super_block *s);
+
+/* values for join in do_journal_begin_r */
+enum {
+ JBEGIN_REG = 0, /* regular journal begin */
+ JBEGIN_JOIN = 1, /* join the running transaction if at all possible */
+ JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */
+};
+
+static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
+ struct super_block * p_s_sb,
+ unsigned long nblocks,int join);
static void init_journal_hash(struct super_block *p_s_sb) {
- memset(SB_JOURNAL(p_s_sb)->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+ memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
}
/*
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
if (bh) {
clear_buffer_dirty(bh);
- clear_bit(BH_JTest, &bh->b_state);
+ clear_buffer_journal_test(bh);
}
return 0 ;
}
static struct reiserfs_bitmap_node *
get_bitmap_node(struct super_block *p_s_sb) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_bitmap_node *bn = NULL;
- struct list_head *entry = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
+ struct list_head *entry = journal->j_bitmap_nodes.next ;
- SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes++ ;
+ journal->j_used_bitmap_nodes++ ;
repeat:
- if(entry != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
+ if(entry != &journal->j_bitmap_nodes) {
bn = list_entry(entry, struct reiserfs_bitmap_node, list) ;
list_del(entry) ;
memset(bn->data, 0, p_s_sb->s_blocksize) ;
- SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
+ journal->j_free_bitmap_nodes-- ;
return bn ;
}
bn = allocate_bitmap_node(p_s_sb) ;
}
static inline void free_bitmap_node(struct super_block *p_s_sb,
struct reiserfs_bitmap_node *bn) {
- SB_JOURNAL(p_s_sb)->j_used_bitmap_nodes-- ;
- if (SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+ journal->j_used_bitmap_nodes-- ;
+ if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
} else {
- list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
- SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
+ list_add(&bn->list, &journal->j_bitmap_nodes) ;
+ journal->j_free_bitmap_nodes++ ;
}
}
static void allocate_bitmap_nodes(struct super_block *p_s_sb) {
int i ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_bitmap_node *bn = NULL ;
for (i = 0 ; i < REISERFS_MIN_BITMAP_NODES ; i++) {
bn = allocate_bitmap_node(p_s_sb) ;
if (bn) {
- list_add(&bn->list, &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
- SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes++ ;
+ list_add(&bn->list, &journal->j_bitmap_nodes) ;
+ journal->j_free_bitmap_nodes++ ;
} else {
break ; // this is ok, we'll try again when more are needed
}
}
static int free_bitmap_nodes(struct super_block *p_s_sb) {
- struct list_head *next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+ struct list_head *next = journal->j_bitmap_nodes.next ;
struct reiserfs_bitmap_node *bn ;
- while(next != &SB_JOURNAL(p_s_sb)->j_bitmap_nodes) {
+ while(next != &journal->j_bitmap_nodes) {
bn = list_entry(next, struct reiserfs_bitmap_node, list) ;
list_del(next) ;
reiserfs_kfree(bn->data, p_s_sb->s_blocksize, p_s_sb) ;
reiserfs_kfree(bn, sizeof(struct reiserfs_bitmap_node), p_s_sb) ;
- next = SB_JOURNAL(p_s_sb)->j_bitmap_nodes.next ;
- SB_JOURNAL(p_s_sb)->j_free_bitmap_nodes-- ;
+ next = journal->j_bitmap_nodes.next ;
+ journal->j_free_bitmap_nodes-- ;
}
return 0 ;
static struct reiserfs_list_bitmap *
get_list_bitmap(struct super_block *p_s_sb, struct reiserfs_journal_list *jl) {
int i,j ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_list_bitmap *jb = NULL ;
for (j = 0 ; j < (JOURNAL_NUM_BITMAPS * 3) ; j++) {
- i = SB_JOURNAL(p_s_sb)->j_list_bitmap_index ;
- SB_JOURNAL(p_s_sb)->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
- jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
- if (SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
- flush_commit_list(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list, 1) ;
- if (!SB_JOURNAL(p_s_sb)->j_list_bitmap[i].journal_list) {
+ i = journal->j_list_bitmap_index ;
+ journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS ;
+ jb = journal->j_list_bitmap + i ;
+ if (journal->j_list_bitmap[i].journal_list) {
+ flush_commit_list(p_s_sb, journal->j_list_bitmap[i].journal_list, 1) ;
+ if (!journal->j_list_bitmap[i].journal_list) {
break ;
}
} else {
*/
static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb) {
struct reiserfs_journal_cnode *cn ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
reiserfs_check_lock_depth(p_s_sb, "get_cnode") ;
- if (SB_JOURNAL(p_s_sb)->j_cnode_free <= 0) {
+ if (journal->j_cnode_free <= 0) {
return NULL ;
}
- SB_JOURNAL(p_s_sb)->j_cnode_used++ ;
- SB_JOURNAL(p_s_sb)->j_cnode_free-- ;
- cn = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
+ journal->j_cnode_used++ ;
+ journal->j_cnode_free-- ;
+ cn = journal->j_cnode_free_list ;
if (!cn) {
return cn ;
}
if (cn->next) {
cn->next->prev = NULL ;
}
- SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn->next ;
+ journal->j_cnode_free_list = cn->next ;
memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ;
return cn ;
}
** returns a cnode to the free list
*/
static void free_cnode(struct super_block *p_s_sb, struct reiserfs_journal_cnode *cn) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
reiserfs_check_lock_depth(p_s_sb, "free_cnode") ;
- SB_JOURNAL(p_s_sb)->j_cnode_used-- ;
- SB_JOURNAL(p_s_sb)->j_cnode_free++ ;
+ journal->j_cnode_used-- ;
+ journal->j_cnode_free++ ;
/* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */
- cn->next = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
- if (SB_JOURNAL(p_s_sb)->j_cnode_free_list) {
- SB_JOURNAL(p_s_sb)->j_cnode_free_list->prev = cn ;
+ cn->next = journal->j_cnode_free_list ;
+ if (journal->j_cnode_free_list) {
+ journal->j_cnode_free_list->prev = cn ;
}
cn->prev = NULL ; /* not needed with the memset, but I might kill the memset, and forget to do this */
- SB_JOURNAL(p_s_sb)->j_cnode_free_list = cn ;
-}
-
-static int clear_prepared_bits(struct buffer_head *bh) {
- clear_bit(BH_JPrepared, &bh->b_state) ;
- clear_bit(BH_JRestore_dirty, &bh->b_state) ;
- return 0 ;
-}
-
-/* buffer is in current transaction */
-inline int buffer_journaled(const struct buffer_head *bh) {
- if (bh)
- return test_bit(BH_JDirty, &bh->b_state) ;
- else
- return 0 ;
-}
-
-/* disk block was taken off free list before being in a finished transation, or written to disk
-** journal_new blocks can be reused immediately, for any purpose
-*/
-inline int buffer_journal_new(const struct buffer_head *bh) {
- if (bh)
- return test_bit(BH_JNew, &bh->b_state) ;
- else
- return 0 ;
-}
-
-inline int mark_buffer_journal_new(struct buffer_head *bh) {
- if (bh) {
- set_bit(BH_JNew, &bh->b_state) ;
- }
- return 0 ;
+ journal->j_cnode_free_list = cn ;
}
-inline int mark_buffer_not_journaled(struct buffer_head *bh) {
- if (bh)
- clear_bit(BH_JDirty, &bh->b_state) ;
- return 0 ;
+static void clear_prepared_bits(struct buffer_head *bh) {
+ clear_buffer_journal_prepared (bh);
+ clear_buffer_journal_restore_dirty (bh);
}
/* utility function to force a BUG if it is called without the big
return (struct reiserfs_journal_cnode *)0 ;
}
-/* returns a cnode with same size, block number and dev as bh in the current transaction hash. NULL if not found */
-static inline struct reiserfs_journal_cnode *get_journal_hash(struct super_block *p_s_sb, struct buffer_head *bh) {
- struct reiserfs_journal_cnode *cn ;
- if (bh) {
- cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bh->b_blocknr);
- }
- else {
- return (struct reiserfs_journal_cnode *)0 ;
- }
- return cn ;
-}
-
/*
** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated
** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever
int reiserfs_in_journal(struct super_block *p_s_sb,
int bmap_nr, int bit_nr, int search_all,
b_blocknr_t *next_zero_bit) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_cnode *cn ;
struct reiserfs_list_bitmap *jb ;
int i ;
if (search_all) {
for (i = 0 ; i < JOURNAL_NUM_BITMAPS ; i++) {
PROC_INFO_INC( p_s_sb, journal.in_journal_bitmap );
- jb = SB_JOURNAL(p_s_sb)->j_list_bitmap + i ;
+ jb = journal->j_list_bitmap + i ;
if (jb->journal_list && jb->bitmaps[bmap_nr] &&
test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data)) {
*next_zero_bit = find_next_zero_bit((unsigned long *)
bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
/* is it in any old transactions? */
- if (search_all && (cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, bl))) {
+ if (search_all && (cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
return 1;
}
/* is it in the current transaction. This should never happen */
- if ((cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, bl))) {
+ if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
BUG();
return 1;
}
/* insert cn into table
*/
-inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
+static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) {
struct reiserfs_journal_cnode *cn_orig ;
cn_orig = journal_hash(table, cn->sb, cn->blocknr) ;
static int journal_list_still_alive(struct super_block *s,
unsigned long trans_id)
{
- struct list_head *entry = &SB_JOURNAL(s)->j_journal_list;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ struct list_head *entry = &journal->j_journal_list;
struct reiserfs_journal_list *jl;
if (!list_empty(entry)) {
static void submit_logged_buffer(struct buffer_head *bh) {
get_bh(bh) ;
bh->b_end_io = reiserfs_end_buffer_io_sync ;
- mark_buffer_notjournal_new(bh) ;
+ clear_buffer_journal_new (bh);
clear_buffer_dirty(bh) ;
- if (!test_and_clear_bit(BH_JTest, &bh->b_state))
+ if (!test_clear_buffer_journal_test (bh))
BUG();
if (!buffer_uptodate(bh))
BUG();
}
-atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
+static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0);
static struct reiserfs_jh *alloc_jh(void) {
struct reiserfs_jh *jh;
while(1) {
{
struct buffer_head *bh;
struct reiserfs_jh *jh;
- int ret = 0;
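+ /* start from any error already recorded on the journal, so an earlier write failure is not lost */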
+ int ret = j->j_errno;
struct buffer_chunk chunk;
struct list_head tmp;
INIT_LIST_HEAD(&tmp);
cond_resched();
spin_lock(lock);
goto loop_next;
- }
+ }
if (buffer_dirty(bh)) {
list_del_init(&jh->list);
list_add(&jh->list, &tmp);
- add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
+ add_to_chunk(&chunk, bh, lock, write_ordered_chunk);
} else {
reiserfs_free_jh(bh);
unlock_buffer(bh);
wait_on_buffer(bh);
spin_lock(lock);
}
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
ret = -EIO;
+ }
put_bh(bh);
cond_resched_lock(lock);
}
}
static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) {
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
struct reiserfs_journal_list *other_jl;
struct reiserfs_journal_list *first_jl;
struct list_head *entry;
entry = jl->j_list.prev;
while(1) {
other_jl = JOURNAL_LIST_ENTRY(entry);
- if (entry == &SB_JOURNAL(s)->j_journal_list ||
+ if (entry == &journal->j_journal_list ||
atomic_read(&other_jl->j_older_commits_done))
break;
}
}
entry = entry->next;
- if (entry == &SB_JOURNAL(s)->j_journal_list)
+ if (entry == &journal->j_journal_list)
return 0;
} else {
return 0;
int bn ;
struct buffer_head *tbh = NULL ;
unsigned long trans_id = jl->j_trans_id;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
int barrier = 0;
+ int retval = 0;
reiserfs_check_lock_depth(s, "flush_commit_list") ;
/* before we can put our commit blocks on disk, we have to make sure everyone older than
** us is on disk too
*/
- if (jl->j_len <= 0)
- BUG();
- if (trans_id == SB_JOURNAL(s)->j_trans_id)
- BUG();
+ BUG_ON (jl->j_len <= 0);
+ BUG_ON (trans_id == journal->j_trans_id);
get_journal_list(jl);
if (flushall) {
up(&jl->j_commit_lock);
goto put_jl;
}
- if (jl->j_trans_id == 0)
- BUG();
+ BUG_ON (jl->j_trans_id == 0);
/* this commit is done, exit */
if (atomic_read(&(jl->j_commit_left)) <= 0) {
if (!list_empty(&jl->j_bh_list)) {
unlock_kernel();
- write_ordered_buffers(&SB_JOURNAL(s)->j_dirty_buffers_lock,
- SB_JOURNAL(s), jl, &jl->j_bh_list);
+ write_ordered_buffers(&journal->j_dirty_buffers_lock,
+ journal, jl, &jl->j_bh_list);
lock_kernel();
}
- if (!list_empty(&jl->j_bh_list))
- BUG();
+ BUG_ON (!list_empty(&jl->j_bh_list));
/*
* for the description block and all the log blocks, submit any buffers
* that haven't already reached the disk
*/
- atomic_inc(&SB_JOURNAL(s)->j_async_throttle);
+ atomic_inc(&journal->j_async_throttle);
for (i = 0 ; i < (jl->j_len + 1) ; i++) {
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start+i) %
SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn) ;
- if (buffer_dirty(tbh))
+ if (buffer_dirty(tbh)) /* redundant, ll_rw_block() checks */
ll_rw_block(WRITE, 1, &tbh) ;
put_bh(tbh) ;
}
- atomic_dec(&SB_JOURNAL(s)->j_async_throttle);
+ atomic_dec(&journal->j_async_throttle);
/* wait on everything written so far before writing the commit
* if we are in barrier mode, send the commit down now
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
- if (buffer_dirty(tbh))
+ if (buffer_dirty(tbh)) /* redundant, sync_dirty_buffer() checks */
sync_dirty_buffer(tbh);
- if (!buffer_uptodate(tbh)) {
- reiserfs_panic(s, "journal-601, buffer write failed\n") ;
+ if (unlikely (!buffer_uptodate(tbh))) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning(s, "journal-601, buffer write failed") ;
+#endif
+ retval = -EIO;
}
put_bh(tbh) ; /* once for journal_find_get_block */
put_bh(tbh) ; /* once due to original getblk in do_journal_end */
atomic_dec(&(jl->j_commit_left)) ;
}
- if (atomic_read(&(jl->j_commit_left)) != 1)
- BUG();
+ BUG_ON (atomic_read(&(jl->j_commit_left)) != 1);
if (!barrier) {
if (buffer_dirty(jl->j_commit_bh))
wait_on_buffer(jl->j_commit_bh);
check_barrier_completion(s, jl->j_commit_bh);
- if (!buffer_uptodate(jl->j_commit_bh)) {
- reiserfs_panic(s, "journal-615: buffer write failed\n") ;
+
+ /* If there was a write error in the journal - we can't commit this
+ * transaction - it will be invalid and, if successful, will just end
+ * up propagating the write error out to the filesystem. */
+ if (unlikely (!buffer_uptodate(jl->j_commit_bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning(s, "journal-615: buffer write failed") ;
+#endif
+ retval = -EIO;
}
bforget(jl->j_commit_bh) ;
- if (SB_JOURNAL(s)->j_last_commit_id != 0 &&
- (jl->j_trans_id - SB_JOURNAL(s)->j_last_commit_id) != 1) {
+ if (journal->j_last_commit_id != 0 &&
+ (jl->j_trans_id - journal->j_last_commit_id) != 1) {
reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
- SB_JOURNAL(s)->j_last_commit_id,
+ journal->j_last_commit_id,
jl->j_trans_id);
}
- SB_JOURNAL(s)->j_last_commit_id = jl->j_trans_id;
+ journal->j_last_commit_id = jl->j_trans_id;
/* now, every commit block is on the disk. It is safe to allow blocks freed during this transaction to be reallocated */
cleanup_freed_for_journal_list(s, jl) ;
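+ /* fold in any error already recorded on the journal */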
+ retval = retval ? retval : journal->j_errno;
+
/* mark the metadata dirty */
- dirty_one_transaction(s, jl);
+ if (!retval)
+ dirty_one_transaction(s, jl);
atomic_dec(&(jl->j_commit_left)) ;
if (flushall) {
up(&jl->j_commit_lock);
put_jl:
put_journal_list(s, jl);
- return 0 ;
+
+ if (retval)
+ reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__);
+ return retval;
}
/*
return NULL ;
}
-void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
+static void remove_journal_hash(struct super_block *, struct reiserfs_journal_cnode **,
struct reiserfs_journal_list *, unsigned long, int);
/*
** block to be reallocated for data blocks if it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *p_s_sb, struct reiserfs_journal_list *jl, int debug) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_cnode *cn, *last ;
cn = jl->j_realblock ;
cn->bh ? 1: 0, cn->state) ;
}
cn->state = 0 ;
- remove_journal_hash(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_hash_table, jl, cn->blocknr, 1) ;
+ remove_journal_hash(p_s_sb, journal->j_list_hash_table, jl, cn->blocknr, 1) ;
}
last = cn ;
cn = cn->next ;
*/
static int _update_journal_header_block(struct super_block *p_s_sb, unsigned long offset, unsigned long trans_id) {
struct reiserfs_journal_header *jh ;
- if (trans_id >= SB_JOURNAL(p_s_sb)->j_last_flush_trans_id) {
- if (buffer_locked((SB_JOURNAL(p_s_sb)->j_header_bh))) {
- wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
- if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
- reiserfs_panic(p_s_sb, "journal-699: buffer write failed\n") ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+
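+ /* never rewrite the on-disk header once the journal has been aborted */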
+ if (reiserfs_is_journal_aborted (journal))
+ return -EIO;
+
+ if (trans_id >= journal->j_last_flush_trans_id) {
+ if (buffer_locked((journal->j_header_bh))) {
+ wait_on_buffer((journal->j_header_bh)) ;
+ if (unlikely (!buffer_uptodate(journal->j_header_bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning (p_s_sb, "journal-699: buffer write failed") ;
+#endif
+ return -EIO;
}
}
- SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
- SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = offset ;
- jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
+ journal->j_last_flush_trans_id = trans_id ;
+ journal->j_first_unflushed_offset = offset ;
+ jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
- jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
+ jh->j_mount_id = cpu_to_le32(journal->j_mount_id) ;
if (reiserfs_barrier_flush(p_s_sb)) {
int ret;
- lock_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
- ret = submit_barrier_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
+ lock_buffer(journal->j_header_bh);
+ ret = submit_barrier_buffer(journal->j_header_bh);
if (ret == -EOPNOTSUPP) {
- set_buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh);
+ set_buffer_uptodate(journal->j_header_bh);
disable_barrier(p_s_sb);
goto sync;
}
- wait_on_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
- check_barrier_completion(p_s_sb, SB_JOURNAL(p_s_sb)->j_header_bh);
+ wait_on_buffer(journal->j_header_bh);
+ check_barrier_completion(p_s_sb, journal->j_header_bh);
} else {
sync:
- set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
- sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+ set_buffer_dirty(journal->j_header_bh) ;
+ sync_dirty_buffer(journal->j_header_bh) ;
}
- if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
+ if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
return -EIO ;
}
static int update_journal_header_block(struct super_block *p_s_sb,
unsigned long offset,
unsigned long trans_id) {
- if (_update_journal_header_block(p_s_sb, offset, trans_id)) {
- reiserfs_panic(p_s_sb, "journal-712: buffer write failed\n") ;
- }
- return 0 ;
+ return _update_journal_header_block(p_s_sb, offset, trans_id);
}
/*
** flush any and all journal lists older than you are
{
struct list_head *entry;
struct reiserfs_journal_list *other_jl ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
unsigned long trans_id = jl->j_trans_id;
/* we know we are the only ones flushing things, no extra race
* protection is required.
*/
restart:
- entry = SB_JOURNAL(p_s_sb)->j_journal_list.next;
+ entry = journal->j_journal_list.next;
+ /* Did we wrap? */
+ if (entry == &journal->j_journal_list)
+ return 0;
other_jl = JOURNAL_LIST_ENTRY(entry);
if (other_jl->j_trans_id < trans_id) {
+ BUG_ON (other_jl->j_refcount <= 0);
/* do not flush all */
flush_journal_list(p_s_sb, other_jl, 0) ;
static void del_from_work_list(struct super_block *s,
struct reiserfs_journal_list *jl) {
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
if (!list_empty(&jl->j_working_list)) {
list_del_init(&jl->j_working_list);
- SB_JOURNAL(s)->j_num_work_lists--;
+ journal->j_num_work_lists--;
}
}
int was_dirty = 0 ;
struct buffer_head *saved_bh ;
unsigned long j_len_saved = jl->j_len ;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ int err = 0;
- if (j_len_saved <= 0) {
- BUG();
- }
+ BUG_ON (j_len_saved <= 0);
- if (atomic_read(&SB_JOURNAL(s)->j_wcount) != 0) {
+ if (atomic_read(&journal->j_wcount) != 0) {
reiserfs_warning(s, "clm-2048: flush_journal_list called with wcount %d",
- atomic_read(&SB_JOURNAL(s)->j_wcount)) ;
+ atomic_read(&journal->j_wcount)) ;
}
- if (jl->j_trans_id == 0)
- BUG();
+ BUG_ON (jl->j_trans_id == 0);
/* if flushall == 0, the lock is already held */
if (flushall) {
- down(&SB_JOURNAL(s)->j_flush_sem);
- } else if (!down_trylock(&SB_JOURNAL(s)->j_flush_sem)) {
+ down(&journal->j_flush_sem);
+ } else if (!down_trylock(&journal->j_flush_sem)) {
BUG();
}
count = 0 ;
- if (j_len_saved > SB_JOURNAL_TRANS_MAX(s)) {
+ if (j_len_saved > journal->j_trans_max) {
reiserfs_panic(s, "journal-715: flush_journal_list, length is %lu, trans id %lu\n", j_len_saved, jl->j_trans_id);
return 0 ;
}
*/
flush_commit_list(s, jl, 1) ;
- if (!(jl->j_state & LIST_DIRTY))
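+ /* a transaction whose commit failed is never marked dirty, so only BUG when the journal has not been aborted */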
+ if (!(jl->j_state & LIST_DIRTY) && !reiserfs_is_journal_aborted (journal))
BUG();
/* are we done now? */
/* loop through each cnode, see if we need to write it,
** or wait on a more recent transaction, or just ignore it
*/
- if (atomic_read(&(SB_JOURNAL(s)->j_wcount)) != 0) {
+ if (atomic_read(&(journal->j_wcount)) != 0) {
reiserfs_panic(s, "journal-844: panic journal list is flushing, wcount is not 0\n") ;
}
cn = jl->j_realblock ;
if (cn->blocknr == 0) {
goto free_cnode ;
}
+
+ /* This transaction failed commit. Don't write out to the disk */
+ if (!(jl->j_state & LIST_DIRTY))
+ goto free_cnode;
+
pjl = find_newer_jl_for_cn(cn) ;
/* the order is important here. We check pjl to make sure we
** don't clear BH_JDirty_wait if we aren't the one writing this
get_bh(saved_bh) ;
if (buffer_journal_dirty(saved_bh)) {
- if (!can_dirty(cn))
- BUG();
+ BUG_ON (!can_dirty (cn));
was_jwait = 1 ;
was_dirty = 1 ;
} else if (can_dirty(cn)) {
get_bh(saved_bh) ;
set_bit(BLOCK_NEEDS_FLUSH, &cn->state) ;
lock_buffer(saved_bh);
- if (cn->blocknr != saved_bh->b_blocknr)
- BUG();
+ BUG_ON (cn->blocknr != saved_bh->b_blocknr);
if (buffer_dirty(saved_bh))
submit_logged_buffer(saved_bh) ;
else
if (!cn->bh) {
reiserfs_panic(s, "journal-1012: cn->bh is NULL\n") ;
}
- if (!buffer_uptodate(cn->bh)) {
- reiserfs_panic(s, "journal-949: buffer write failed\n") ;
- }
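+ /* as in flush_commit_list, record the write error instead of panicking; the journal is aborted after the loop */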
+ if (unlikely (!buffer_uptodate(cn->bh))) {
+#ifdef CONFIG_REISERFS_CHECK
+ reiserfs_warning(s, "journal-949: buffer write failed\n") ;
+#endif
+ err = -EIO;
+ }
/* note, we must clear the JDirty_wait bit after the up to date
** check, otherwise we race against our flushpage routine
*/
- if (!test_and_clear_bit(BH_JDirty_wait, &cn->bh->b_state))
- BUG();
+ BUG_ON (!test_clear_buffer_journal_dirty (cn->bh));
/* undo the inc from journal_mark_dirty */
put_bh(cn->bh) ;
}
}
+ if (err)
+ reiserfs_abort (s, -EIO, "Write error while pushing transaction to disk in %s", __FUNCTION__);
flush_older_and_return:
+
+
/* before we can update the journal header block, we _must_ flush all
** real blocks from all older transactions to disk. This is because
** once the header block is updated, this transaction will not be
flush_older_journal_lists(s, jl);
}
+ err = journal->j_errno;
/* before we can remove everything from the hash tables for this
** transaction, we must make sure it can never be replayed
**
** we only need to update the journal header block for the last list
** being flushed
*/
- if (flushall) {
- update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
+ if (!err && flushall) {
+ err = update_journal_header_block(s, (jl->j_start + jl->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(s), jl->j_trans_id) ;
+ if (err)
+ reiserfs_abort (s, -EIO, "Write error while updating journal header in %s", __FUNCTION__);
}
remove_all_from_journal_list(s, jl, 0) ;
- list_del(&jl->j_list);
- SB_JOURNAL(s)->j_num_lists--;
+ list_del_init(&jl->j_list);
+ journal->j_num_lists--;
del_from_work_list(s, jl);
- if (SB_JOURNAL(s)->j_last_flush_id != 0 &&
- (jl->j_trans_id - SB_JOURNAL(s)->j_last_flush_id) != 1) {
+ if (journal->j_last_flush_id != 0 &&
+ (jl->j_trans_id - journal->j_last_flush_id) != 1) {
reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
- SB_JOURNAL(s)->j_last_flush_id,
+ journal->j_last_flush_id,
jl->j_trans_id);
}
- SB_JOURNAL(s)->j_last_flush_id = jl->j_trans_id;
+ journal->j_last_flush_id = jl->j_trans_id;
/* not strictly required since we are freeing the list, but it should
* help find code using dead lists later on
jl->j_state = 0;
put_journal_list(s, jl);
if (flushall)
- up(&SB_JOURNAL(s)->j_flush_sem);
- return 0 ;
+ up(&journal->j_flush_sem);
+ return err ;
}
static int write_one_transaction(struct super_block *s,
lock_buffer(tmp_bh);
if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
if (!buffer_journal_dirty(tmp_bh) ||
- reiserfs_buffer_prepared(tmp_bh))
+ buffer_journal_prepared(tmp_bh))
BUG();
add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
ret++;
pjl = find_newer_jl_for_cn(cn) ;
if (!pjl && cn->blocknr && cn->bh && buffer_journal_dirty(cn->bh))
{
- if (!can_dirty(cn))
- BUG();
+ BUG_ON (!can_dirty(cn));
/* if the buffer is prepared, it will either be logged
* or restored. If restored, we need to make sure
* it actually gets marked dirty
*/
- mark_buffer_notjournal_new(cn->bh) ;
- if (test_bit(BH_JPrepared, &cn->bh->b_state)) {
- set_bit(BH_JRestore_dirty, &cn->bh->b_state);
+ clear_buffer_journal_new (cn->bh);
+ if (buffer_journal_prepared (cn->bh)) {
+ set_buffer_journal_restore_dirty (cn->bh);
} else {
- set_bit(BH_JTest, &cn->bh->b_state);
+ set_buffer_journal_test (cn->bh);
mark_buffer_dirty(cn->bh);
}
}
unsigned long orig_trans_id = jl->j_trans_id;
struct buffer_chunk chunk;
struct list_head *entry;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
chunk.nr = 0;
- down(&SB_JOURNAL(s)->j_flush_sem);
+ down(&journal->j_flush_sem);
if (!journal_list_still_alive(s, orig_trans_id)) {
goto done;
}
(!num_trans && written < num_blocks)) {
if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
- atomic_read(&jl->j_commit_left))
+ atomic_read(&jl->j_commit_left) || !(jl->j_state & LIST_DIRTY))
{
del_from_work_list(s, jl);
break;
entry = jl->j_list.next;
/* did we wrap? */
- if (entry == &SB_JOURNAL(s)->j_journal_list) {
+ if (entry == &journal->j_journal_list) {
break;
}
jl = JOURNAL_LIST_ENTRY(entry);
}
done:
- up(&SB_JOURNAL(s)->j_flush_sem);
+ up(&journal->j_flush_sem);
return ret;
}
struct reiserfs_journal_list *tjl;
struct reiserfs_journal_list *flush_jl;
unsigned long trans_id;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
flush_jl = tjl = jl;
}
len += cur_len;
flush_jl = tjl;
- if (tjl->j_list.next == &SB_JOURNAL(s)->j_journal_list)
+ if (tjl->j_list.next == &journal->j_journal_list)
break;
tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
}
}
static void free_journal_ram(struct super_block *p_s_sb) {
- reiserfs_kfree(SB_JOURNAL(p_s_sb)->j_current_jl,
+ struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+ reiserfs_kfree(journal->j_current_jl,
sizeof(struct reiserfs_journal_list), p_s_sb);
- SB_JOURNAL(p_s_sb)->j_num_lists--;
+ journal->j_num_lists--;
- vfree(SB_JOURNAL(p_s_sb)->j_cnode_free_orig) ;
- free_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap) ;
+ vfree(journal->j_cnode_free_orig) ;
+ free_list_bitmaps(p_s_sb, journal->j_list_bitmap) ;
free_bitmap_nodes(p_s_sb) ; /* must be after free_list_bitmaps */
- if (SB_JOURNAL(p_s_sb)->j_header_bh) {
- brelse(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+ if (journal->j_header_bh) {
+ brelse(journal->j_header_bh) ;
}
/* j_header_bh is on the journal dev, make sure not to release the journal
* dev until we brelse j_header_bh
*/
- release_journal_dev(p_s_sb, SB_JOURNAL(p_s_sb));
- vfree(SB_JOURNAL(p_s_sb)) ;
+ release_journal_dev(p_s_sb, journal);
+ vfree(journal) ;
}
/*
*/
static int do_journal_release(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, int error) {
struct reiserfs_transaction_handle myth ;
+ int flushed = 0;
+ struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
/* we only want to flush out transactions if we were called with error == 0
*/
if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
/* end the current trans */
+ BUG_ON (!th->t_trans_id);
do_journal_end(th, p_s_sb,10, FLUSH_ALL) ;
/* make sure something gets logged to force our way into the flush code */
- journal_join(&myth, p_s_sb, 1) ;
- reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
- journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
- do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
+ if (!journal_join(&myth, p_s_sb, 1)) {
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+ do_journal_end(&myth, p_s_sb,1, FLUSH_ALL) ;
+ flushed = 1;
+ }
+ }
+
+ /* this also catches errors during the do_journal_end above */
+ if (!error && reiserfs_is_journal_aborted(journal)) {
+ memset(&myth, 0, sizeof(myth));
+ if (!journal_join_abort(&myth, p_s_sb, 1)) {
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&myth, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+ do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL) ;
+ }
}
reiserfs_mounted_fs_count-- ;
struct reiserfs_journal_commit *commit) {
if (get_commit_trans_id (commit) != get_desc_trans_id (desc) ||
get_commit_trans_len (commit) != get_desc_trans_len (desc) ||
- get_commit_trans_len (commit) > SB_JOURNAL_TRANS_MAX(p_s_sb) ||
+ get_commit_trans_len (commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
get_commit_trans_len (commit) <= 0
) {
return 1 ;
*newest_mount_id) ;
return -1 ;
}
- if ( get_desc_trans_len(desc) > SB_JOURNAL_TRANS_MAX(p_s_sb) ) {
+ if ( get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max ) {
reiserfs_warning(p_s_sb, "journal-2018: Bad transaction length %d encountered, ignoring transaction", get_desc_trans_len(desc));
return -1 ;
}
*/
static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cur_dblock, unsigned long oldest_start,
unsigned long oldest_trans_id, unsigned long newest_mount_id) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_desc *desc ;
struct reiserfs_journal_commit *commit ;
unsigned long trans_id = 0 ;
cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)) ;
/* init starting values for the first transaction, in case this is the last transaction to be replayed. */
- SB_JOURNAL(p_s_sb)->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
- SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = trans_id ;
- SB_JOURNAL(p_s_sb)->j_trans_id = trans_id + 1;
+ journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
+ journal->j_last_flush_trans_id = trans_id ;
+ journal->j_trans_id = trans_id + 1;
brelse(c_bh) ;
brelse(d_bh) ;
reiserfs_kfree(log_blocks, le32_to_cpu(desc->j_len) * sizeof(struct buffer_head *), p_s_sb) ;
Right now it is only used from journal code. But later we might use it
from other places.
Note: Do not use journal_getblk/sb_getblk functions here! */
-struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
+static struct buffer_head * reiserfs_breada (struct block_device *dev, int block, int bufsize,
unsigned int max_block)
{
struct buffer_head * bhlist[BUFNR];
** On exit, it sets things up so the first transaction will work correctly.
*/
static int journal_read(struct super_block *p_s_sb) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_desc *desc ;
unsigned long oldest_trans_id = 0;
unsigned long oldest_invalid_trans_id = 0 ;
cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) ;
reiserfs_info (p_s_sb, "checking transaction log (%s)\n",
- bdevname(SB_JOURNAL(p_s_sb)->j_dev_bd, b));
+ bdevname(journal->j_dev_bd, b));
start = get_seconds();
/* step 1, read in the journal header block. Check the transaction it says
** is the first unflushed, and if that transaction is not valid,
** replay is done
*/
- SB_JOURNAL(p_s_sb)->j_header_bh = journal_bread(p_s_sb,
+ journal->j_header_bh = journal_bread(p_s_sb,
SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
SB_ONDISK_JOURNAL_SIZE(p_s_sb));
- if (!SB_JOURNAL(p_s_sb)->j_header_bh) {
+ if (!journal->j_header_bh) {
return 1 ;
}
- jh = (struct reiserfs_journal_header *)(SB_JOURNAL(p_s_sb)->j_header_bh->b_data) ;
+ jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data) ;
if (le32_to_cpu(jh->j_first_unflushed_offset) >= 0 &&
le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(p_s_sb) &&
le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
while(continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
/* Note that it is required for blocksize of primary fs device and journal
device to be the same */
- d_bh = reiserfs_breada(SB_JOURNAL(p_s_sb)->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
+ d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, p_s_sb->s_blocksize,
SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_ONDISK_JOURNAL_SIZE(p_s_sb)) ;
ret = journal_transaction_is_valid(p_s_sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id) ;
if (ret == 1) {
} else if (ret != 0) {
break ;
}
- cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start ;
+ cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start ;
replay_count++ ;
if (cur_dblock == oldest_start)
break;
** copy the trans_id from the header
*/
if (valid_journal_header && replay_count == 0) {
- SB_JOURNAL(p_s_sb)->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
- SB_JOURNAL(p_s_sb)->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
- SB_JOURNAL(p_s_sb)->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
- SB_JOURNAL(p_s_sb)->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
+ journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset) ;
+ journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
+ journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) ;
+ journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1;
} else {
- SB_JOURNAL(p_s_sb)->j_mount_id = newest_mount_id + 1 ;
+ journal->j_mount_id = newest_mount_id + 1 ;
}
reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
- "newest_mount_id to %lu", SB_JOURNAL(p_s_sb)->j_mount_id) ;
- SB_JOURNAL(p_s_sb)->j_first_unflushed_offset = SB_JOURNAL(p_s_sb)->j_start ;
+ "newest_mount_id to %lu", journal->j_mount_id) ;
+ journal->j_first_unflushed_offset = journal->j_start ;
if (replay_count > 0) {
reiserfs_info (p_s_sb, "replayed %d transactions in %lu seconds\n",
replay_count, get_seconds() - start) ;
}
if (!bdev_read_only(p_s_sb->s_bdev) &&
- _update_journal_header_block(p_s_sb, SB_JOURNAL(p_s_sb)->j_start,
- SB_JOURNAL(p_s_sb)->j_last_flush_trans_id))
+ _update_journal_header_block(p_s_sb, journal->j_start,
+ journal->j_last_flush_trans_id))
{
/* replay failed, caller must call free_journal_ram and abort
** the mount
return 1 ;
}
memset(journal, 0, sizeof(struct reiserfs_journal)) ;
- INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
- INIT_LIST_HEAD (&SB_JOURNAL(p_s_sb)->j_prealloc_list);
- INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_working_list);
- INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_journal_list);
+ INIT_LIST_HEAD(&journal->j_bitmap_nodes) ;
+ INIT_LIST_HEAD (&journal->j_prealloc_list);
+ INIT_LIST_HEAD(&journal->j_working_list);
+ INIT_LIST_HEAD(&journal->j_journal_list);
+ journal->j_persistent_trans = 0;
if (reiserfs_allocate_list_bitmaps(p_s_sb,
- SB_JOURNAL(p_s_sb)->j_list_bitmap,
+ journal->j_list_bitmap,
SB_BMAP_NR(p_s_sb)))
goto free_and_return ;
allocate_bitmap_nodes(p_s_sb) ;
"(device %s) does not match to magic found in super "
"block %x",
jh->jh_journal.jp_journal_magic,
- bdevname( SB_JOURNAL(p_s_sb)->j_dev_bd, b),
+ bdevname( journal->j_dev_bd, b),
sb_jp_journal_magic(rs));
brelse (bhjh);
goto free_and_return;
}
- SB_JOURNAL_TRANS_MAX(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
- SB_JOURNAL_MAX_BATCH(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
- SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
- SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = JOURNAL_MAX_TRANS_AGE;
+ journal->j_trans_max = le32_to_cpu (jh->jh_journal.jp_journal_trans_max);
+ journal->j_max_batch = le32_to_cpu (jh->jh_journal.jp_journal_max_batch);
+ journal->j_max_commit_age = le32_to_cpu (jh->jh_journal.jp_journal_max_commit_age);
+ journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
- if (SB_JOURNAL_TRANS_MAX(p_s_sb)) {
+ if (journal->j_trans_max) {
/* make sure these parameters are available, assign them if they are not */
- __u32 initial = SB_JOURNAL_TRANS_MAX(p_s_sb);
+ __u32 initial = journal->j_trans_max;
__u32 ratio = 1;
if (p_s_sb->s_blocksize < 4096)
ratio = 4096 / p_s_sb->s_blocksize;
- if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_MIN_RATIO)
- SB_JOURNAL_TRANS_MAX(p_s_sb) = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
- if (SB_JOURNAL_TRANS_MAX(p_s_sb) > JOURNAL_TRANS_MAX_DEFAULT / ratio)
- SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT / ratio;
- if (SB_JOURNAL_TRANS_MAX(p_s_sb) < JOURNAL_TRANS_MIN_DEFAULT / ratio)
- SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MIN_DEFAULT / ratio;
+ if (SB_ONDISK_JOURNAL_SIZE(p_s_sb)/journal->j_trans_max < JOURNAL_MIN_RATIO)
+ journal->j_trans_max = SB_ONDISK_JOURNAL_SIZE(p_s_sb) / JOURNAL_MIN_RATIO;
+ if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio)
+ journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT / ratio;
+ if (journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio)
+ journal->j_trans_max = JOURNAL_TRANS_MIN_DEFAULT / ratio;
- if (SB_JOURNAL_TRANS_MAX(p_s_sb) != initial)
+ if (journal->j_trans_max != initial)
reiserfs_warning (p_s_sb, "sh-461: journal_init: wrong transaction max size (%u). Changed to %u",
- initial, SB_JOURNAL_TRANS_MAX(p_s_sb));
+ initial, journal->j_trans_max);
- SB_JOURNAL_MAX_BATCH(p_s_sb) = SB_JOURNAL_TRANS_MAX(p_s_sb)*
+ journal->j_max_batch = journal->j_trans_max*
JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT;
}
- if (!SB_JOURNAL_TRANS_MAX(p_s_sb)) {
+ if (!journal->j_trans_max) {
/* the file system was created by an old version of mkreiserfs,
so this field contains a zero value */
- SB_JOURNAL_TRANS_MAX(p_s_sb) = JOURNAL_TRANS_MAX_DEFAULT ;
- SB_JOURNAL_MAX_BATCH(p_s_sb) = JOURNAL_MAX_BATCH_DEFAULT ;
- SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = JOURNAL_MAX_COMMIT_AGE ;
+ journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT ;
+ journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT ;
+ journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE ;
/* for blocksize >= 4096 - max transaction size is 1024. For block size < 4096
trans max size is decreased proportionally */
if (p_s_sb->s_blocksize < 4096) {
- SB_JOURNAL_TRANS_MAX(p_s_sb) /= (4096 / p_s_sb->s_blocksize) ;
- SB_JOURNAL_MAX_BATCH(p_s_sb) = (SB_JOURNAL_TRANS_MAX(p_s_sb)) * 9 / 10 ;
+ journal->j_trans_max /= (4096 / p_s_sb->s_blocksize) ;
+ journal->j_max_batch = (journal->j_trans_max) * 9 / 10 ;
}
}
- SB_JOURNAL_DEFAULT_MAX_COMMIT_AGE(p_s_sb) = SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb);
+ journal->j_default_max_commit_age = journal->j_max_commit_age;
if (commit_max_age != 0) {
- SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb) = commit_max_age;
- SB_JOURNAL_MAX_TRANS_AGE(p_s_sb) = commit_max_age;
+ journal->j_max_commit_age = commit_max_age;
+ journal->j_max_trans_age = commit_max_age;
}
reiserfs_info (p_s_sb, "journal params: device %s, size %u, "
"journal first block %u, max trans len %u, max batch %u, "
"max commit age %u, max trans age %u\n",
- bdevname( SB_JOURNAL(p_s_sb)->j_dev_bd, b),
+ bdevname( journal->j_dev_bd, b),
SB_ONDISK_JOURNAL_SIZE(p_s_sb),
SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
- SB_JOURNAL_TRANS_MAX(p_s_sb),
- SB_JOURNAL_MAX_BATCH(p_s_sb),
- SB_JOURNAL_MAX_COMMIT_AGE(p_s_sb),
- SB_JOURNAL_MAX_TRANS_AGE(p_s_sb));
+ journal->j_trans_max,
+ journal->j_max_batch,
+ journal->j_max_commit_age,
+ journal->j_max_trans_age);
brelse (bhjh);
- SB_JOURNAL(p_s_sb)->j_list_bitmap_index = 0 ;
+ journal->j_list_bitmap_index = 0 ;
journal_list_init(p_s_sb) ;
- memset(SB_JOURNAL(p_s_sb)->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
-
- INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_dirty_buffers) ;
- spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;
-
- SB_JOURNAL(p_s_sb)->j_start = 0 ;
- SB_JOURNAL(p_s_sb)->j_len = 0 ;
- SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_async_throttle), 0) ;
- SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
- SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
- SB_JOURNAL(p_s_sb)->j_last = NULL ;
- SB_JOURNAL(p_s_sb)->j_first = NULL ;
- init_waitqueue_head(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
- sema_init(&SB_JOURNAL(p_s_sb)->j_lock, 1);
- sema_init(&SB_JOURNAL(p_s_sb)->j_flush_sem, 1);
-
- SB_JOURNAL(p_s_sb)->j_trans_id = 10 ;
- SB_JOURNAL(p_s_sb)->j_mount_id = 10 ;
- SB_JOURNAL(p_s_sb)->j_state = 0 ;
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
- SB_JOURNAL(p_s_sb)->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
- SB_JOURNAL(p_s_sb)->j_cnode_free_orig = SB_JOURNAL(p_s_sb)->j_cnode_free_list ;
- SB_JOURNAL(p_s_sb)->j_cnode_free = SB_JOURNAL(p_s_sb)->j_cnode_free_list ? num_cnodes : 0 ;
- SB_JOURNAL(p_s_sb)->j_cnode_used = 0 ;
- SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
+ memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)) ;
+
+ INIT_LIST_HEAD(&journal->j_dirty_buffers) ;
+ spin_lock_init(&journal->j_dirty_buffers_lock) ;
+
+ journal->j_start = 0 ;
+ journal->j_len = 0 ;
+ journal->j_len_alloc = 0 ;
+ atomic_set(&(journal->j_wcount), 0) ;
+ atomic_set(&(journal->j_async_throttle), 0) ;
+ journal->j_bcount = 0 ;
+ journal->j_trans_start_time = 0 ;
+ journal->j_last = NULL ;
+ journal->j_first = NULL ;
+ init_waitqueue_head(&(journal->j_join_wait)) ;
+ sema_init(&journal->j_lock, 1);
+ sema_init(&journal->j_flush_sem, 1);
+
+ journal->j_trans_id = 10 ;
+ journal->j_mount_id = 10 ;
+ journal->j_state = 0 ;
+ atomic_set(&(journal->j_jlock), 0) ;
+ journal->j_cnode_free_list = allocate_cnodes(num_cnodes) ;
+ journal->j_cnode_free_orig = journal->j_cnode_free_list ;
+ journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0 ;
+ journal->j_cnode_used = 0 ;
+ journal->j_must_wait = 0 ;
init_journal_hash(p_s_sb) ;
- jl = SB_JOURNAL(p_s_sb)->j_current_jl;
+ jl = journal->j_current_jl;
jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
if (!jl->j_list_bitmap) {
reiserfs_warning(p_s_sb, "journal-2005, get_list_bitmap failed for journal list 0") ;
** transaction
*/
int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) {
+ struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
time_t now = get_seconds() ;
/* cannot restart while nested */
+ BUG_ON (!th->t_trans_id);
if (th->t_refcount > 1)
return 0 ;
- if ( SB_JOURNAL(th->t_super)->j_must_wait > 0 ||
- (SB_JOURNAL(th->t_super)->j_len_alloc + new_alloc) >= SB_JOURNAL_MAX_BATCH(th->t_super) ||
- atomic_read(&(SB_JOURNAL(th->t_super)->j_jlock)) ||
- (now - SB_JOURNAL(th->t_super)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(th->t_super) ||
- SB_JOURNAL(th->t_super)->j_cnode_free < (SB_JOURNAL_TRANS_MAX(th->t_super) * 3)) {
+ if ( journal->j_must_wait > 0 ||
+ (journal->j_len_alloc + new_alloc) >= journal->j_max_batch ||
+ atomic_read(&(journal->j_jlock)) ||
+ (now - journal->j_trans_start_time) > journal->j_max_trans_age ||
+ journal->j_cnode_free < (journal->j_trans_max * 3)) {
return 1 ;
}
return 0 ;
** kernel_lock to be held
*/
void reiserfs_block_writes(struct reiserfs_transaction_handle *th) {
- struct super_block *s = th->t_super ;
- SB_JOURNAL(s)->j_must_wait = 1 ;
- set_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
+ struct reiserfs_journal *journal = SB_JOURNAL (th->t_super);
+ BUG_ON (!th->t_trans_id);
+ journal->j_must_wait = 1 ;
+ set_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
return ;
}
** require BKL
*/
void reiserfs_allow_writes(struct super_block *s) {
- clear_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state) ;
- wake_up(&SB_JOURNAL(s)->j_join_wait) ;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ clear_bit(J_WRITERS_BLOCKED, &journal->j_state) ;
+ wake_up(&journal->j_join_wait) ;
}
/* this must be called without a transaction started, and does not
** require BKL
*/
void reiserfs_wait_on_write_block(struct super_block *s) {
- wait_event(SB_JOURNAL(s)->j_join_wait,
- !test_bit(WRITERS_BLOCKED, &SB_JOURNAL(s)->j_state)) ;
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ wait_event(journal->j_join_wait,
+ !test_bit(J_WRITERS_BLOCKED, &journal->j_state)) ;
}
static void queue_log_writer(struct super_block *s) {
wait_queue_t wait;
- set_bit(WRITERS_QUEUED, &SB_JOURNAL(s)->j_state);
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ set_bit(J_WRITERS_QUEUED, &journal->j_state);
/*
* we don't want to use wait_event here because
* we only want to wait once.
*/
init_waitqueue_entry(&wait, current);
- add_wait_queue(&SB_JOURNAL(s)->j_join_wait, &wait);
+ add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (test_bit(WRITERS_QUEUED, &SB_JOURNAL(s)->j_state))
+ if (test_bit(J_WRITERS_QUEUED, &journal->j_state))
schedule();
current->state = TASK_RUNNING;
- remove_wait_queue(&SB_JOURNAL(s)->j_join_wait, &wait);
+ remove_wait_queue(&journal->j_join_wait, &wait);
}
static void wake_queued_writers(struct super_block *s) {
- if (test_and_clear_bit(WRITERS_QUEUED, &SB_JOURNAL(s)->j_state))
- wake_up(&SB_JOURNAL(s)->j_join_wait);
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
+ if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state))
+ wake_up(&journal->j_join_wait);
}
static void let_transaction_grow(struct super_block *sb,
unsigned long trans_id)
{
- unsigned long bcount = SB_JOURNAL(sb)->j_bcount;
+ struct reiserfs_journal *journal = SB_JOURNAL (sb);
+ unsigned long bcount = journal->j_bcount;
while(1) {
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(1);
- SB_JOURNAL(sb)->j_current_jl->j_state |= LIST_COMMIT_PENDING;
- while ((atomic_read(&SB_JOURNAL(sb)->j_wcount) > 0 ||
- atomic_read(&SB_JOURNAL(sb)->j_jlock)) &&
- SB_JOURNAL(sb)->j_trans_id == trans_id) {
+ journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
+ while ((atomic_read(&journal->j_wcount) > 0 ||
+ atomic_read(&journal->j_jlock)) &&
+ journal->j_trans_id == trans_id) {
queue_log_writer(sb);
}
- if (SB_JOURNAL(sb)->j_trans_id != trans_id)
+ if (journal->j_trans_id != trans_id)
break;
- if (bcount == SB_JOURNAL(sb)->j_bcount)
+ if (bcount == journal->j_bcount)
break;
- bcount = SB_JOURNAL(sb)->j_bcount;
+ bcount = journal->j_bcount;
}
}
struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
struct reiserfs_transaction_handle myth;
int sched_count = 0;
+ int retval;
reiserfs_check_lock_depth(p_s_sb, "journal_begin") ;
- RFALSE( p_s_sb->s_flags & MS_RDONLY,
- "clm-2078: calling journal_begin on readonly FS") ;
PROC_INFO_INC( p_s_sb, journal.journal_being );
/* set here for journal_join */
relock:
lock_journal(p_s_sb) ;
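+ /* refuse to start new transactions on an aborted journal, unless this is the abort/cleanup path itself */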
+ if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted (journal)) {
+ unlock_journal (p_s_sb);
+ retval = journal->j_errno;
+ goto out_fail;
+ }
journal->j_bcount++;
- if (test_bit(WRITERS_BLOCKED, &journal->j_state)) {
+ if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(p_s_sb) ;
reiserfs_wait_on_write_block(p_s_sb) ;
PROC_INFO_INC( p_s_sb, journal.journal_relock_writers );
*/
if ( (!join && journal->j_must_wait > 0) ||
- ( !join && (journal->j_len_alloc + nblocks + 2) >= SB_JOURNAL_MAX_BATCH(p_s_sb)) ||
+ ( !join && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch) ||
(!join && atomic_read(&journal->j_wcount) > 0 && journal->j_trans_start_time > 0 &&
- (now - journal->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) ||
+ (now - journal->j_trans_start_time) > journal->j_max_trans_age) ||
(!join && atomic_read(&journal->j_jlock)) ||
- (!join && journal->j_cnode_free < (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3))) {
+ (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
old_trans_id = journal->j_trans_id;
unlock_journal(p_s_sb) ; /* allow others to finish this transaction */
if (!join && (journal->j_len_alloc + nblocks + 2) >=
- SB_JOURNAL_MAX_BATCH(p_s_sb) &&
+ journal->j_max_batch &&
((journal->j_len + nblocks + 2) * 100) < (journal->j_len_alloc * 75))
{
if (atomic_read(&journal->j_wcount) > 10) {
}
goto relock;
}
- journal_join(&myth, p_s_sb, 1) ;
+ retval = journal_join(&myth, p_s_sb, 1) ;
+ if (retval)
+ goto out_fail;
/* someone might have ended the transaction while we joined */
- if (old_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
- do_journal_end(&myth, p_s_sb, 1, 0) ;
+ if (old_trans_id != journal->j_trans_id) {
+ retval = do_journal_end(&myth, p_s_sb, 1, 0) ;
} else {
- do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
+ retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW) ;
}
+ if (retval)
+ goto out_fail;
+
PROC_INFO_INC( p_s_sb, journal.journal_relock_wcount );
goto relock ;
}
th->t_blocks_allocated = nblocks ;
th->t_trans_id = journal->j_trans_id ;
unlock_journal(p_s_sb) ;
+ INIT_LIST_HEAD (&th->t_list);
return 0 ;
+
+out_fail:
+ memset (th, 0, sizeof (*th));
+ /* Re-set th->t_super, so we can properly keep track of how many
+ * persistent transactions there are. We need to do this so if this
+ * call is part of a failed restart_transaction, we can free it later */
+ th->t_super = p_s_sb;
+ return retval;
}
struct reiserfs_transaction_handle *
reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
return NULL;
}
+
+ SB_JOURNAL(s)->j_persistent_trans++;
return th ;
}
int
reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th) {
struct super_block *s = th->t_super;
- int ret;
- ret = journal_end(th, th->t_super, th->t_blocks_allocated);
- if (th->t_refcount == 0)
+ int ret = 0;
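+ /* a zero trans_id means journal_begin failed and this handle never started a transaction */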
+ if (th->t_trans_id)
+ ret = journal_end(th, th->t_super, th->t_blocks_allocated);
+ else
+ ret = -EIO;
+ if (th->t_refcount == 0) {
+ SB_JOURNAL(s)->j_persistent_trans--;
reiserfs_kfree(th, sizeof(struct reiserfs_transaction_handle), s) ;
+ }
return ret;
}
if (cur_th && cur_th->t_refcount > 1) {
BUG() ;
}
- return do_journal_begin_r(th, p_s_sb, nblocks, 1) ;
+ return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN) ;
+}
+
+int journal_join_abort(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+ struct reiserfs_transaction_handle *cur_th = current->journal_info;
+
+ /* this keeps do_journal_end from NULLing out the current->journal_info
+ ** pointer
+ */
+ th->t_handle_save = cur_th ;
+ if (cur_th && cur_th->t_refcount > 1) {
+ BUG() ;
+ }
+ return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT) ;
}
int journal_begin(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks) {
if (cur_th) {
/* we are nesting into the current transaction */
if (cur_th->t_super == p_s_sb) {
+ BUG_ON (!cur_th->t_refcount);
cur_th->t_refcount++ ;
memcpy(th, cur_th, sizeof(*th));
if (th->t_refcount <= 1)
} else {
current->journal_info = th;
}
- ret = do_journal_begin_r(th, p_s_sb, nblocks, 0) ;
+ ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG) ;
if (current->journal_info != th)
BUG() ;
+
+ /* I guess this boils down to being the reciprocal of clm-2100 above.
+ * If do_journal_begin_r fails, we need to put it back, since journal_end
+ * won't be called to do it. */
+ if (ret)
+ current->journal_info = th->t_handle_save;
+ else
+ BUG_ON (!th->t_refcount);
+
return ret ;
}
** if j_len is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, struct buffer_head *bh) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_cnode *cn = NULL;
int count_already_incd = 0 ;
int prepared = 0 ;
+ BUG_ON (!th->t_trans_id);
PROC_INFO_INC( p_s_sb, journal.mark_dirty );
- if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
+ if (th->t_trans_id != journal->j_trans_id) {
reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
- th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
+ th->t_trans_id, journal->j_trans_id);
}
+
p_s_sb->s_dirt = 1;
- prepared = test_and_clear_bit(BH_JPrepared, &bh->b_state) ;
- clear_bit(BH_JRestore_dirty, &bh->b_state);
+ prepared = test_clear_buffer_journal_prepared (bh);
+ clear_buffer_journal_restore_dirty (bh);
/* already in this transaction, we are done */
if (buffer_journaled(bh)) {
PROC_INFO_INC( p_s_sb, journal.mark_dirty_already );
buffer_journal_dirty(bh) ? ' ' : '!') ;
}
- if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0) {
- reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount))) ;
+ if (atomic_read(&(journal->j_wcount)) <= 0) {
+ reiserfs_warning (p_s_sb, "journal-1409: journal_mark_dirty returning because j_wcount was %d", atomic_read(&(journal->j_wcount))) ;
return 1 ;
}
/* this error means I've screwed up, and we've overflowed the transaction.
** Nothing can be done here, except make the FS readonly or panic.
*/
- if (SB_JOURNAL(p_s_sb)->j_len >= SB_JOURNAL_TRANS_MAX(p_s_sb)) {
- reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", SB_JOURNAL(p_s_sb)->j_len) ;
+ if (journal->j_len >= journal->j_trans_max) {
+ reiserfs_panic(th->t_super, "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n", journal->j_len) ;
}
if (buffer_journal_dirty(bh)) {
count_already_incd = 1 ;
PROC_INFO_INC( p_s_sb, journal.mark_dirty_notjournal );
- mark_buffer_notjournal_dirty(bh) ;
+ clear_buffer_journal_dirty (bh);
}
- if (SB_JOURNAL(p_s_sb)->j_len > SB_JOURNAL(p_s_sb)->j_len_alloc) {
- SB_JOURNAL(p_s_sb)->j_len_alloc = SB_JOURNAL(p_s_sb)->j_len + JOURNAL_PER_BALANCE_CNT ;
+ if (journal->j_len > journal->j_len_alloc) {
+ journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT ;
}
- set_bit(BH_JDirty, &bh->b_state) ;
+ set_buffer_journaled (bh);
/* now put this guy on the end */
if (!cn) {
if (th->t_blocks_logged == th->t_blocks_allocated) {
th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT ;
- SB_JOURNAL(p_s_sb)->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
+ journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT ;
}
th->t_blocks_logged++ ;
- SB_JOURNAL(p_s_sb)->j_len++ ;
+ journal->j_len++ ;
cn->bh = bh ;
cn->blocknr = bh->b_blocknr ;
cn->sb = p_s_sb;
cn->jlist = NULL ;
- insert_journal_hash(SB_JOURNAL(p_s_sb)->j_hash_table, cn) ;
+ insert_journal_hash(journal->j_hash_table, cn) ;
if (!count_already_incd) {
get_bh(bh) ;
}
}
cn->next = NULL ;
- cn->prev = SB_JOURNAL(p_s_sb)->j_last ;
+ cn->prev = journal->j_last ;
cn->bh = bh ;
- if (SB_JOURNAL(p_s_sb)->j_last) {
- SB_JOURNAL(p_s_sb)->j_last->next = cn ;
- SB_JOURNAL(p_s_sb)->j_last = cn ;
+ if (journal->j_last) {
+ journal->j_last->next = cn ;
+ journal->j_last = cn ;
} else {
- SB_JOURNAL(p_s_sb)->j_first = cn ;
- SB_JOURNAL(p_s_sb)->j_last = cn ;
+ journal->j_first = cn ;
+ journal->j_last = cn ;
}
return 0 ;
}
reiserfs_warning (p_s_sb, "REISER-NESTING: th NULL, refcount %d",
th->t_refcount);
+ if (!th->t_trans_id) {
+ WARN_ON (1);
+ return -EIO;
+ }
+
th->t_refcount--;
if (th->t_refcount > 0) {
struct reiserfs_transaction_handle *cur_th = current->journal_info ;
static int remove_from_transaction(struct super_block *p_s_sb, b_blocknr_t blocknr, int already_cleaned) {
struct buffer_head *bh ;
struct reiserfs_journal_cnode *cn ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
int ret = 0;
- cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, blocknr) ;
+ cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr) ;
if (!cn || !cn->bh) {
return ret ;
}
if (cn->next) {
cn->next->prev = cn->prev ;
}
- if (cn == SB_JOURNAL(p_s_sb)->j_first) {
- SB_JOURNAL(p_s_sb)->j_first = cn->next ;
+ if (cn == journal->j_first) {
+ journal->j_first = cn->next ;
}
- if (cn == SB_JOURNAL(p_s_sb)->j_last) {
- SB_JOURNAL(p_s_sb)->j_last = cn->prev ;
+ if (cn == journal->j_last) {
+ journal->j_last = cn->prev ;
}
if (bh)
- remove_journal_hash(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, NULL, bh->b_blocknr, 0) ;
- mark_buffer_not_journaled(bh) ; /* don't log this one */
+ remove_journal_hash(p_s_sb, journal->j_hash_table, NULL, bh->b_blocknr, 0) ;
+ clear_buffer_journaled (bh); /* don't log this one */
if (!already_cleaned) {
- mark_buffer_notjournal_dirty(bh) ;
+ clear_buffer_journal_dirty (bh);
put_bh(bh) ;
if (atomic_read(&(bh->b_count)) < 0) {
reiserfs_warning (p_s_sb, "journal-1752: remove from trans, b_count < 0");
}
ret = 1 ;
}
- SB_JOURNAL(p_s_sb)->j_len-- ;
- SB_JOURNAL(p_s_sb)->j_len_alloc-- ;
+ journal->j_len-- ;
+ journal->j_len_alloc-- ;
free_cnode(p_s_sb, cn) ;
return ret ;
}
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, unsigned long nblocks) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
+ BUG_ON (!th->t_trans_id);
/* you can sync while nested, very, very bad */
if (th->t_refcount > 1) {
BUG() ;
}
- if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ if (journal->j_len == 0) {
reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
}
*/
static void flush_async_commits(void *p) {
struct super_block *p_s_sb = p;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_list *jl;
struct list_head *entry;
lock_kernel();
- if (!list_empty(&SB_JOURNAL(p_s_sb)->j_journal_list)) {
+ if (!list_empty(&journal->j_journal_list)) {
/* last entry is the youngest, commit it and you get everything */
- entry = SB_JOURNAL(p_s_sb)->j_journal_list.prev;
+ entry = journal->j_journal_list.prev;
jl = JOURNAL_LIST_ENTRY(entry);
flush_commit_list(p_s_sb, jl, 1);
}
* this is a little racy, but there's no harm in missing
* the filemap_fdatawrite
*/
- if (!atomic_read(&SB_JOURNAL(p_s_sb)->j_async_throttle)) {
- atomic_inc(&SB_JOURNAL(p_s_sb)->j_async_throttle);
+ if (!atomic_read(&journal->j_async_throttle) && !reiserfs_is_journal_aborted (journal)) {
+ atomic_inc(&journal->j_async_throttle);
filemap_fdatawrite(p_s_sb->s_bdev->bd_inode->i_mapping);
- atomic_dec(&SB_JOURNAL(p_s_sb)->j_async_throttle);
+ atomic_dec(&journal->j_async_throttle);
}
}
int reiserfs_flush_old_commits(struct super_block *p_s_sb) {
time_t now ;
struct reiserfs_transaction_handle th ;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
now = get_seconds();
/* safety check so we don't flush while we are replaying the log during
* mount
*/
- if (list_empty(&SB_JOURNAL(p_s_sb)->j_journal_list)) {
+ if (list_empty(&journal->j_journal_list)) {
return 0 ;
}
/* check the current transaction. If there are no writers, and it is
* too old, finish it, and force the commit blocks to disk
*/
- if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) <= 0 &&
- SB_JOURNAL(p_s_sb)->j_trans_start_time > 0 &&
- SB_JOURNAL(p_s_sb)->j_len > 0 &&
- (now - SB_JOURNAL(p_s_sb)->j_trans_start_time) >
- SB_JOURNAL_MAX_TRANS_AGE(p_s_sb))
+ if (atomic_read(&journal->j_wcount) <= 0 &&
+ journal->j_trans_start_time > 0 &&
+ journal->j_len > 0 &&
+ (now - journal->j_trans_start_time) > journal->j_max_trans_age)
{
- journal_join(&th, p_s_sb, 1) ;
- reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
- journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
-
- /* we're only being called from kreiserfsd, it makes no sense to do
- ** an async commit so that kreiserfsd can do it later
- */
- do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
+ if (!journal_join(&th, p_s_sb, 1)) {
+ reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
+ journal_mark_dirty(&th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
+
+ /* we're only being called from kreiserfsd; it makes no sense to do
+ ** an async commit so that kreiserfsd can do it later
+ */
+ do_journal_end(&th, p_s_sb,1, COMMIT_NOW | WAIT) ;
+ }
}
return p_s_sb->s_dirt;
}
int commit_now = flags & COMMIT_NOW ;
int wait_on_commit = flags & WAIT ;
struct reiserfs_journal_list *jl;
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
- if (th->t_trans_id != SB_JOURNAL(p_s_sb)->j_trans_id) {
+ BUG_ON (!th->t_trans_id);
+
+ if (th->t_trans_id != journal->j_trans_id) {
reiserfs_panic(th->t_super, "journal-1577: handle trans id %ld != current trans id %ld\n",
- th->t_trans_id, SB_JOURNAL(p_s_sb)->j_trans_id);
+ th->t_trans_id, journal->j_trans_id);
}
- SB_JOURNAL(p_s_sb)->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
- if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
- atomic_dec(&(SB_JOURNAL(p_s_sb)->j_wcount)) ;
+ journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged) ;
+ if (atomic_read(&(journal->j_wcount)) > 0) { /* <= 0 is allowed. unmounting might not call begin */
+ atomic_dec(&(journal->j_wcount)) ;
}
/* BUG, deal with the case where j_len is 0, but blocks that were previously
** freed still need to be released.  That will be dealt with by the next
** transaction that actually writes something, but should be taken
** care of in this trans
*/
- if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ if (journal->j_len == 0) {
BUG();
}
/* if wcount > 0, and we are called with flush or commit_now,
** then we flush the commit or journal list, and just return 0
** because the rest of journal_end was already done for this transaction.
*/
- if (atomic_read(&(SB_JOURNAL(p_s_sb)->j_wcount)) > 0) {
+ if (atomic_read(&(journal->j_wcount)) > 0) {
if (flush || commit_now) {
unsigned trans_id ;
- jl = SB_JOURNAL(p_s_sb)->j_current_jl;
+ jl = journal->j_current_jl;
trans_id = jl->j_trans_id;
if (wait_on_commit)
jl->j_state |= LIST_COMMIT_PENDING;
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
+ atomic_set(&(journal->j_jlock), 1) ;
if (flush) {
- SB_JOURNAL(p_s_sb)->j_next_full_flush = 1 ;
+ journal->j_next_full_flush = 1 ;
}
unlock_journal(p_s_sb) ;
/* sleep while the current transaction is still j_jlocked */
- while(SB_JOURNAL(p_s_sb)->j_trans_id == trans_id) {
- if (atomic_read(&SB_JOURNAL(p_s_sb)->j_jlock)) {
+ while(journal->j_trans_id == trans_id) {
+ if (atomic_read(&journal->j_jlock)) {
queue_log_writer(p_s_sb);
} else {
lock_journal(p_s_sb);
- if (SB_JOURNAL(p_s_sb)->j_trans_id == trans_id) {
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 1) ;
+ if (journal->j_trans_id == trans_id) {
+ atomic_set(&(journal->j_jlock), 1) ;
}
unlock_journal(p_s_sb);
}
}
- if (SB_JOURNAL(p_s_sb)->j_trans_id == trans_id) {
+ if (journal->j_trans_id == trans_id) {
BUG();
}
if (commit_now && journal_list_still_alive(p_s_sb, trans_id) &&
/* deal with old transactions where we are the last writers */
now = get_seconds();
- if ((now - SB_JOURNAL(p_s_sb)->j_trans_start_time) > SB_JOURNAL_MAX_TRANS_AGE(p_s_sb)) {
+ if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
commit_now = 1 ;
- SB_JOURNAL(p_s_sb)->j_next_async_flush = 1 ;
+ journal->j_next_async_flush = 1 ;
}
/* don't batch when someone is waiting on j_join_wait */
/* don't batch when syncing the commit or flushing the whole trans */
- if (!(SB_JOURNAL(p_s_sb)->j_must_wait > 0) && !(atomic_read(&(SB_JOURNAL(p_s_sb)->j_jlock))) && !flush && !commit_now &&
- (SB_JOURNAL(p_s_sb)->j_len < SB_JOURNAL_MAX_BATCH(p_s_sb)) &&
- SB_JOURNAL(p_s_sb)->j_len_alloc < SB_JOURNAL_MAX_BATCH(p_s_sb) && SB_JOURNAL(p_s_sb)->j_cnode_free > (SB_JOURNAL_TRANS_MAX(p_s_sb) * 3)) {
- SB_JOURNAL(p_s_sb)->j_bcount++ ;
+ if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock))) && !flush && !commit_now &&
+ (journal->j_len < journal->j_max_batch) &&
+ journal->j_len_alloc < journal->j_max_batch && journal->j_cnode_free > (journal->j_trans_max * 3)) {
+ journal->j_bcount++ ;
unlock_journal(p_s_sb) ;
return 0 ;
}
- if (SB_JOURNAL(p_s_sb)->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
- reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", SB_JOURNAL(p_s_sb)->j_start) ;
+ if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
+ reiserfs_panic(p_s_sb, "journal-003: journal_end: j_start (%ld) is too high\n", journal->j_start) ;
}
return 1 ;
}
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th, struct super_block *p_s_sb, b_blocknr_t blocknr) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_cnode *cn = NULL ;
struct buffer_head *bh = NULL ;
struct reiserfs_list_bitmap *jb = NULL ;
int cleaned = 0 ;
+ BUG_ON (!th->t_trans_id);
- cn = get_journal_hash_dev(p_s_sb, SB_JOURNAL(p_s_sb)->j_hash_table, blocknr);
+ cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
if (cn && cn->bh) {
bh = cn->bh ;
get_bh(bh) ;
}
/* if it is journal new, we just remove it from this transaction */
if (bh && buffer_journal_new(bh)) {
- mark_buffer_notjournal_new(bh) ;
+ clear_buffer_journal_new (bh);
clear_prepared_bits(bh) ;
reiserfs_clean_and_file_buffer(bh) ;
cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
} else {
/* set the bit for this block in the journal bitmap for this transaction */
- jb = SB_JOURNAL(p_s_sb)->j_current_jl->j_list_bitmap;
+ jb = journal->j_current_jl->j_list_bitmap;
if (!jb) {
reiserfs_panic(p_s_sb, "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n") ;
}
cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned) ;
/* find all older transactions with this block, make sure they don't try to write it out */
- cn = get_journal_hash_dev(p_s_sb,SB_JOURNAL(p_s_sb)->j_list_hash_table, blocknr) ;
+ cn = get_journal_hash_dev(p_s_sb,journal->j_list_hash_table, blocknr) ;
while (cn) {
if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
set_bit(BLOCK_FREED, &cn->state) ;
/* remove_from_transaction will brelse the buffer if it was
** in the current trans
*/
- mark_buffer_notjournal_dirty(cn->bh) ;
+ clear_buffer_journal_dirty (cn->bh);
cleaned = 1 ;
put_bh(cn->bh) ;
if (atomic_read(&(cn->bh->b_count)) < 0) {
}
void reiserfs_update_inode_transaction(struct inode *inode) {
- REISERFS_I(inode)->i_jl = SB_JOURNAL(inode->i_sb)->j_current_jl;
- REISERFS_I(inode)->i_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
+ struct reiserfs_journal *journal = SB_JOURNAL (inode->i_sb);
+ REISERFS_I(inode)->i_jl = journal->j_current_jl;
+ REISERFS_I(inode)->i_trans_id = journal->j_trans_id ;
}
/*
{
struct reiserfs_transaction_handle th ;
struct super_block *sb = inode->i_sb ;
+ struct reiserfs_journal *journal = SB_JOURNAL (sb);
int ret = 0;
/* is it from the current transaction, or from an unknown transaction? */
- if (id == SB_JOURNAL(sb)->j_trans_id) {
- jl = SB_JOURNAL(sb)->j_current_jl;
+ if (id == journal->j_trans_id) {
+ jl = journal->j_current_jl;
/* try to let other writers come in and grow this transaction */
let_transaction_grow(sb, id);
- if (SB_JOURNAL(sb)->j_trans_id != id) {
+ if (journal->j_trans_id != id) {
goto flush_commit_only;
}
- journal_begin(&th, sb, 1) ;
+ ret = journal_begin(&th, sb, 1) ;
+ if (ret)
+ return ret;
/* someone might have ended this transaction while we joined */
- if (SB_JOURNAL(sb)->j_trans_id != id) {
+ if (journal->j_trans_id != id) {
reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1) ;
journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb)) ;
- journal_end(&th, sb, 1) ;
+ ret = journal_end(&th, sb, 1) ;
goto flush_commit_only;
}
- journal_end_sync(&th, sb, 1) ;
- ret = 1;
+ ret = journal_end_sync(&th, sb, 1) ;
+ if (!ret)
+ ret = 1;
} else {
/* this gets tricky, we have to make sure the journal list in
if (atomic_read(&jl->j_commit_left) > 1)
ret = 1;
flush_commit_list(sb, jl, 1) ;
+ if (journal->j_errno)
+ ret = journal->j_errno;
}
}
/* otherwise the list is gone, and long since committed */
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
struct buffer_head *bh) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
PROC_INFO_INC( p_s_sb, journal.restore_prepared );
if (!bh) {
return ;
}
- if (test_and_clear_bit(BH_JRestore_dirty, &bh->b_state) &&
+ if (test_clear_buffer_journal_restore_dirty (bh) &&
buffer_journal_dirty(bh)) {
struct reiserfs_journal_cnode *cn;
cn = get_journal_hash_dev(p_s_sb,
- SB_JOURNAL(p_s_sb)->j_list_hash_table,
+ journal->j_list_hash_table,
bh->b_blocknr);
if (cn && can_dirty(cn)) {
- set_bit(BH_JTest, &bh->b_state);
+ set_buffer_journal_test (bh);
mark_buffer_dirty(bh);
}
}
- clear_bit(BH_JPrepared, &bh->b_state) ;
+ clear_buffer_journal_prepared (bh);
}
extern struct tree_balance *cur_tb ;
return 0;
lock_buffer(bh);
}
- set_bit(BH_JPrepared, &bh->b_state);
+ set_buffer_journal_prepared (bh);
if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
- clear_bit(BH_JTest, &bh->b_state);
- set_bit(BH_JRestore_dirty, &bh->b_state);
+ clear_buffer_journal_test (bh);
+ set_buffer_journal_restore_dirty (bh);
}
unlock_buffer(bh);
return 1;
}
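(Illustrative sketch, not part of this patch: how the prepare/restore helpers
above are typically paired by a caller.  Only reiserfs_prepare_for_journal,
journal_mark_dirty and reiserfs_restore_prepared_buffer come from this file;
example_edit_buffer and its really_change_it flag are hypothetical.)

/* Hypothetical caller: prepare a buffer, then either log the change or put
 * the buffer back.  reiserfs_prepare_for_journal() sets journal_prepared and,
 * for a dirty journal_dirty buffer, records that in journal_restore_dirty;
 * journal_mark_dirty() adds the buffer to the running transaction, while
 * reiserfs_restore_prepared_buffer() undoes the preparation when the
 * modification is abandoned. */
static void example_edit_buffer(struct reiserfs_transaction_handle *th,
                                struct super_block *sb,
                                struct buffer_head *bh, int really_change_it)
{
    reiserfs_prepare_for_journal(sb, bh, 1) ;
    if (really_change_it) {
        /* ... modify bh->b_data here ... */
        journal_mark_dirty(th, sb, bh) ;
    } else {
        reiserfs_restore_prepared_buffer(sb, bh) ;
    }
}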
static void flush_old_journal_lists(struct super_block *s) {
+ struct reiserfs_journal *journal = SB_JOURNAL (s);
struct reiserfs_journal_list *jl;
struct list_head *entry;
time_t now = get_seconds();
- while(!list_empty(&SB_JOURNAL(s)->j_journal_list)) {
- entry = SB_JOURNAL(s)->j_journal_list.next;
+ while(!list_empty(&journal->j_journal_list)) {
+ entry = journal->j_journal_list.next;
jl = JOURNAL_LIST_ENTRY(entry);
/* this check should always be run, to send old lists to disk */
if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4))) {
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
+**
+** If the journal is aborted, we just clean up. Things like flushing
+** journal lists, etc. just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_block * p_s_sb, unsigned long nblocks,
int flags) {
+ struct reiserfs_journal *journal = SB_JOURNAL (p_s_sb);
struct reiserfs_journal_cnode *cn, *next, *jl_cn;
struct reiserfs_journal_cnode *last_cn = NULL;
struct reiserfs_journal_desc *desc ;
unsigned long commit_trans_id;
int trans_half;
- if (th->t_refcount > 1)
- BUG() ;
+ BUG_ON (th->t_refcount > 1);
+ BUG_ON (!th->t_trans_id);
current->journal_info = th->t_handle_save;
reiserfs_check_lock_depth(p_s_sb, "journal end");
- if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ if (journal->j_len == 0) {
reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb), 1) ;
journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb)) ;
}
lock_journal(p_s_sb) ;
- if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
+ if (journal->j_next_full_flush) {
flags |= FLUSH_ALL ;
flush = 1 ;
}
- if (SB_JOURNAL(p_s_sb)->j_next_async_flush) {
+ if (journal->j_next_async_flush) {
flags |= COMMIT_NOW | WAIT;
wait_on_commit = 1;
}
}
/* check_journal_end might set these, check again */
- if (SB_JOURNAL(p_s_sb)->j_next_full_flush) {
+ if (journal->j_next_full_flush) {
flush = 1 ;
}
** j must wait means we have to flush the log blocks, and the real blocks for
** this transaction
*/
- if (SB_JOURNAL(p_s_sb)->j_must_wait > 0) {
+ if (journal->j_must_wait > 0) {
flush = 1 ;
}
#endif
/* setup description block */
- d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
+ d_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start) ;
set_buffer_uptodate(d_bh);
desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
memset(d_bh->b_data, 0, d_bh->b_size) ;
memcpy(get_journal_desc_magic (d_bh), JOURNAL_DESC_MAGIC, 8) ;
- set_desc_trans_id(desc, SB_JOURNAL(p_s_sb)->j_trans_id) ;
+ set_desc_trans_id(desc, journal->j_trans_id) ;
/* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
- ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
+ ((journal->j_start + journal->j_len + 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
memset(c_bh->b_data, 0, c_bh->b_size) ;
- set_commit_trans_id(commit, SB_JOURNAL(p_s_sb)->j_trans_id) ;
+ set_commit_trans_id(commit, journal->j_trans_id) ;
set_buffer_uptodate(c_bh) ;
/* init this journal list */
- jl = SB_JOURNAL(p_s_sb)->j_current_jl;
+ jl = journal->j_current_jl;
/* we lock the commit before doing anything because
* we want to make sure nobody tries to run flush_commit_list until
commit_trans_id = jl->j_trans_id;
atomic_set(&jl->j_older_commits_done, 0) ;
- jl->j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id ;
- jl->j_timestamp = SB_JOURNAL(p_s_sb)->j_trans_start_time ;
+ jl->j_trans_id = journal->j_trans_id ;
+ jl->j_timestamp = journal->j_trans_start_time ;
jl->j_commit_bh = c_bh ;
- jl->j_start = SB_JOURNAL(p_s_sb)->j_start ;
- jl->j_len = SB_JOURNAL(p_s_sb)->j_len ;
- atomic_set(&jl->j_nonzerolen, SB_JOURNAL(p_s_sb)->j_len) ;
- atomic_set(&jl->j_commit_left, SB_JOURNAL(p_s_sb)->j_len + 2);
+ jl->j_start = journal->j_start ;
+ jl->j_len = journal->j_len ;
+ atomic_set(&jl->j_nonzerolen, journal->j_len) ;
+ atomic_set(&jl->j_commit_left, journal->j_len + 2);
jl->j_realblock = NULL ;
/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
** copy into real block index array in the commit or desc block
*/
trans_half = journal_trans_half(p_s_sb->s_blocksize);
- for (i = 0, cn = SB_JOURNAL(p_s_sb)->j_first ; cn ; cn = cn->next, i++) {
- if (test_bit(BH_JDirty, &cn->bh->b_state) ) {
+ for (i = 0, cn = journal->j_first ; cn ; cn = cn->next, i++) {
+ if (buffer_journaled (cn->bh)) {
jl_cn = get_cnode(p_s_sb) ;
if (!jl_cn) {
reiserfs_panic(p_s_sb, "journal-1676, get_cnode returned NULL\n") ;
jl_cn->sb = p_s_sb;
jl_cn->bh = cn->bh ;
jl_cn->jlist = jl;
- insert_journal_hash(SB_JOURNAL(p_s_sb)->j_list_hash_table, jl_cn) ;
+ insert_journal_hash(journal->j_list_hash_table, jl_cn) ;
if (i < trans_half) {
desc->j_realblock[i] = cpu_to_le32(cn->bh->b_blocknr) ;
} else {
i-- ;
}
}
- set_desc_trans_len(desc, SB_JOURNAL(p_s_sb)->j_len) ;
- set_desc_mount_id(desc, SB_JOURNAL(p_s_sb)->j_mount_id) ;
- set_desc_trans_id(desc, SB_JOURNAL(p_s_sb)->j_trans_id) ;
- set_commit_trans_len(commit, SB_JOURNAL(p_s_sb)->j_len);
+ set_desc_trans_len(desc, journal->j_len) ;
+ set_desc_mount_id(desc, journal->j_mount_id) ;
+ set_desc_trans_id(desc, journal->j_trans_id) ;
+ set_commit_trans_len(commit, journal->j_len);
/* special check in case all buffers in the journal were marked for not logging */
- if (SB_JOURNAL(p_s_sb)->j_len == 0) {
+ if (journal->j_len == 0) {
BUG();
}
mark_buffer_dirty(d_bh);
/* first data block is j_start + 1, so add one to cur_write_start wherever you use it */
- cur_write_start = SB_JOURNAL(p_s_sb)->j_start ;
- cn = SB_JOURNAL(p_s_sb)->j_first ;
+ cur_write_start = journal->j_start ;
+ cn = journal->j_first ;
jindex = 1 ; /* start at one so we don't get the desc again */
while(cn) {
- clear_bit(BH_JNew, &(cn->bh->b_state)) ;
+ clear_buffer_journal_new (cn->bh);
/* copy all the real blocks into log area. dirty log blocks */
- if (test_bit(BH_JDirty, &cn->bh->b_state)) {
+ if (buffer_journaled (cn->bh)) {
struct buffer_head *tmp_bh ;
char *addr;
struct page *page;
kunmap(page);
mark_buffer_dirty(tmp_bh);
jindex++ ;
- set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ;
- clear_bit(BH_JDirty, &(cn->bh->b_state)) ;
+ set_buffer_journal_dirty (cn->bh);
+ clear_buffer_journaled (cn->bh);
} else {
/* JDirty cleared sometime during transaction. don't log this one */
reiserfs_warning(p_s_sb, "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!") ;
** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
*/
- SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
+ journal->j_current_jl = alloc_journal_list(p_s_sb);
/* now it is safe to insert this transaction on the main list */
- list_add_tail(&jl->j_list, &SB_JOURNAL(p_s_sb)->j_journal_list);
- list_add_tail(&jl->j_working_list, &SB_JOURNAL(p_s_sb)->j_working_list);
- SB_JOURNAL(p_s_sb)->j_num_work_lists++;
+ list_add_tail(&jl->j_list, &journal->j_journal_list);
+ list_add_tail(&jl->j_working_list, &journal->j_working_list);
+ journal->j_num_work_lists++;
/* reset journal values for the next transaction */
- old_start = SB_JOURNAL(p_s_sb)->j_start ;
- SB_JOURNAL(p_s_sb)->j_start = (SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL(p_s_sb)->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_wcount), 0) ;
- SB_JOURNAL(p_s_sb)->j_bcount = 0 ;
- SB_JOURNAL(p_s_sb)->j_last = NULL ;
- SB_JOURNAL(p_s_sb)->j_first = NULL ;
- SB_JOURNAL(p_s_sb)->j_len = 0 ;
- SB_JOURNAL(p_s_sb)->j_trans_start_time = 0 ;
- SB_JOURNAL(p_s_sb)->j_trans_id++ ;
- SB_JOURNAL(p_s_sb)->j_current_jl->j_trans_id = SB_JOURNAL(p_s_sb)->j_trans_id;
- SB_JOURNAL(p_s_sb)->j_must_wait = 0 ;
- SB_JOURNAL(p_s_sb)->j_len_alloc = 0 ;
- SB_JOURNAL(p_s_sb)->j_next_full_flush = 0 ;
- SB_JOURNAL(p_s_sb)->j_next_async_flush = 0 ;
+ old_start = journal->j_start ;
+ journal->j_start = (journal->j_start + journal->j_len + 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
+ atomic_set(&(journal->j_wcount), 0) ;
+ journal->j_bcount = 0 ;
+ journal->j_last = NULL ;
+ journal->j_first = NULL ;
+ journal->j_len = 0 ;
+ journal->j_trans_start_time = 0 ;
+ journal->j_trans_id++ ;
+ journal->j_current_jl->j_trans_id = journal->j_trans_id;
+ journal->j_must_wait = 0 ;
+ journal->j_len_alloc = 0 ;
+ journal->j_next_full_flush = 0 ;
+ journal->j_next_async_flush = 0 ;
init_journal_hash(p_s_sb) ;
// make sure reiserfs_add_jh sees the new current_jl before we
*/
if (!list_empty(&jl->j_tail_bh_list)) {
unlock_kernel();
- write_ordered_buffers(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock,
- SB_JOURNAL(p_s_sb), jl, &jl->j_tail_bh_list);
+ write_ordered_buffers(&journal->j_dirty_buffers_lock,
+ journal, jl, &jl->j_tail_bh_list);
lock_kernel();
}
if (!list_empty(&jl->j_tail_bh_list))
flush_commit_list(p_s_sb, jl, 1) ;
flush_journal_list(p_s_sb, jl, 1) ;
} else if (!(jl->j_state & LIST_COMMIT_PENDING))
- queue_delayed_work(commit_wq, &SB_JOURNAL(p_s_sb)->j_work, HZ/10);
+ queue_delayed_work(commit_wq, &journal->j_work, HZ/10);
/* if the next transaction has any chance of wrapping, flush
** transactions that might get overwritten.  If any journal lists are very
** old, flush them as well.
*/
first_jl:
- list_for_each_safe(entry, safe, &SB_JOURNAL(p_s_sb)->j_journal_list) {
+ list_for_each_safe(entry, safe, &journal->j_journal_list) {
temp_jl = JOURNAL_LIST_ENTRY(entry);
- if (SB_JOURNAL(p_s_sb)->j_start <= temp_jl->j_start) {
- if ((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >=
+ if (journal->j_start <= temp_jl->j_start) {
+ if ((journal->j_start + journal->j_trans_max + 1) >=
temp_jl->j_start)
{
flush_used_journal_lists(p_s_sb, temp_jl);
goto first_jl;
- } else if ((SB_JOURNAL(p_s_sb)->j_start +
- SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) <
+ } else if ((journal->j_start +
+ journal->j_trans_max + 1) <
SB_ONDISK_JOURNAL_SIZE(p_s_sb))
{
/* if we don't cross into the next transaction and we don't
*/
break;
}
- } else if ((SB_JOURNAL(p_s_sb)->j_start +
- SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) >
+ } else if ((journal->j_start +
+ journal->j_trans_max + 1) >
SB_ONDISK_JOURNAL_SIZE(p_s_sb))
{
- if (((SB_JOURNAL(p_s_sb)->j_start + SB_JOURNAL_TRANS_MAX(p_s_sb) + 1) %
+ if (((journal->j_start + journal->j_trans_max + 1) %
SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >= temp_jl->j_start)
{
flush_used_journal_lists(p_s_sb, temp_jl);
}
flush_old_journal_lists(p_s_sb);
- SB_JOURNAL(p_s_sb)->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, SB_JOURNAL(p_s_sb)->j_current_jl) ;
+ journal->j_current_jl->j_list_bitmap = get_list_bitmap(p_s_sb, journal->j_current_jl) ;
- if (!(SB_JOURNAL(p_s_sb)->j_current_jl->j_list_bitmap)) {
+ if (!(journal->j_current_jl->j_list_bitmap)) {
reiserfs_panic(p_s_sb, "journal-1996: do_journal_end, could not get a list bitmap\n") ;
}
- atomic_set(&(SB_JOURNAL(p_s_sb)->j_jlock), 0) ;
+ atomic_set(&(journal->j_jlock), 0) ;
unlock_journal(p_s_sb) ;
/* wake up anybody waiting to join. */
- clear_bit(WRITERS_QUEUED, &SB_JOURNAL(p_s_sb)->j_state);
- wake_up(&(SB_JOURNAL(p_s_sb)->j_join_wait)) ;
+ clear_bit(J_WRITERS_QUEUED, &journal->j_state);
+ wake_up(&(journal->j_join_wait)) ;
if (!flush && wait_on_commit &&
journal_list_still_alive(p_s_sb, commit_trans_id)) {
}
out:
reiserfs_check_lock_depth(p_s_sb, "journal end2");
- th->t_trans_id = 0;
- return 0 ;
+
+ memset (th, 0, sizeof (*th));
+ /* Re-set th->t_super, so we can properly keep track of how many
+ * persistent transactions there are.  We need to do this so that if this
+ * call is part of a failed restart_transaction, we can free it later. */
+ th->t_super = p_s_sb;
+
+ return journal->j_errno;
+}
+
+static void
+__reiserfs_journal_abort_hard (struct super_block *sb)
+{
+ struct reiserfs_journal *journal = SB_JOURNAL (sb);
+ if (test_bit (J_ABORTED, &journal->j_state))
+ return;
+
+ printk (KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
+ reiserfs_bdevname (sb));
+
+ sb->s_flags |= MS_RDONLY;
+ set_bit (J_ABORTED, &journal->j_state);
+
+#ifdef CONFIG_REISERFS_CHECK
+ dump_stack();
+#endif
+}
+
+static void
+__reiserfs_journal_abort_soft (struct super_block *sb, int errno)
+{
+ struct reiserfs_journal *journal = SB_JOURNAL (sb);
+ if (test_bit (J_ABORTED, &journal->j_state))
+ return;
+
+ if (!journal->j_errno)
+ journal->j_errno = errno;
+
+ __reiserfs_journal_abort_hard (sb);
+}
+
+void
+reiserfs_journal_abort (struct super_block *sb, int errno)
+{
+ return __reiserfs_journal_abort_soft (sb, errno);
}
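
(Illustrative sketch, not part of this patch: the error flow the abort support
above is meant to enable.  journal_begin, journal_end and
reiserfs_journal_abort are the names used in this file; the caller
example_journaled_update and its use of JOURNAL_PER_BALANCE_CNT as a block
count are hypothetical.)

/* Hypothetical caller: once the journal has been aborted, journal_begin() is
 * expected to fail for regular transactions, and do_journal_end() now returns
 * journal->j_errno instead of 0, so the error propagates out of journal_end(). */
static int example_journaled_update(struct super_block *sb)
{
    struct reiserfs_transaction_handle th ;
    int err ;

    err = journal_begin(&th, sb, JOURNAL_PER_BALANCE_CNT) ;
    if (err)
        return err ;    /* journal aborted or otherwise unusable */

    /* ... reiserfs_prepare_for_journal() + journal_mark_dirty() the
     * buffers being modified ... */

    err = journal_end(&th, sb, JOURNAL_PER_BALANCE_CNT) ;
    return err ;        /* carries journal->j_errno after an abort */
}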