return 0 ;
}
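+/*
+ * called when a barrier write comes back -EOPNOTSUPP: clear the
+ * barrier-flush mount option so later commits fall back to plain
+ * synchronous writes of the commit block and journal header
+ */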
+static void disable_barrier(struct super_block *s)
+{
+ REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_BARRIER_FLUSH);
+ printk("reiserfs: disabling flush barriers on %s\n", reiserfs_bdevname(s));
+}
+
static struct reiserfs_bitmap_node *
allocate_bitmap_node(struct super_block *p_s_sb) {
struct reiserfs_bitmap_node *bn ;
submit_bh(WRITE, bh) ;
}
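+/*
+ * take an extra reference on the buffer and send it down as a
+ * WRITE_BARRIER, so the block layer orders it after everything
+ * submitted before it.  Callers wait on the buffer themselves and
+ * must handle -EOPNOTSUPP (and buffer_eopnotsupp()) from devices
+ * that can't do barriers.
+ */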
+static int submit_barrier_buffer(struct buffer_head *bh) {
+ get_bh(bh) ;
+ bh->b_end_io = reiserfs_end_ordered_io;
+ clear_buffer_dirty(bh) ;
+ if (!buffer_uptodate(bh))
+ BUG();
+ return submit_bh(WRITE_BARRIER, bh) ;
+}
+
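+/*
+ * called after waiting on a barrier write.  If the device silently
+ * rejected the barrier (buffer_eopnotsupp), disable barriers and redo
+ * the write as a plain synchronous one.
+ */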
+static void check_barrier_completion(struct super_block *s,
+ struct buffer_head *bh) {
+ if (buffer_eopnotsupp(bh)) {
+ clear_buffer_eopnotsupp(bh);
+ disable_barrier(s);
+ set_buffer_uptodate(bh);
+ set_buffer_dirty(bh);
+ sync_dirty_buffer(bh);
+ }
+}
+
#define CHUNK_SIZE 32
struct buffer_chunk {
struct buffer_head *bh[CHUNK_SIZE];
int bn ;
struct buffer_head *tbh = NULL ;
unsigned long trans_id = jl->j_trans_id;
+ int barrier = 0;
reiserfs_check_lock_depth(s, "flush_commit_list") ;
}
atomic_dec(&SB_JOURNAL(s)->j_async_throttle);
- /* wait on everything written so far before writing the commit */
+ /* wait on everything written so far before writing the commit.
+  * If we are in barrier mode, send the commit down now instead; the
+  * barrier write is ordered after everything submitted before it.
+  */
+ barrier = reiserfs_barrier_flush(s);
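+ /* if the device rejects the barrier with -EOPNOTSUPP, mark the commit
+  * buffer uptodate again and drop back to the plain dirty + sync path
+  * below
+  */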
+ if (barrier) {
+ int ret;
+ lock_buffer(jl->j_commit_bh);
+ ret = submit_barrier_buffer(jl->j_commit_bh);
+ if (ret == -EOPNOTSUPP) {
+ set_buffer_uptodate(jl->j_commit_bh);
+ disable_barrier(s);
+ barrier = 0;
+ }
+ }
for (i = 0 ; i < (jl->j_len + 1) ; i++) {
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s) ;
if (atomic_read(&(jl->j_commit_left)) != 1)
BUG();
- if (buffer_dirty(jl->j_commit_bh))
- BUG();
- mark_buffer_dirty(jl->j_commit_bh) ;
- sync_dirty_buffer(jl->j_commit_bh) ;
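+ /* without barriers, fall back to the old scheme: mark the commit block
+  * dirty and write it synchronously.  With barriers, the commit was
+  * already submitted above, so just wait for it and let
+  * check_barrier_completion() redo the write if the barrier was rejected.
+  */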
+ if (!barrier) {
+ if (buffer_dirty(jl->j_commit_bh))
+ BUG();
+ mark_buffer_dirty(jl->j_commit_bh) ;
+ sync_dirty_buffer(jl->j_commit_bh) ;
+ } else
+ wait_on_buffer(jl->j_commit_bh);
+
+ check_barrier_completion(s, jl->j_commit_bh);
if (!buffer_uptodate(jl->j_commit_bh)) {
reiserfs_panic(s, "journal-615: buffer write failed\n") ;
}
up(&jl->j_commit_lock);
put_jl:
put_journal_list(s, jl);
-
return 0 ;
}
jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
- set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
- sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+
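+ /* write out the updated journal header.  With barriers enabled it goes
+  * down as a barrier write, falling back to a plain synchronous write
+  * if the device returns -EOPNOTSUPP; otherwise just dirty and sync it.
+  */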
+ if (reiserfs_barrier_flush(p_s_sb)) {
+ int ret;
+ lock_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
+ ret = submit_barrier_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
+ if (ret == -EOPNOTSUPP) {
+ set_buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh);
+ disable_barrier(p_s_sb);
+ goto sync;
+ }
+ wait_on_buffer(SB_JOURNAL(p_s_sb)->j_header_bh);
+ check_barrier_completion(p_s_sb, SB_JOURNAL(p_s_sb)->j_header_bh);
+ } else {
+sync:
+ set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+ sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
+ }
if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
reiserfs_warning (p_s_sb, "journal-837: IO error during journal replay");
return -EIO ;
unsigned long cur_len;
int ret;
int i;
+ int limit = 256;
struct reiserfs_journal_list *tjl;
struct reiserfs_journal_list *flush_jl;
unsigned long trans_id;
flush_jl = tjl = jl;
- /* flush for 256 transactions or 256 blocks, whichever comes first */
- for(i = 0 ; i < 256 && len < 256 ; i++) {
+ /* in data logging mode transactions also carry file data, so try
+  * harder to flush a lot of blocks */
+ if (reiserfs_data_log(s))
+ limit = 1024;
+ /* flush for 256 transactions or limit blocks, whichever comes first */
+ for(i = 0 ; i < 256 && len < limit ; i++) {
if (atomic_read(&tjl->j_commit_left) ||
tjl->j_trans_id < jl->j_trans_id) {
break;
REISERFS_I(inode)->i_trans_id = SB_JOURNAL(inode->i_sb)->j_trans_id ;
}
-static void __commit_trans_jl(struct inode *inode, unsigned long id,
+/*
+ * returns -1 on error, 0 if no commits/barriers were done and 1
+ * if a transaction was actually committed and the barrier was done
+ */
+static int __commit_trans_jl(struct inode *inode, unsigned long id,
struct reiserfs_journal_list *jl)
{
struct reiserfs_transaction_handle th ;
struct super_block *sb = inode->i_sb ;
+ int ret = 0;
/* is it from the current transaction, or from an unknown transaction? */
if (id == SB_JOURNAL(sb)->j_trans_id) {
}
journal_end_sync(&th, sb, 1) ;
+ ret = 1;
} else {
/* this gets tricky, we have to make sure the journal list in
*/
flush_commit_only:
if (journal_list_still_alive(inode->i_sb, id)) {
+ /*
+ * we only set ret to 1 when we know for sure
+ * the barrier hasn't been started yet on the commit
+ * block.
+ */
+ if (atomic_read(&jl->j_commit_left) > 1)
+ ret = 1;
flush_commit_list(sb, jl, 1) ;
}
}
/* otherwise the list is gone, and long since committed */
+ return ret;
}
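+/*
+ * commit the transaction this inode was last logged in, passing the
+ * __commit_trans_jl() result back so the caller knows whether a commit
+ * (and barrier) was actually done
+ */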
-void reiserfs_commit_for_inode(struct inode *inode) {
+int reiserfs_commit_for_inode(struct inode *inode) {
unsigned long id = REISERFS_I(inode)->i_trans_id;
struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;
/* jl will be updated in __commit_trans_jl */
}
- __commit_trans_jl(inode, id, jl);
+ return __commit_trans_jl(inode, id, jl);
}
void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
/* copy all the real blocks into log area. dirty log blocks */
if (test_bit(BH_JDirty, &cn->bh->b_state)) {
struct buffer_head *tmp_bh ;
+ char *addr;
+ struct page *page;
tmp_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
set_buffer_uptodate(tmp_bh);
- memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
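+ /* the buffer's page may be in highmem, so kmap it before copying the
+  * data into the log block
+  */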
+ page = cn->bh->b_page;
+ addr = kmap(page);
+ memcpy(tmp_bh->b_data, addr + offset_in_page(cn->bh->b_data),
+ cn->bh->b_size);
+ kunmap(page);
mark_buffer_dirty(tmp_bh);
jindex++ ;
set_bit(BH_JDirty_wait, &(cn->bh->b_state)) ;