/* local debug functions */
#if defined(DEBUG) && !defined(XLOG_NOLOG)
STATIC void xlog_verify_dest_ptr(xlog_t *log, __psint_t ptr);
-STATIC void xlog_verify_disk_cycle_no(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_verify_grant_head(xlog_t *log, int equals);
STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
int count, boolean_t syncing);
STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
xfs_lsn_t tail_lsn);
#else
#define xlog_verify_dest_ptr(a,b)
-#define xlog_verify_disk_cycle_no(a,b)
#define xlog_verify_grant_head(a,b)
#define xlog_verify_iclog(a,b,c,d)
#define xlog_verify_tail_lsn(a,b,c)
#endif

#if defined(XLOG_NOLOG) || defined(DEBUG)
if (!xlog_debug && xlog_target == log->l_targ)
return 0;
#endif
- cb->cb_next = 0;
+ cb->cb_next = NULL;
spl = LOG_LOCK(log);
abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
if (!abortflg) {
ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
(iclog->ic_state == XLOG_STATE_WANT_SYNC));
- cb->cb_next = 0;
+ cb->cb_next = NULL;
*(iclog->ic_callback_tail) = cb;
iclog->ic_callback_tail = &(cb->cb_next);
}
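/*
 * The two assignments above are the classic tail-pointer append for a
 * singly linked list: ic_callback_tail always holds the address of the
 * terminating NULL link, so a callback is queued in O(1) without walking
 * the list. A minimal sketch of the idiom with illustrative (non-XFS)
 * names; an empty list starts with tail = &head:
 */
struct node { struct node *next; };
struct list { struct node *head; struct node **tail; };

static void
list_append(struct list *l, struct node *n)
{
	n->next = NULL;		/* new node terminates the list */
	*l->tail = n;		/* link it after the current last node */
	l->tail = &n->next;	/* its next field is the new end link */
}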
#ifdef DEBUG
xlog_in_core_t *first_iclog;
#endif
xfs_log_iovec_t reg[1];
- xfs_log_ticket_t tic = 0;
+ xfs_log_ticket_t tic = NULL;
xfs_lsn_t lsn;
int error;
SPLDECL(s);
xlog_t *log = mp->m_log;
vfs_t *vfsp = XFS_MTOVFS(mp);
- if (vfsp->vfs_super->s_frozen || XFS_FORCED_SHUTDOWN(mp) ||
+ if (fs_frozen(vfsp) || XFS_FORCED_SHUTDOWN(mp) ||
(vfsp->vfs_flag & VFS_RDONLY))
return 0;
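/*
 * The fs_frozen() change above swaps a direct poke at the Linux
 * superblock (vfs_super->s_frozen) for the VFS-level accessor; judging
 * from the diff the behavior is unchanged: nothing is done while the
 * filesystem is frozen, forced into shutdown, or mounted read-only.
 */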
} else {
/*
* The reservation head is behind the tail.
- * This can only happen when the AIL is empty so the tail
- * is equal to the head and the l_roundoff value in the
- * log structure is taking up the difference between the
- * reservation head and the tail. The bytes accounted for
- * by the l_roundoff field are temporarily 'lost' to the
- * reservation mechanism, but they are cleaned up when the
- * log buffers that created them are reused. These lost
- * bytes are what allow the reservation head to fall behind
- * the tail in the case that the log is 'empty'.
* In this case we just want to return the size of the
* log as the amount of space left.
*/
- ASSERT((tail_cycle == (cycle + 1)) ||
- ((bytes + log->l_roundoff) >= tail_bytes));
+ xfs_fs_cmn_err(CE_ALERT, log->l_mp,
+ "xlog_space_left: head behind tail\n"
+ " tail_cycle = %d, tail_bytes = %d\n"
+ " GH cycle = %d, GH bytes = %d",
+ tail_cycle, tail_bytes, cycle, bytes);
+ ASSERT(0);
free_bytes = log->l_logsize;
}
return free_bytes;
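/*
 * Minimal sketch (illustrative names, not kernel code) of the cycle
 * relationships xlog_space_left() distinguishes: head and tail on the
 * same cycle, head one cycle ahead after wrapping, or head behind the
 * tail, which is the "empty log" case reported above:
 */
static int
space_left_sketch(int logsize, int tail_cycle, int tail_bytes,
		  int head_cycle, int head_bytes)
{
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		return logsize - (head_bytes - tail_bytes);
	if (head_cycle > tail_cycle + 1)
		return 0;			/* corrupt: no space */
	if (head_cycle == tail_cycle + 1)
		return tail_bytes - head_bytes;	/* head has wrapped */
	return logsize;		/* head behind tail: log effectively empty */
}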
int error;
xfs_log_iovec_t reg[1];
- reg[0].i_addr = 0;
+ reg[0].i_addr = NULL;
reg[0].i_len = 0;
ASSERT_ALWAYS(iclog);
/*
- * Flush out the in-core log (iclog) to the on-disk log in a synchronous or
- * asynchronous fashion. Previously, we should have moved the current iclog
+ * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
+ * fashion. Previously, we should have moved the current iclog
* ptr in the log to point to the next available iclog. This allows further
* writes to continue while this code syncs out an iclog ready to go.
* Before an in-core log can be written out, the data section must be scanned
int i, ops;
uint count; /* byte count of bwrite */
uint count_init; /* initial count before roundup */
+ int roundoff; /* roundoff to BB or stripe */
int split = 0; /* split write into two regions */
int error;
+ SPLDECL(s);
+ int v2 = XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb);
XFS_STATS_INC(xs_log_writes);
ASSERT(iclog->ic_refcnt == 0);
count_init = log->l_iclog_hsize + iclog->ic_offset;
/* Round out the log write size */
- if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
- log->l_mp->m_sb.sb_logsunit > 1) {
+ if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
/* we have a v2 stripe unit to use */
count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
} else {
count = BBTOB(BTOBB(count_init));
}
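/*
 * Worked example (numbers are illustrative, not from the patch): with
 * count_init = 5200 on a v2 log with a 4096-byte stripe unit, count
 * rounds up to 8192; without a stripe unit it rounds to the next
 * 512-byte basic block, 5632. The difference becomes roundoff below
 * (2992 and 432 bytes respectively).
 */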
- iclog->ic_roundoff = count - count_init;
- log->l_roundoff += iclog->ic_roundoff;
+ roundoff = count - count_init;
+ ASSERT(roundoff >= 0);
+ ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
+ roundoff < log->l_mp->m_sb.sb_logsunit) ||
+ (log->l_mp->m_sb.sb_logsunit <= 1 &&
+ roundoff < BBTOB(1)));
+
+ /* move grant heads by roundoff in sync */
+ s = GRANT_LOCK(log);
+ XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
+ XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
+ GRANT_UNLOCK(log, s);
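/*
 * Advancing both the write ('w') and reserve ('r') grant heads by
 * roundoff here, under GRANT_LOCK, accounts immediately for the padding
 * bytes the write will occupy on disk. This replaces the deferred
 * l_roundoff/ic_roundoff bookkeeping that the rest of this diff removes.
 */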
- xlog_pack_data(log, iclog); /* put cycle number in every block */
+ /* put cycle number in every block */
+ xlog_pack_data(log, iclog, roundoff);
/* real byte length */
- INT_SET(iclog->ic_header.h_len,
- ARCH_CONVERT,
- iclog->ic_offset + iclog->ic_roundoff);
+ if (v2) {
+ INT_SET(iclog->ic_header.h_len,
+ ARCH_CONVERT,
+ iclog->ic_offset + roundoff);
+ } else {
+ INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+ }
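/*
 * Note the version split above: only v2 (stripe-unit) logs record the
 * padded length in h_len, presumably so recovery can step over the
 * stripe padding; v1 logs keep the unpadded length as before.
 */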
/* put ops count in correct order */
ops = iclog->ic_header.h_num_logops;
if (iclog->ic_state == XLOG_STATE_DIRTY) {
iclog->ic_state = XLOG_STATE_ACTIVE;
iclog->ic_offset = 0;
- iclog->ic_callback = 0; /* don't need to free */
+ iclog->ic_callback = NULL; /* don't need to free */
/*
* If the number of ops in this iclog indicate it just
* contains the dummy transaction, we can
while (cb != 0) {
iclog->ic_callback_tail = &(iclog->ic_callback);
- iclog->ic_callback = 0;
+ iclog->ic_callback = NULL;
LOG_UNLOCK(log, s);
/* perform callbacks in the order given */
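/*
 * Note the pattern above: the callback list is detached (head and tail
 * reset) while LOG_LOCK is held, and the lock is dropped before the
 * callbacks are invoked, so they run without holding the log lock.
 */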
INT_SET(head->h_cycle, ARCH_CONVERT, log->l_curr_cycle);
ASSIGN_LSN(head->h_lsn, log, ARCH_CONVERT);
ASSERT(log->l_curr_block >= 0);
-
- /* round off error from last write with this iclog */
- ticket->t_curr_res -= iclog->ic_roundoff;
- log->l_roundoff -= iclog->ic_roundoff;
- iclog->ic_roundoff = 0;
}
/* If there is enough room to write everything, then do it. Otherwise,
* has already taken care of the roundoff from
* the previous sync.
*/
- ASSERT(iclog->ic_roundoff == 0);
iclog->ic_refcnt++;
lsn = INT_GET(iclog->ic_header.h_lsn, ARCH_CONVERT);
xlog_state_switch_iclogs(log, iclog, 0);
log->l_ticket_cnt++;
log->l_ticket_tcnt++;
}
- t_list->t_next = 0;
+ t_list->t_next = NULL;
log->l_tail = t_list;
LOG_UNLOCK(log, s);
} /* xlog_state_ticket_alloc */
/* no need to clear fields */
#else
/* When we debug, it is easier if tickets are cycled */
- ticket->t_next = 0;
+ ticket->t_next = NULL;
if (log->l_tail != 0) {
log->l_tail->t_next = ticket;
} else {
xlog_panic("xlog_verify_dest_ptr: invalid ptr");
} /* xlog_verify_dest_ptr */
-
-#ifdef DEBUG
-/* check split LR write */
-STATIC void
-xlog_verify_disk_cycle_no(xlog_t *log,
- xlog_in_core_t *iclog)
-{
- xfs_buf_t *bp;
- uint cycle_no;
- xfs_caddr_t ptr;
- xfs_daddr_t i;
-
- if (BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT) < 10) {
- cycle_no = CYCLE_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT);
- bp = xlog_get_bp(log, 1);
- ASSERT(bp);
- for (i = 0; i < BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT); i++) {
- xlog_bread(log, i, 1, bp);
- ptr = xlog_align(log, i, 1, bp);
- if (GET_CYCLE(ptr, ARCH_CONVERT) != cycle_no)
- xlog_warn("XFS: xlog_verify_disk_cycle_no: bad cycle no");
- }
- xlog_put_bp(bp);
- }
-} /* xlog_verify_disk_cycle_no */
-#endif
-
STATIC void
xlog_verify_grant_head(xlog_t *log, int equals)
{