* multiple hb threads are watching multiple regions. A node is live
* whenever any of the threads sees activity from the node in its region.
*/
-static DEFINE_SPINLOCK(o2hb_live_lock);
+static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
struct o2hb_bio_wait_ctxt {
atomic_t wc_num_reqs;
struct completion wc_io_complete;
- int wc_error;
};
static void o2hb_write_timeout(void *arg)
{
atomic_set(&wc->wc_num_reqs, num_ios);
init_completion(&wc->wc_io_complete);
- wc->wc_error = 0;
}
/* Used in error paths too */
{
struct o2hb_bio_wait_ctxt *wc = bio->bi_private;
- if (error) {
+ if (error)
mlog(ML_ERROR, "IO Error %d\n", error);
- wc->wc_error = error;
- }
if (bio->bi_size)
return 1;
bail_and_wait:
o2hb_wait_on_io(reg, &wc);
- if (wc.wc_error && !status)
- status = wc.wc_error;
if (bios) {
for(i = 0; i < num_bios; i++)
static void o2hb_dump_slot(struct o2hb_disk_heartbeat_block *hb_block)
{
- mlog(ML_ERROR, "Dump slot information: seq = 0x%llx, node = %u, "
- "cksum = 0x%x, generation 0x%llx\n",
- (long long)le64_to_cpu(hb_block->hb_seq),
- hb_block->hb_node, le32_to_cpu(hb_block->hb_cksum),
- (long long)le64_to_cpu(hb_block->hb_generation));
+ mlog(ML_ERROR, "Dump slot information: seq = 0x%"MLFx64", node = %u, "
+ "cksum = 0x%x, generation 0x%"MLFx64"\n",
+ le64_to_cpu(hb_block->hb_seq), hb_block->hb_node,
+ le32_to_cpu(hb_block->hb_cksum),
+ le64_to_cpu(hb_block->hb_generation));
}
static int o2hb_verify_crc(struct o2hb_region *reg,
hb_block->hb_seq = cpu_to_le64(cputime);
hb_block->hb_node = node_num;
hb_block->hb_generation = cpu_to_le64(generation);
- hb_block->hb_dead_ms = cpu_to_le32(o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS);
/* This step must always happen last! */
hb_block->hb_cksum = cpu_to_le32(o2hb_compute_block_crc_le(reg,
hb_block));
- mlog(ML_HB_BIO, "our node generation = 0x%llx, cksum = 0x%x\n",
- (long long)cpu_to_le64(generation),
- le32_to_cpu(hb_block->hb_cksum));
+ mlog(ML_HB_BIO, "our node generation = 0x%"MLFx64", cksum = 0x%x\n",
+ cpu_to_le64(generation), le32_to_cpu(hb_block->hb_cksum));
}
static void o2hb_fire_callbacks(struct o2hb_callback *hbcall,
struct o2nm_node *node;
struct o2hb_disk_heartbeat_block *hb_block = reg->hr_tmp_block;
u64 cputime;
- unsigned int dead_ms = o2hb_dead_threshold * O2HB_REGION_TIMEOUT_MS;
- unsigned int slot_dead_ms;
memcpy(hb_block, slot->ds_raw_block, reg->hr_block_bytes);
if (slot->ds_last_generation != le64_to_cpu(hb_block->hb_generation)) {
gen_changed = 1;
slot->ds_equal_samples = 0;
- mlog(ML_HEARTBEAT, "Node %d changed generation (0x%llx "
- "to 0x%llx)\n", slot->ds_node_num,
- (long long)slot->ds_last_generation,
- (long long)le64_to_cpu(hb_block->hb_generation));
+ mlog(ML_HEARTBEAT, "Node %d changed generation (0x%"MLFx64" "
+ "to 0x%"MLFx64")\n", slot->ds_node_num,
+ slot->ds_last_generation,
+ le64_to_cpu(hb_block->hb_generation));
}
slot->ds_last_generation = le64_to_cpu(hb_block->hb_generation);
- mlog(ML_HEARTBEAT, "Slot %d gen 0x%llx cksum 0x%x "
- "seq %llu last %llu changed %u equal %u\n",
- slot->ds_node_num, (long long)slot->ds_last_generation,
- le32_to_cpu(hb_block->hb_cksum),
- (unsigned long long)le64_to_cpu(hb_block->hb_seq),
- (unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
+ mlog(ML_HEARTBEAT, "Slot %d gen 0x%"MLFx64" cksum 0x%x "
+ "seq %"MLFu64" last %"MLFu64" changed %u equal %u\n",
+ slot->ds_node_num, slot->ds_last_generation,
+ le32_to_cpu(hb_block->hb_cksum), le64_to_cpu(hb_block->hb_seq),
+ slot->ds_last_time, slot->ds_changed_samples,
slot->ds_equal_samples);
spin_lock(&o2hb_live_lock);
* changes at any time during their dead time */
if (list_empty(&slot->ds_live_item) &&
slot->ds_changed_samples >= O2HB_LIVE_THRESHOLD) {
- mlog(ML_HEARTBEAT, "Node %d (id 0x%llx) joined my region\n",
- slot->ds_node_num, (long long)slot->ds_last_generation);
+ mlog(ML_HEARTBEAT, "Node %d (id 0x%"MLFx64") joined my "
+ "region\n", slot->ds_node_num, slot->ds_last_generation);
/* first on the list generates a callback */
if (list_empty(&o2hb_live_slots[slot->ds_node_num])) {
&o2hb_live_slots[slot->ds_node_num]);
slot->ds_equal_samples = 0;
-
- /* We want to be sure that all nodes agree on the
- * number of milliseconds before a node will be
- * considered dead. The self-fencing timeout is
- * computed from this value, and a discrepancy might
- * result in heartbeat calling a node dead when it
- * hasn't self-fenced yet. */
- slot_dead_ms = le32_to_cpu(hb_block->hb_dead_ms);
- if (slot_dead_ms && slot_dead_ms != dead_ms) {
- /* TODO: Perhaps we can fail the region here. */
- mlog(ML_ERROR, "Node %d on device %s has a dead count "
- "of %u ms, but our count is %u ms.\n"
- "Please double check your configuration values "
- "for 'O2CB_HEARTBEAT_THRESHOLD'\n",
- slot->ds_node_num, reg->hr_dev_name, slot_dead_ms,
- dead_ms);
- }
goto out;
}
return highest;
}
-static int o2hb_do_disk_heartbeat(struct o2hb_region *reg)
+static void o2hb_do_disk_heartbeat(struct o2hb_region *reg)
{
int i, ret, highest_node, change = 0;
unsigned long configured_nodes[BITS_TO_LONGS(O2NM_MAX_NODES)];
struct bio *write_bio;
struct o2hb_bio_wait_ctxt write_wc;
- ret = o2nm_configured_node_map(configured_nodes,
- sizeof(configured_nodes));
- if (ret) {
- mlog_errno(ret);
- return ret;
- }
+ if (o2nm_configured_node_map(configured_nodes, sizeof(configured_nodes)))
+ return;
highest_node = o2hb_highest_node(configured_nodes, O2NM_MAX_NODES);
if (highest_node >= O2NM_MAX_NODES) {
mlog(ML_NOTICE, "ocfs2_heartbeat: no configured nodes found!\n");
- return -EINVAL;
+ return;
}
/* No sense in reading the slots of nodes that don't exist
ret = o2hb_read_slots(reg, highest_node + 1);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ return;
}
/* With an up to date view of the slots, we can check that no
ret = o2hb_issue_node_write(reg, &write_bio, &write_wc);
if (ret < 0) {
mlog_errno(ret);
- return ret;
+ return;
}
i = -1;
*/
o2hb_wait_on_io(reg, &write_wc);
bio_put(write_bio);
- if (write_wc.wc_error) {
- /* Do not re-arm the write timeout on I/O error - we
- * can't be sure that the new block ever made it to
- * disk */
- mlog(ML_ERROR, "Write error %d on device \"%s\"\n",
- write_wc.wc_error, reg->hr_dev_name);
- return write_wc.wc_error;
- }
-
o2hb_arm_write_timeout(reg);
/* let the person who launched us know when things are steady */
	if (atomic_dec_and_test(&reg->hr_steady_iterations))
wake_up(&o2hb_steady_queue);
}
-
- return 0;
}
/* Subtract b from a, storing the result in a. a *must* have a larger
* likely to time itself out. */
do_gettimeofday(&before_hb);
- i = 0;
- do {
- ret = o2hb_do_disk_heartbeat(reg);
- } while (ret && ++i < 2);
+ o2hb_do_disk_heartbeat(reg);
do_gettimeofday(&after_hb);
elapsed_msec = o2hb_elapsed_msecs(&before_hb, &after_hb);