#define CCM_OPCODE 1 /* CFM message opcode meaning CCM. */
#define CCM_RDI_MASK 0x80
#define CFM_HEALTH_INTERVAL 6
+
+/* 802.1ag Continuity Check Message (CCM) header, as sent on the wire.
+ * Wrapped in OVS_PACKED() — replacing the GCC-specific
+ * __attribute__((packed)) — so the layout is byte-exact and the
+ * portability macro works on non-GCC compilers. */
+OVS_PACKED(
struct ccm {
    uint8_t mdlevel_version; /* MD Level and Version */
    uint8_t opcode;
    /* TLV space. */
    uint8_t end_tlv;
-} __attribute__((packed));
+});
/* Compile-time guard: the struct must exactly match the wire-format length. */
BUILD_ASSERT_DECL(CCM_LEN == sizeof(struct ccm));
/* Per-interface CFM state.  NOTE(review): additional members are elided by
 * the diff (e.g. fault_override, health, check_tnl_key, remote_mps are
 * referenced elsewhere in this patch). */
struct cfm {
    int health_interval;         /* Number of fault_intervals since health was
                                    recomputed. */
    long long int last_tx;       /* Last CCM transmission time. */
+
+    int ref_cnt;                 /* Reference count; managed by cfm_ref() and
+                                  * cfm_unref(), freed when it drops to 0. */
};
/* Remote MPs represent foreign network entities that are configured to have
cfm->fault_override = -1;
cfm->health = -1;
cfm->last_tx = 0;
+ cfm->ref_cnt = 1;
return cfm;
}
/* Releases a reference to 'cfm' (renamed from cfm_destroy as part of the
 * refcounting conversion).  When the last reference is dropped, frees the
 * remote MPs and 'cfm' itself.  NOTE(review): parts of this hunk are elided
 * by the diff — the bare 'return;' below belongs to an elided NULL guard,
 * and the loop body's closing brace is not visible here. */
void
-cfm_destroy(struct cfm *cfm)
+cfm_unref(struct cfm *cfm)
{
    struct remote_mp *rmp, *rmp_next;
        return;
    }
+    /* A zero count here means an unref-after-free style bug in the caller. */
+    ovs_assert(cfm->ref_cnt);
+    /* Still referenced elsewhere: keep the object alive. */
+    if (--cfm->ref_cnt) {
+        return;
+    }
+
    HMAP_FOR_EACH_SAFE (rmp, rmp_next, node, &cfm->remote_mps) {
        hmap_remove(&cfm->remote_mps, &rmp->node);
        free(rmp);
    free(cfm);
}
+/* Takes a new reference to 'cfm_', which may be NULL (then this is a no-op).
+ * Returns 'cfm_' with its const qualifier cast away for the caller's
+ * convenience; the caller owns the reference and must release it with
+ * cfm_unref(). */
+struct cfm *
+cfm_ref(const struct cfm *cfm_)
+{
+    struct cfm *cfm = CONST_CAST(struct cfm *, cfm_);
+    if (cfm) {
+        /* A non-positive count means the object was already destroyed. */
+        ovs_assert(cfm->ref_cnt > 0);
+        cfm->ref_cnt++;
+    }
+    return cfm;
+}
+
/* Should be run periodically to update fault statistics messages. */
void
cfm_run(struct cfm *cfm)
}
}
-/* Returns true if 'cfm' should process packets from 'flow'. */
+/* Returns true if 'cfm' should process packets from 'flow'.  Sets
+ * fields in 'wc' that were used to make the determination. */
bool
-cfm_should_process_flow(const struct cfm *cfm, const struct flow *flow)
+cfm_should_process_flow(const struct cfm *cfm, const struct flow *flow,
+                        struct flow_wildcards *wc)
{
+    /* dl_dst is always examined below, so unwildcard it unconditionally;
+     * tun_id only matters when 'check_tnl_key' is set.
+     * NOTE(review): dl_type is also read by the check below but is not
+     * unwildcarded here — confirm the caller masks it. */
+    memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
+    if (cfm->check_tnl_key) {
+        memset(&wc->masks.tunnel.tun_id, 0xff, sizeof wc->masks.tunnel.tun_id);
+    }
    return (ntohs(flow->dl_type) == ETH_TYPE_CFM
            && eth_addr_equals(flow->dl_dst, cfm_ccm_addr(cfm))
            && (!cfm->check_tnl_key || flow->tunnel.tun_id == htonll(0)));