diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 7b6e9f793..e2ac9adbc 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -222,6 +222,72 @@ struct skb_priority_to_dscp {
     uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
 };
 
+enum xc_type {
+    XC_RULE,
+    XC_BOND,
+    XC_NETDEV,
+    XC_NETFLOW,
+    XC_MIRROR,
+    XC_LEARN,
+    XC_NORMAL,
+    XC_FIN_TIMEOUT,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows). */
+struct xc_entry {
+    enum xc_type type;
+    union {
+        struct rule_dpif *rule;
+        struct {
+            struct netdev *tx;
+            struct netdev *rx;
+            struct bfd *bfd;
+        } dev;
+        struct {
+            struct netflow *netflow;
+            struct flow *flow;
+            ofp_port_t iface;
+        } nf;
+        struct {
+            struct mbridge *mbridge;
+            mirror_mask_t mirrors;
+        } mirror;
+        struct {
+            struct bond *bond;
+            struct flow *flow;
+            uint16_t vid;
+        } bond;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct rule_dpif *rule;
+        } learn;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct flow *flow;
+            int vlan;
+        } normal;
+        struct {
+            struct rule_dpif *rule;
+            uint16_t idle;
+            uint16_t hard;
+        } fin;
+    } u;
+};
+
+#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
+    entries = xcache->entries;                                  \
+    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
+         entry;                                                 \
+         entry = ofpbuf_try_pull(&entries, sizeof *entry))
+
+struct xlate_cache {
+    struct ofpbuf entries;
+};
+
 static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
 static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
 static struct hmap xports = HMAP_INITIALIZER(&xports);
@@ -252,6 +318,9 @@ static void clear_skb_priorities(struct xport *);
 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                    uint8_t *dscp);
 
+static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
+                                              enum xc_type type);
+
 void
 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                   struct dpif *dpif, struct rule_dpif *miss_rule,
@@ -701,8 +770,9 @@ xport_stp_listen_state(const struct xport *xport)
 static bool
 stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
 {
+    /* is_stp() also checks dl_type, but dl_type is always set in 'wc'. */
     memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
-    return eth_addr_equals(flow->dl_dst, eth_addr_stp);
+    return is_stp(flow);
 }
 
 static void
@@ -1148,11 +1218,11 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
 
     if (ctx->xbridge->enable_recirc) {
         ctx->xout->use_recirc = bond_may_recirc(
-            out_xbundle->bond, &xr->recirc_id, &xr->hash_bias);
+            out_xbundle->bond, &xr->recirc_id, &xr->hash_basis);
 
         if (ctx->xout->use_recirc) {
             /* Only TCP mode uses recirculation. */
-            xr->hash_alg = OVS_RECIRC_HASH_ALG_L4;
+            xr->hash_alg = OVS_HASH_ALG_L4;
             bond_update_post_recirc_rules(out_xbundle->bond, false);
 
             /* Recirculation does not require unmasking hash fields. */
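The XC_ENTRY_FOR_EACH macro added earlier in this diff iterates over a flat
byte buffer of fixed-size entries rather than a linked list:
xlate_cache_add_entry() appends a zeroed struct xc_entry with
ofpbuf_put_zeros(), and iteration pulls entries off a copy of the ofpbuf
header, leaving the cache itself intact for later replay. A minimal
standalone sketch of that pattern follows (not part of the patch;
"toy_entry" and "toy_cache_demo" are hypothetical stand-ins for struct
xc_entry and its users):

#include <stdint.h>
#include "ofpbuf.h"

struct toy_entry {              /* Hypothetical stand-in for struct xc_entry. */
    int type;
    uint64_t n_bytes;
};

static void
toy_cache_demo(void)
{
    struct ofpbuf cache;
    struct ofpbuf iter;
    struct toy_entry *e;

    ofpbuf_init(&cache, 512);

    /* Append: this is what xlate_cache_add_entry() does. */
    e = ofpbuf_put_zeros(&cache, sizeof *e);
    e->type = 1;
    e->n_bytes = 42;

    /* Iterate: copy the ofpbuf header, then pull entries off the copy.
     * The backing data (and 'cache' itself) are untouched, so the same
     * cache can be replayed many times. */
    iter = cache;
    for (e = ofpbuf_try_pull(&iter, sizeof *e); e;
         e = ofpbuf_try_pull(&iter, sizeof *e)) {
        /* Replay the side effect recorded in 'e'. */
    }

    ofpbuf_uninit(&cache);
}

Keeping the entries contiguous makes clearing the cache a single
ofpbuf_clear() and keeps replay sequential in memory, which suits a path
that replays these caches frequently.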
@@ -1169,9 +1239,23 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
             return;
         }
 
-        if (ctx->xin->resubmit_stats) {
-            bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
-                         ctx->xin->resubmit_stats->n_bytes);
+        /* If ctx->xout->use_recirc is set, the main thread will handle stats
+         * accounting for this bond. */
+        if (!ctx->xout->use_recirc) {
+            if (ctx->xin->resubmit_stats) {
+                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
+                             ctx->xin->resubmit_stats->n_bytes);
+            }
+            if (ctx->xin->xcache) {
+                struct xc_entry *entry;
+                struct flow *flow;
+
+                flow = &ctx->xin->flow;
+                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
+                entry->u.bond.bond = bond_ref(out_xbundle->bond);
+                entry->u.bond.flow = xmemdup(flow, sizeof *flow);
+                entry->u.bond.vid = vid;
+            }
         }
     }
 
@@ -1459,6 +1543,15 @@ xlate_normal(struct xlate_ctx *ctx)
     if (ctx->xin->may_learn) {
         update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
     }
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        /* Save enough info to update mac learning table later. */
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
+        entry->u.normal.ofproto = ctx->xin->ofproto;
+        entry->u.normal.flow = xmemdup(flow, sizeof *flow);
+        entry->u.normal.vlan = vlan;
+    }
 
     /* Determine output bundle. */
     ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
@@ -1516,8 +1609,10 @@ compose_sample_action(const struct xbridge *xbridge,
     actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
 
     odp_port = ofp_port_to_odp_port(xbridge, flow->in_port.ofp_port);
-    pid = dpif_port_get_pid(xbridge->dpif, odp_port, 0);
-    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
+    pid = dpif_port_get_pid(xbridge->dpif, odp_port,
+                            flow_hash_5tuple(flow, 0));
+    cookie_offset = odp_put_userspace_action(pid, cookie, cookie_size,
+                                             odp_actions);
 
     nl_msg_end_nested(odp_actions, actions_offset);
     nl_msg_end_nested(odp_actions, sample_offset);
@@ -1717,7 +1812,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
 
     /* If 'struct flow' gets additional metadata, we'll need to zero it out
      * before traversing a patch port. */
-    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 25);
+    BUILD_ASSERT_DECL(FLOW_WC_SEQ == 26);
 
     if (!xport) {
         xlate_report(ctx, "Nonexistent output port");
@@ -1726,7 +1821,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
         return;
     } else if (check_stp) {
-        if (eth_addr_equals(ctx->base_flow.dl_dst, eth_addr_stp)) {
+        if (is_stp(&ctx->base_flow)) {
             if (!xport_stp_listen_state(xport)) {
                 xlate_report(ctx, "STP not in listening state, "
                              "skipping bpdu output");
@@ -1785,6 +1880,14 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                 bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
             }
         }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+            entry->u.dev.rx = netdev_ref(peer->netdev);
+            entry->u.dev.bfd = bfd_ref(peer->bfd);
+        }
 
         return;
     }
@@ -1794,7 +1897,7 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
 
     flow_nw_tos = flow->nw_tos;
     if (dscp_from_skb_priority(xport, flow->skb_priority, &dscp)) {
-        wc->masks.nw_tos |= IP_ECN_MASK;
+        wc->masks.nw_tos |= IP_DSCP_MASK;
         flow->nw_tos &= ~IP_DSCP_MASK;
         flow->nw_tos |= dscp;
     }
@@ -1817,6 +1920,12 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         if (ctx->xin->resubmit_stats) {
             netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
         }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+        }
         out_port = odp_port;
         commit_odp_tunnel_action(flow, &ctx->base_flow,
                                  &ctx->xout->odp_actions);
@@ -1843,14 +1952,19 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                                        &ctx->xout->wc);
 
         if (ctx->xout->use_recirc) {
-            struct ovs_action_recirc *act_recirc;
+            struct ovs_action_hash *act_hash;
             struct xlate_recirc *xr = &ctx->xout->recirc;
 
-            act_recirc = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
-                                                  OVS_ACTION_ATTR_RECIRC, sizeof *act_recirc);
-            act_recirc->recirc_id = xr->recirc_id;
-            act_recirc->hash_alg = xr->hash_alg;
-            act_recirc->hash_bias = xr->hash_bias;
+            /* Hash action. */
+            act_hash = nl_msg_put_unspec_uninit(&ctx->xout->odp_actions,
+                                                OVS_ACTION_ATTR_HASH,
+                                                sizeof *act_hash);
+            act_hash->hash_alg = xr->hash_alg;
+            act_hash->hash_basis = xr->hash_basis;
+
+            /* Recirc action. */
+            nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_RECIRC,
+                           xr->recirc_id);
         } else {
             nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
                                 out_port);
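With the hunk above, the recirculation case emits two datapath actions, a
hash action followed by a recirc action, instead of the single combined
OVS_ACTION_ATTR_RECIRC it replaced. In ovs-dpctl's action syntax the emitted
pair looks roughly like the following (the hash basis and recirculation id
shown are illustrative values only):

    hash(hash_l4(0)),recirc(0x1)

Splitting the two lets the datapath compute and store the L4 hash once, and
lets other users of recirculation emit OVS_ACTION_ATTR_RECIRC without a
hash action at all.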
@@ -1878,7 +1992,7 @@ static void
 xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
 {
     struct rule_dpif *old_rule = ctx->rule;
-    struct rule_actions *actions;
+    const struct rule_actions *actions;
 
     if (ctx->xin->resubmit_stats) {
         rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
@@ -1937,7 +2051,8 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
                                   !skip_wildcards
                                   ? &ctx->xout->wc : NULL,
                                   honor_table_miss,
-                                  &ctx->table_id, &rule);
+                                  &ctx->table_id, &rule,
+                                  ctx->xin->xcache != NULL);
     ctx->xin->flow.in_port.ofp_port = old_in_port;
 
     if (ctx->xin->resubmit_hook) {
@@ -1970,12 +2085,22 @@ xlate_table_action(struct xlate_ctx *ctx, ofp_port_t in_port, uint8_t table_id,
         }
 
         choose_miss_rule(config, ctx->xbridge->miss_rule,
-                         ctx->xbridge->no_packet_in_rule, &rule);
+                         ctx->xbridge->no_packet_in_rule, &rule,
+                         ctx->xin->xcache != NULL);
 
 match:
     if (rule) {
+        /* Fill in the cache entry here instead of in xlate_recursively
+         * to make the reference counting more explicit. We take a
+         * reference in the lookups above if we are going to cache the
+         * rule. */
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+            entry->u.rule = rule;
+        }
         xlate_recursively(ctx, rule);
-        rule_dpif_unref(rule);
     }
 
     ctx->table_id = old_table_id;
@@ -2527,15 +2652,45 @@ xlate_learn_action(struct xlate_ctx *ctx,
     learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
     ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
     ofpbuf_uninit(&ofpacts);
+
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
+        entry->u.learn.ofproto = ctx->xin->ofproto;
+        /* Look up the learned rule, taking a reference on it. The reference
+         * is released when this cache entry is deleted. */
+        rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
+                         &entry->u.learn.rule, true);
+    }
+}
+
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+                    uint16_t idle_timeout, uint16_t hard_timeout)
+{
+    if (tcp_flags & (TCP_FIN | TCP_RST)) {
+        rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+    }
 }
 
 static void
 xlate_fin_timeout(struct xlate_ctx *ctx,
                   const struct ofpact_fin_timeout *oft)
 {
-    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
-        rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
-                                  oft->fin_hard_timeout);
+    if (ctx->rule) {
+        xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+                            oft->fin_idle_timeout, oft->fin_hard_timeout);
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+            /* XC_RULE already holds a reference on the rule, none is taken
+             * here. */
+            entry->u.fin.rule = ctx->rule;
+            entry->u.fin.idle = oft->fin_idle_timeout;
+            entry->u.fin.hard = oft->fin_hard_timeout;
+        }
     }
 }
 
@@ -2570,7 +2725,7 @@ xlate_sample_action(struct xlate_ctx *ctx,
 static bool
 may_receive(const struct xport *xport, struct xlate_ctx *ctx)
 {
-    if (xport->config & (eth_addr_equals(ctx->xin->flow.dl_dst, eth_addr_stp)
+    if (xport->config & (is_stp(&ctx->xin->flow)
                          ? OFPUTIL_PC_NO_RECV_STP
                          : OFPUTIL_PC_NO_RECV)) {
         return false;
@@ -2771,15 +2926,22 @@ do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
         case OFPACT_SET_FIELD:
             set_field = ofpact_get_SET_FIELD(a);
             mf = set_field->field;
-            mf_mask_field_and_prereqs(mf, &wc->masks);
 
             /* Set field action only ever overwrites packet's outermost
              * applicable header fields. Do nothing if no header exists. */
-            if ((mf->id != MFF_VLAN_VID || flow->vlan_tci & htons(VLAN_CFI))
-                && ((mf->id != MFF_MPLS_LABEL && mf->id != MFF_MPLS_TC)
-                    || eth_type_mpls(flow->dl_type))) {
-                mf_set_flow_value(mf, &set_field->value, flow);
+            if (mf->id == MFF_VLAN_VID) {
+                wc->masks.vlan_tci |= htons(VLAN_CFI);
+                if (!(flow->vlan_tci & htons(VLAN_CFI))) {
+                    break;
+                }
+            } else if ((mf->id == MFF_MPLS_LABEL || mf->id == MFF_MPLS_TC)
+                       /* 'dl_type' is already unwildcarded. */
+                       && !eth_type_mpls(flow->dl_type)) {
+                break;
             }
+
+            mf_mask_field_and_prereqs(mf, &wc->masks);
+            mf_set_flow_value(mf, &set_field->value, flow);
             break;
 
         case OFPACT_STACK_PUSH:
@@ -2900,6 +3062,7 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
     xin->packet = packet;
     xin->may_learn = packet != NULL;
     xin->rule = rule;
+    xin->xcache = NULL;
     xin->ofpacts = NULL;
     xin->ofpacts_len = 0;
     xin->tcp_flags = tcp_flags;
@@ -3028,7 +3191,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     struct flow *flow = &xin->flow;
     struct rule_dpif *rule = NULL;
 
-    struct rule_actions *actions = NULL;
+    const struct rule_actions *actions = NULL;
     enum slow_path_reason special;
     const struct ofpact *ofpacts;
     struct xport *in_port;
@@ -3075,7 +3238,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
 
     ctx.xbridge = xbridge_lookup(xin->ofproto);
     if (!ctx.xbridge) {
-        goto out;
+        return;
     }
 
     ctx.rule = xin->rule;
@@ -3108,10 +3271,16 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     if (!xin->ofpacts && !ctx.rule) {
         ctx.table_id = rule_dpif_lookup(ctx.xbridge->ofproto, flow,
                                         !xin->skip_wildcards ? wc : NULL,
-                                        &rule);
+                                        &rule, ctx.xin->xcache != NULL);
         if (ctx.xin->resubmit_stats) {
             rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
         }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
+            entry->u.rule = rule;
+        }
         ctx.rule = rule;
     }
 
@@ -3147,7 +3316,7 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
             break;
 
         case OFPC_FRAG_DROP:
-            goto out;
+            return;
 
         case OFPC_FRAG_REASM:
             OVS_NOT_REACHED();
@@ -3162,10 +3331,19 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     }
 
     in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
-    if (in_port && in_port->is_tunnel && ctx.xin->resubmit_stats) {
-        netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
-        if (in_port->bfd) {
-            bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+    if (in_port && in_port->is_tunnel) {
+        if (ctx.xin->resubmit_stats) {
+            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+            if (in_port->bfd) {
+                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+            }
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+            entry->u.dev.rx = netdev_ref(in_port->netdev);
+            entry->u.dev.bfd = bfd_ref(in_port->bfd);
         }
     }
 
@@ -3222,28 +3400,45 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         ctx.xout->slow |= SLOW_ACTION;
     }
 
-    if (ctx.xin->resubmit_stats) {
-        mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
-                            ctx.xin->resubmit_stats->n_packets,
-                            ctx.xin->resubmit_stats->n_bytes);
-
-        if (ctx.xbridge->netflow) {
-            const struct ofpact *ofpacts;
-            size_t ofpacts_len;
-
-            ofpacts_len = actions->ofpacts_len;
-            ofpacts = actions->ofpacts;
-            if (ofpacts_len == 0
-                || ofpacts->type != OFPACT_CONTROLLER
-                || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
-                /* Only update netflow if we don't have controller flow. We don't
-                 * report NetFlow expiration messages for such facets because they
-                 * are just part of the control logic for the network, not real
-                 * traffic. */
+    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+        if (ctx.xin->resubmit_stats) {
+            mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+                                ctx.xin->resubmit_stats->n_packets,
+                                ctx.xin->resubmit_stats->n_bytes);
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
+            entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+            entry->u.mirror.mirrors = xout->mirrors;
+        }
+    }
+
+    if (ctx.xbridge->netflow) {
+        const struct ofpact *ofpacts = actions->ofpacts;
+        size_t ofpacts_len = actions->ofpacts_len;
+
+        /* Only update netflow if we don't have controller flow. We don't
+         * report NetFlow expiration messages for such facets because they
+         * are just part of the control logic for the network, not real
+         * traffic. */
+        if (ofpacts_len == 0
+            || ofpacts->type != OFPACT_CONTROLLER
+            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
+            if (ctx.xin->resubmit_stats) {
                 netflow_flow_update(ctx.xbridge->netflow, flow,
                                     xout->nf_output_iface,
                                     ctx.xin->resubmit_stats);
             }
+            if (ctx.xin->xcache) {
+                struct xc_entry *entry;
+
+                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+                entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+                entry->u.nf.iface = xout->nf_output_iface;
+            }
         }
     }
 
@@ -3268,9 +3463,6 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         wc->masks.tp_src &= htons(UINT8_MAX);
         wc->masks.tp_dst &= htons(UINT8_MAX);
     }
-
-out:
-    rule_dpif_unref(rule);
 }
 
 /* Sends 'packet' out 'ofport'.
@@ -3302,3 +3494,189 @@ xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
                                         &output.ofpact, sizeof output,
                                         packet);
 }
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+    struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+    ofpbuf_init(&xcache->entries, 512);
+    return xcache;
+}
+
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+    struct xc_entry *entry;
+
+    entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+    entry->type = type;
+
+    return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+    if (entry->u.dev.tx) {
+        netdev_vport_inc_tx(entry->u.dev.tx, stats);
+    }
+    if (entry->u.dev.rx) {
+        netdev_vport_inc_rx(entry->u.dev.rx, stats);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_account_rx(entry->u.dev.bfd, stats);
+    }
+}
+
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+    struct xbridge *xbridge;
+    struct xbundle *xbundle;
+    struct flow_wildcards wc;
+
+    xbridge = xbridge_lookup(ofproto);
+    if (!xbridge) {
+        return;
+    }
+
+    xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+                                  NULL);
+    if (!xbundle) {
+        return;
+    }
+
+    update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+                 const struct dpif_flow_stats *stats)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries = xcache->entries;
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_credit_stats(entry->u.rule, stats);
+            break;
+        case XC_BOND:
+            bond_account(entry->u.bond.bond, entry->u.bond.flow,
+                         entry->u.bond.vid, stats->n_bytes);
+            break;
+        case XC_NETDEV:
+            xlate_cache_netdev(entry, stats);
+            break;
+        case XC_NETFLOW:
+            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
+                                entry->u.nf.iface, stats);
+            break;
+        case XC_MIRROR:
+            mirror_update_stats(entry->u.mirror.mbridge,
+                                entry->u.mirror.mirrors,
+                                stats->n_packets, stats->n_bytes);
+            break;
+        case XC_LEARN:
+            if (may_learn) {
+                struct rule_dpif *rule = entry->u.learn.rule;
+
+                /* Reset the modified time for a rule that is equivalent to
+                 * the currently cached rule. If the rule is not the exact
+                 * rule we have cached, update the reference that we have. */
+                entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+            }
+            break;
+        case XC_NORMAL:
+            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+                               entry->u.normal.vlan);
+            break;
+        case XC_FIN_TIMEOUT:
+            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+                                entry->u.fin.idle, entry->u.fin.hard);
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+    if (entry->u.dev.tx) {
+        netdev_close(entry->u.dev.tx);
+    }
+    if (entry->u.dev.rx) {
+        netdev_close(entry->u.dev.rx);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_unref(entry->u.dev.bfd);
+    }
+}
+
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+    netflow_expire(netflow, flow);
+    netflow_flow_clear(netflow, flow);
+    netflow_unref(netflow);
+    free(flow);
+}
+
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries;
+
+    if (!xcache) {
+        return;
+    }
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_unref(entry->u.rule);
+            break;
+        case XC_BOND:
+            free(entry->u.bond.flow);
+            bond_unref(entry->u.bond.bond);
+            break;
+        case XC_NETDEV:
+            xlate_dev_unref(entry);
+            break;
+        case XC_NETFLOW:
+            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
+            break;
+        case XC_MIRROR:
+            mbridge_unref(entry->u.mirror.mbridge);
+            break;
+        case XC_LEARN:
+            /* 'u.learn.rule' is the learned rule. */
+            rule_dpif_unref(entry->u.learn.rule);
+            break;
+        case XC_NORMAL:
+            free(entry->u.normal.flow);
+            break;
+        case XC_FIN_TIMEOUT:
+            /* 'u.fin.rule' is always already held as an XC_RULE, which
+             * has already released its reference above. */
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+
+    ofpbuf_clear(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+    xlate_cache_clear(xcache);
+    ofpbuf_uninit(&xcache->entries);
+    free(xcache);
+}
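Taken together, the additions give callers a record/replay interface:
translate once with a cache attached, then apply the translation's side
effects repeatedly from the cache without re-translating. A minimal consumer
sketch follows (not part of the patch; revalidate_flow_sketch and its
arguments are hypothetical, and xlate_in_init() is assumed to keep the
six-argument signature this tree uses):

#include "ofproto/ofproto-dpif-xlate.h"

static void
revalidate_flow_sketch(struct ofproto_dpif *ofproto, struct flow *flow,
                       const struct dpif_flow_stats *stats)
{
    struct xlate_cache *xcache = xlate_cache_new();
    struct xlate_in xin;
    struct xlate_out xout;

    /* Record: a full translation whose side effects (rule stats, bond
     * accounting, netflow, mirrors, learns, ...) are logged as XC_*
     * entries in 'xcache' as they happen. */
    xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
    xin.resubmit_stats = stats;
    xin.xcache = xcache;            /* xlate_in_init() leaves this NULL. */
    xlate_actions(&xin, &xout);
    xlate_out_uninit(&xout);

    /* Replay: on later stats updates, push new statistics through the
     * same objects without re-translating the flow. */
    xlate_push_stats(xcache, true, stats);

    /* Drop the references taken by the XC_* entries. */
    xlate_cache_delete(xcache);
}

The reference counting follows the cache: netdev_ref(), bond_ref(),
netflow_ref(), and the rule references taken during lookup are all released
in xlate_cache_clear(), so a cache can safely outlive the configuration it
was recorded against.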