From b256dc525c8ef663daf2330463e67a26207cc5f1 Mon Sep 17 00:00:00 2001
From: Joe Stringer
Date: Thu, 10 Apr 2014 16:00:28 +1200
Subject: [PATCH] ofproto-dpif-xlate: Cache xlate_actions() effects.

This patch adds a new object called 'struct xlate_cache' which can be
set in 'struct xlate_in', and passed to xlate_actions() to cache the
modules affected by this flow translation. Subsequently, the caller can
pass the xcache to xlate_push_stats() to credit stats and perform side
effects for a lower cost than full flow translation.

These changes are currently aimed at long-lived flows, decreasing the
average dump duration for such flows by 50-80%. This allows more flows
to be supported in the datapath at a given time. Applying these changes
to short-lived flows is left for a later commit.

Signed-off-by: Joe Stringer
Acked-by: Ethan Jackson
---
v2: Acked.
v1: Add caching for fin_timeout action.
    Expire netflows on xlate_cache_clear().
    Account to bonds using a copy of 'flow' rather than hash.
    Always build XC_NORMAL entry (previously only if may_learn is true).
    Rename xlate_from_cache() -> xlate_push_stats().
    Add may_learn parameter to xlate_push_stats().
    Tidy up xlate_actions__() mirror/netflow code.
    Fold in style fixups.
RFC: First post.
---
 ofproto/ofproto-dpif-upcall.c |  33 ++-
 ofproto/ofproto-dpif-xlate.c  | 414 +++++++++++++++++++++++++++++++---
 ofproto/ofproto-dpif-xlate.h  |  16 ++
 3 files changed, 432 insertions(+), 31 deletions(-)

diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index 938cfdebc..9bf133660 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -168,6 +168,9 @@ struct udpif_key {
     bool mark;                     /* Used by mark and sweep GC algorithm. */
 
     struct odputil_keybuf key_buf; /* Memory for 'key'. */
+    struct xlate_cache *xcache;    /* Cache for xlate entries that
+                                    * are affected by this ukey.
+                                    * Used for stats and learning.*/
 };
 
 /* 'udpif_flow_dump's hold the state associated with one iteration in a flow
@@ -1314,6 +1317,7 @@ ukey_create(const struct nlattr *key, size_t key_len, long long int used)
     ukey->mark = false;
     ukey->created = used ? used : time_msec();
     memset(&ukey->stats, 0, sizeof ukey->stats);
+    ukey->xcache = NULL;
 
     return ukey;
 }
@@ -1322,6 +1326,7 @@ static void
 ukey_delete(struct revalidator *revalidator, struct udpif_key *ukey)
 {
     hmap_remove(&revalidator->ukeys, &ukey->hmap_node);
+    xlate_cache_delete(ukey->xcache);
     free(ukey);
 }
 
@@ -1341,12 +1346,13 @@ revalidate_ukey(struct udpif *udpif, struct udpif_flow_dump *udump,
     struct xlate_in xin;
     int error;
     size_t i;
-    bool ok;
+    bool may_learn, ok;
 
     ok = false;
     xoutp = NULL;
     actions = NULL;
     netflow = NULL;
+    may_learn = push.n_packets > 0;
 
     /* If we don't need to revalidate, we can simply push the stats contained
      * in the udump, otherwise we'll have to get the actions so we can check
@@ -1373,15 +1379,29 @@ revalidate_ukey(struct udpif *udpif, struct udpif_flow_dump *udump,
         goto exit;
     }
 
+    if (ukey->xcache && !udump->need_revalidate) {
+        xlate_push_stats(ukey->xcache, may_learn, &push);
+        ok = true;
+        goto exit;
+    }
+
     error = xlate_receive(udpif->backer, NULL, ukey->key, ukey->key_len, &flow,
                           &ofproto, NULL, NULL, &netflow, &odp_in_port);
     if (error) {
         goto exit;
     }
 
+    if (udump->need_revalidate) {
+        xlate_cache_clear(ukey->xcache);
+    }
+    if (!ukey->xcache) {
+        ukey->xcache = xlate_cache_new();
+    }
+
     xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL);
     xin.resubmit_stats = push.n_packets ? &push : NULL;
-    xin.may_learn = push.n_packets > 0;
+    xin.xcache = ukey->xcache;
+    xin.may_learn = may_learn;
     xin.skip_wildcards = !udump->need_revalidate;
     xlate_actions(&xin, &xout);
     xoutp = &xout;
@@ -1487,6 +1507,13 @@ push_dump_ops(struct revalidator *revalidator,
             struct ofproto_dpif *ofproto;
             struct netflow *netflow;
             struct flow flow;
+            bool may_learn;
+
+            may_learn = push->n_packets > 0;
+            if (op->ukey && op->ukey->xcache) {
+                xlate_push_stats(op->ukey->xcache, may_learn, push);
+                continue;
+            }
 
             if (!xlate_receive(udpif->backer, NULL, op->op.u.flow_del.key,
                                op->op.u.flow_del.key_len, &flow, &ofproto,
@@ -1496,7 +1523,7 @@ push_dump_ops(struct revalidator *revalidator,
 
             xlate_in_init(&xin, ofproto, &flow, NULL, push->tcp_flags, NULL);
             xin.resubmit_stats = push->n_packets ? push : NULL;
-            xin.may_learn = push->n_packets > 0;
+            xin.may_learn = may_learn;
             xin.skip_wildcards = true;
             xlate_actions_for_side_effects(&xin);
 
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 1ba54620f..89a3ec0b2 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -222,6 +222,72 @@ struct skb_priority_to_dscp {
     uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */
 };
 
+enum xc_type {
+    XC_RULE,
+    XC_BOND,
+    XC_NETDEV,
+    XC_NETFLOW,
+    XC_MIRROR,
+    XC_LEARN,
+    XC_NORMAL,
+    XC_FIN_TIMEOUT,
+};
+
+/* xlate_cache entries hold enough information to perform the side effects of
+ * xlate_actions() for a rule, without needing to perform rule translation
+ * from scratch. The primary usage of these is to submit statistics to objects
+ * that a flow relates to, although they may be used for other effects as well
+ * (for instance, refreshing hard timeouts for learned flows). */
+struct xc_entry {
+    enum xc_type type;
+    union {
+        struct rule_dpif *rule;
+        struct {
+            struct netdev *tx;
+            struct netdev *rx;
+            struct bfd *bfd;
+        } dev;
+        struct {
+            struct netflow *netflow;
+            struct flow *flow;
+            ofp_port_t iface;
+        } nf;
+        struct {
+            struct mbridge *mbridge;
+            mirror_mask_t mirrors;
+        } mirror;
+        struct {
+            struct bond *bond;
+            struct flow *flow;
+            uint16_t vid;
+        } bond;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct rule_dpif *rule;
+        } learn;
+        struct {
+            struct ofproto_dpif *ofproto;
+            struct flow *flow;
+            int vlan;
+        } normal;
+        struct {
+            struct rule_dpif *rule;
+            uint16_t idle;
+            uint16_t hard;
+        } fin;
+    } u;
+};
+
+#define XC_ENTRY_FOR_EACH(entry, entries, xcache)               \
+    entries = xcache->entries;                                  \
+    for (entry = ofpbuf_try_pull(&entries, sizeof *entry);      \
+         entry;                                                 \
+         entry = ofpbuf_try_pull(&entries, sizeof *entry))
+
+struct xlate_cache {
+    struct ofpbuf entries;
+};
+
 static struct hmap xbridges = HMAP_INITIALIZER(&xbridges);
 static struct hmap xbundles = HMAP_INITIALIZER(&xbundles);
 static struct hmap xports = HMAP_INITIALIZER(&xports);
@@ -252,6 +318,9 @@ static void clear_skb_priorities(struct xport *);
 static bool dscp_from_skb_priority(const struct xport *, uint32_t skb_priority,
                                    uint8_t *dscp);
 
+static struct xc_entry *xlate_cache_add_entry(struct xlate_cache *xc,
+                                              enum xc_type type);
+
 void
 xlate_ofproto_set(struct ofproto_dpif *ofproto, const char *name,
                   struct dpif *dpif, struct rule_dpif *miss_rule,
@@ -1169,9 +1238,23 @@ output_normal(struct xlate_ctx *ctx, const struct xbundle *out_xbundle,
             return;
         }
 
-        if (ctx->xin->resubmit_stats && !ctx->xout->use_recirc) {
-            bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
-                         ctx->xin->resubmit_stats->n_bytes);
+        /* If ctx->xout->use_recirc is set, the main thread will handle stats
+         * accounting for this bond. */
+        if (!ctx->xout->use_recirc) {
+            if (ctx->xin->resubmit_stats) {
+                bond_account(out_xbundle->bond, &ctx->xin->flow, vid,
+                             ctx->xin->resubmit_stats->n_bytes);
+            }
+            if (ctx->xin->xcache) {
+                struct xc_entry *entry;
+                struct flow *flow;
+
+                flow = &ctx->xin->flow;
+                entry = xlate_cache_add_entry(ctx->xin->xcache, XC_BOND);
+                entry->u.bond.bond = bond_ref(out_xbundle->bond);
+                entry->u.bond.flow = xmemdup(flow, sizeof *flow);
+                entry->u.bond.vid = vid;
+            }
         }
     }
 
@@ -1459,6 +1542,15 @@ xlate_normal(struct xlate_ctx *ctx)
     if (ctx->xin->may_learn) {
         update_learning_table(ctx->xbridge, flow, wc, vlan, in_xbundle);
     }
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        /* Save enough info to update mac learning table later. */
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NORMAL);
+        entry->u.normal.ofproto = ctx->xin->ofproto;
+        entry->u.normal.flow = xmemdup(flow, sizeof *flow);
+        entry->u.normal.vlan = vlan;
+    }
 
     /* Determine output bundle. */
     ovs_rwlock_rdlock(&ctx->xbridge->ml->rwlock);
@@ -1785,6 +1877,14 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
                 bfd_account_rx(peer->bfd, ctx->xin->resubmit_stats);
             }
         }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+            entry->u.dev.rx = netdev_ref(peer->netdev);
+            entry->u.dev.bfd = bfd_ref(peer->bfd);
+        }
 
         return;
     }
@@ -1817,6 +1917,12 @@ compose_output_action__(struct xlate_ctx *ctx, ofp_port_t ofp_port,
         if (ctx->xin->resubmit_stats) {
             netdev_vport_inc_tx(xport->netdev, ctx->xin->resubmit_stats);
         }
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_NETDEV);
+            entry->u.dev.tx = netdev_ref(xport->netdev);
+        }
         out_port = odp_port;
         commit_odp_tunnel_action(flow, &ctx->base_flow,
                                  &ctx->xout->odp_actions);
@@ -1888,6 +1994,13 @@ xlate_recursively(struct xlate_ctx *ctx, struct rule_dpif *rule)
     if (ctx->xin->resubmit_stats) {
         rule_dpif_credit_stats(rule, ctx->xin->resubmit_stats);
     }
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_RULE);
+        entry->u.rule = rule;
+        rule_dpif_ref(rule);
+    }
 
     ctx->resubmits++;
     ctx->recurse++;
@@ -2532,15 +2645,42 @@ xlate_learn_action(struct xlate_ctx *ctx,
     learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
     ofproto_dpif_flow_mod(ctx->xbridge->ofproto, &fm);
     ofpbuf_uninit(&ofpacts);
+
+    if (ctx->xin->xcache) {
+        struct xc_entry *entry;
+
+        entry = xlate_cache_add_entry(ctx->xin->xcache, XC_LEARN);
+        entry->u.learn.ofproto = ctx->xin->ofproto;
+        rule_dpif_lookup(ctx->xbridge->ofproto, &ctx->xin->flow, NULL,
+                         &entry->u.learn.rule);
+    }
+}
+
+static void
+xlate_fin_timeout__(struct rule_dpif *rule, uint16_t tcp_flags,
+                    uint16_t idle_timeout, uint16_t hard_timeout)
+{
+    if (tcp_flags & (TCP_FIN | TCP_RST)) {
+        rule_dpif_reduce_timeouts(rule, idle_timeout, hard_timeout);
+    }
 }
 
 static void
 xlate_fin_timeout(struct xlate_ctx *ctx,
                   const struct ofpact_fin_timeout *oft)
 {
-    if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
-        rule_dpif_reduce_timeouts(ctx->rule, oft->fin_idle_timeout,
-                                  oft->fin_hard_timeout);
+    if (ctx->rule) {
+        xlate_fin_timeout__(ctx->rule, ctx->xin->tcp_flags,
+                            oft->fin_idle_timeout, oft->fin_hard_timeout);
+        if (ctx->xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx->xin->xcache, XC_FIN_TIMEOUT);
+            entry->u.fin.rule = ctx->rule;
+            entry->u.fin.idle = oft->fin_idle_timeout;
+            entry->u.fin.hard = oft->fin_hard_timeout;
+            rule_dpif_ref(ctx->rule);
+        }
     }
 }
 
@@ -2912,6 +3052,7 @@ xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
     xin->packet = packet;
     xin->may_learn = packet != NULL;
     xin->rule = rule;
+    xin->xcache = NULL;
     xin->ofpacts = NULL;
     xin->ofpacts_len = 0;
     xin->tcp_flags = tcp_flags;
@@ -3124,6 +3265,13 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         if (ctx.xin->resubmit_stats) {
             rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
         }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_RULE);
+            rule_dpif_ref(rule);
+            entry->u.rule = rule;
+        }
         ctx.rule = rule;
     }
     xout->fail_open = ctx.rule && rule_dpif_is_fail_open(ctx.rule);
@@ -3174,10 +3322,19 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
     }
 
     in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
-    if (in_port && in_port->is_tunnel && ctx.xin->resubmit_stats) {
-        netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
-        if (in_port->bfd) {
-            bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+    if (in_port && in_port->is_tunnel) {
+        if (ctx.xin->resubmit_stats) {
+            netdev_vport_inc_rx(in_port->netdev, ctx.xin->resubmit_stats);
+            if (in_port->bfd) {
+                bfd_account_rx(in_port->bfd, ctx.xin->resubmit_stats);
+            }
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETDEV);
+            entry->u.dev.rx = netdev_ref(in_port->netdev);
+            entry->u.dev.bfd = bfd_ref(in_port->bfd);
         }
     }
 
@@ -3234,28 +3391,45 @@ xlate_actions__(struct xlate_in *xin, struct xlate_out *xout)
         ctx.xout->slow |= SLOW_ACTION;
     }
 
-    if (ctx.xin->resubmit_stats) {
-        mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
-                            ctx.xin->resubmit_stats->n_packets,
-                            ctx.xin->resubmit_stats->n_bytes);
-
-        if (ctx.xbridge->netflow) {
-            const struct ofpact *ofpacts;
-            size_t ofpacts_len;
-
-            ofpacts_len = actions->ofpacts_len;
-            ofpacts = actions->ofpacts;
-            if (ofpacts_len == 0
-                || ofpacts->type != OFPACT_CONTROLLER
-                || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
-                /* Only update netflow if we don't have controller flow. We don't
-                 * report NetFlow expiration messages for such facets because they
-                 * are just part of the control logic for the network, not real
-                 * traffic. */
+    if (mbridge_has_mirrors(ctx.xbridge->mbridge)) {
+        if (ctx.xin->resubmit_stats) {
+            mirror_update_stats(ctx.xbridge->mbridge, xout->mirrors,
+                                ctx.xin->resubmit_stats->n_packets,
+                                ctx.xin->resubmit_stats->n_bytes);
+        }
+        if (ctx.xin->xcache) {
+            struct xc_entry *entry;
+
+            entry = xlate_cache_add_entry(ctx.xin->xcache, XC_MIRROR);
+            entry->u.mirror.mbridge = mbridge_ref(ctx.xbridge->mbridge);
+            entry->u.mirror.mirrors = xout->mirrors;
+        }
+    }
+
+    if (ctx.xbridge->netflow) {
+        const struct ofpact *ofpacts = actions->ofpacts;
+        size_t ofpacts_len = actions->ofpacts_len;
+
+        /* Only update netflow if we don't have controller flow. We don't
+         * report NetFlow expiration messages for such facets because they
+         * are just part of the control logic for the network, not real
+         * traffic. */
+        if (ofpacts_len == 0
+            || ofpacts->type != OFPACT_CONTROLLER
+            || ofpact_next(ofpacts) < ofpact_end(ofpacts, ofpacts_len)) {
+            if (ctx.xin->resubmit_stats) {
                 netflow_flow_update(ctx.xbridge->netflow, flow,
                                     xout->nf_output_iface,
                                     ctx.xin->resubmit_stats);
             }
+            if (ctx.xin->xcache) {
+                struct xc_entry *entry;
+
+                entry = xlate_cache_add_entry(ctx.xin->xcache, XC_NETFLOW);
+                entry->u.nf.netflow = netflow_ref(ctx.xbridge->netflow);
+                entry->u.nf.flow = xmemdup(flow, sizeof *flow);
+                entry->u.nf.iface = xout->nf_output_iface;
+            }
         }
     }
 
@@ -3314,3 +3488,187 @@ xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
                                         &output.ofpact, sizeof output,
                                         packet);
 }
+
+struct xlate_cache *
+xlate_cache_new(void)
+{
+    struct xlate_cache *xcache = xmalloc(sizeof *xcache);
+
+    ofpbuf_init(&xcache->entries, 512);
+    return xcache;
+}
+
+static struct xc_entry *
+xlate_cache_add_entry(struct xlate_cache *xcache, enum xc_type type)
+{
+    struct xc_entry *entry;
+
+    entry = ofpbuf_put_zeros(&xcache->entries, sizeof *entry);
+    entry->type = type;
+
+    return entry;
+}
+
+static void
+xlate_cache_netdev(struct xc_entry *entry, const struct dpif_flow_stats *stats)
+{
+    if (entry->u.dev.tx) {
+        netdev_vport_inc_tx(entry->u.dev.tx, stats);
+    }
+    if (entry->u.dev.rx) {
+        netdev_vport_inc_rx(entry->u.dev.rx, stats);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_account_rx(entry->u.dev.bfd, stats);
+    }
+}
+
+static void
+xlate_cache_normal(struct ofproto_dpif *ofproto, struct flow *flow, int vlan)
+{
+    struct xbridge *xbridge;
+    struct xbundle *xbundle;
+    struct flow_wildcards wc;
+
+    xbridge = xbridge_lookup(ofproto);
+    if (!xbridge) {
+        return;
+    }
+
+    xbundle = lookup_input_bundle(xbridge, flow->in_port.ofp_port, false,
+                                  NULL);
+    if (!xbundle) {
+        return;
+    }
+
+    update_learning_table(xbridge, flow, &wc, vlan, xbundle);
+}
+
+/* Push stats and perform side effects of flow translation. */
+void
+xlate_push_stats(struct xlate_cache *xcache, bool may_learn,
+                 const struct dpif_flow_stats *stats)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries = xcache->entries;
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_credit_stats(entry->u.rule, stats);
+            break;
+        case XC_BOND:
+            bond_account(entry->u.bond.bond, entry->u.bond.flow,
+                         entry->u.bond.vid, stats->n_bytes);
+            break;
+        case XC_NETDEV:
+            xlate_cache_netdev(entry, stats);
+            break;
+        case XC_NETFLOW:
+            netflow_flow_update(entry->u.nf.netflow, entry->u.nf.flow,
+                                entry->u.nf.iface, stats);
+            break;
+        case XC_MIRROR:
+            mirror_update_stats(entry->u.mirror.mbridge,
+                                entry->u.mirror.mirrors,
+                                stats->n_packets, stats->n_bytes);
+            break;
+        case XC_LEARN:
+            if (may_learn) {
+                struct rule_dpif *rule = entry->u.learn.rule;
+
+                /* Reset the modified time for a rule that is equivalent to
+                 * the currently cached rule. If the rule is not the exact
+                 * rule we have cached, update the reference that we have. */
+                entry->u.learn.rule = ofproto_dpif_refresh_rule(rule);
+            }
+            break;
+        case XC_NORMAL:
+            xlate_cache_normal(entry->u.normal.ofproto, entry->u.normal.flow,
+                               entry->u.normal.vlan);
+            break;
+        case XC_FIN_TIMEOUT:
+            xlate_fin_timeout__(entry->u.fin.rule, stats->tcp_flags,
+                                entry->u.fin.idle, entry->u.fin.hard);
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+}
+
+static void
+xlate_dev_unref(struct xc_entry *entry)
+{
+    if (entry->u.dev.tx) {
+        netdev_close(entry->u.dev.tx);
+    }
+    if (entry->u.dev.rx) {
+        netdev_close(entry->u.dev.rx);
+    }
+    if (entry->u.dev.bfd) {
+        bfd_unref(entry->u.dev.bfd);
+    }
+}
+
+static void
+xlate_cache_clear_netflow(struct netflow *netflow, struct flow *flow)
+{
+    netflow_expire(netflow, flow);
+    netflow_flow_clear(netflow, flow);
+    netflow_unref(netflow);
+    free(flow);
+}
+
+void
+xlate_cache_clear(struct xlate_cache *xcache)
+{
+    struct xc_entry *entry;
+    struct ofpbuf entries;
+
+    if (!xcache) {
+        return;
+    }
+
+    XC_ENTRY_FOR_EACH (entry, entries, xcache) {
+        switch (entry->type) {
+        case XC_RULE:
+            rule_dpif_unref(entry->u.rule);
+            break;
+        case XC_BOND:
+            free(entry->u.bond.flow);
+            bond_unref(entry->u.bond.bond);
+            break;
+        case XC_NETDEV:
+            xlate_dev_unref(entry);
+            break;
+        case XC_NETFLOW:
+            xlate_cache_clear_netflow(entry->u.nf.netflow, entry->u.nf.flow);
+            break;
+        case XC_MIRROR:
+            mbridge_unref(entry->u.mirror.mbridge);
+            break;
+        case XC_LEARN:
+            rule_dpif_unref(entry->u.learn.rule);
+            break;
+        case XC_NORMAL:
+            free(entry->u.normal.flow);
+            break;
+        case XC_FIN_TIMEOUT:
+            rule_dpif_unref(entry->u.fin.rule);
+            break;
+        default:
+            OVS_NOT_REACHED();
+        }
+    }
+
+    ofpbuf_clear(&xcache->entries);
+}
+
+void
+xlate_cache_delete(struct xlate_cache *xcache)
+{
+    xlate_cache_clear(xcache);
+    ofpbuf_uninit(&xcache->entries);
+    free(xcache);
+}
diff --git a/ofproto/ofproto-dpif-xlate.h b/ofproto/ofproto-dpif-xlate.h
index 8b53e10d3..f1267fbf5 100644
--- a/ofproto/ofproto-dpif-xlate.h
+++ b/ofproto/ofproto-dpif-xlate.h
@@ -31,6 +31,7 @@ struct lacp;
 struct dpif_ipfix;
 struct dpif_sflow;
 struct mac_learning;
+struct xlate_cache;
 
 struct xlate_recirc {
     uint32_t recirc_id;         /* !0 Use recirculation instead of output. */
@@ -127,6 +128,15 @@ struct xlate_in {
      * This is normally null so the client has to set it manually after
      * calling xlate_in_init(). */
     const struct dpif_flow_stats *resubmit_stats;
+
+    /* If nonnull, flow translation populates this cache with references to all
+     * modules that are affected by translation. This 'xlate_cache' may be
+     * passed to xlate_push_stats() to perform the same function as
+     * xlate_actions() without the full cost of translation.
+     *
+     * This is normally null so the client has to set it manually after
+     * calling xlate_in_init(). */
+    struct xlate_cache *xcache;
 };
 
 extern struct ovs_rwlock xlate_rwlock;
@@ -179,4 +189,10 @@ void xlate_out_copy(struct xlate_out *dst, const struct xlate_out *src);
 
 int xlate_send_packet(const struct ofport_dpif *, struct ofpbuf *);
 
+struct xlate_cache *xlate_cache_new(void);
+void xlate_push_stats(struct xlate_cache *, bool may_learn,
+                      const struct dpif_flow_stats *);
+void xlate_cache_clear(struct xlate_cache *);
+void xlate_cache_delete(struct xlate_cache *);
+
 #endif /* ofproto-dpif-xlate.h */
-- 
2.43.0
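
For reference, a minimal sketch of the calling pattern this patch enables,
pieced together from the revalidate_ukey() changes above. This is an
illustration only, not part of the patch: it assumes 'ofproto', 'flow' and
the per-dump 'push' stats are already in hand, as they are in
revalidate_ukey(), omits error handling and locking, and assumes
xlate_out_uninit() as the usual xlate_out cleanup routine from
ofproto-dpif-xlate.h.

    struct xlate_cache *xcache = xlate_cache_new();
    struct xlate_in xin;
    struct xlate_out xout;

    /* First dump: translate once and let translation fill the cache. */
    xlate_in_init(&xin, ofproto, &flow, NULL, push.tcp_flags, NULL);
    xin.resubmit_stats = push.n_packets ? &push : NULL;
    xin.may_learn = push.n_packets > 0;
    xin.xcache = xcache;
    xlate_actions(&xin, &xout);
    xlate_out_uninit(&xout);

    /* Subsequent dumps: credit stats and replay side effects from the
     * cache, skipping translation entirely. */
    xlate_push_stats(xcache, push.n_packets > 0, &push);

    /* If the flow must be revalidated, drop the cached references so the
     * next translation can rebuild them. */
    xlate_cache_clear(xcache);

    /* When the flow is removed from the datapath, free the cache. */
    xlate_cache_delete(xcache);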