X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=ffeb952484eb98e6840c4e4c73b54f18c003a77e;hb=9b45d7f5d;hp=08d80d89ae6334ae80b3ffd49388d427ac67fd41;hpb=693c4a01124ec5ad9253f8cfcfd99075a9d637f6;p=sliver-openvswitch.git diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index 08d80d89a..ffeb95248 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -54,11 +54,11 @@ #include "poll-loop.h" #include "rconn.h" #include "shash.h" -#include "status.h" #include "stream-ssl.h" #include "svec.h" #include "tag.h" #include "timeval.h" +#include "unaligned.h" #include "unixctl.h" #include "vconn.h" #include "vlog.h" @@ -90,7 +90,9 @@ COVERAGE_DEFINE(ofproto_unexpected_rule); COVERAGE_DEFINE(ofproto_uninstallable); COVERAGE_DEFINE(ofproto_update_port); -#include "sflow_api.h" +/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a + * flow translation. */ +#define MAX_RESUBMIT_RECURSION 16 struct rule; @@ -122,7 +124,12 @@ struct action_xlate_ctx { * * This is normally null so the client has to set it manually after * calling action_xlate_ctx_init(). */ - void (*resubmit_hook)(struct action_xlate_ctx *, const struct rule *); + void (*resubmit_hook)(struct action_xlate_ctx *, struct rule *); + + /* If true, the speciality of 'flow' should be checked before executing + * its actions. If special_cb returns false on 'flow' rendered + * uninstallable and no actions will be executed. */ + bool check_special; /* xlate_actions() initializes and uses these members. The client might want * to look at them after it returns. */ @@ -138,7 +145,7 @@ struct action_xlate_ctx { int recurse; /* Recursion level, via xlate_table_action. */ int last_pop_priority; /* Offset in 'odp_actions' just past most - * recently added ODPAT_SET_PRIORITY. */ + * recent ODP_ACTION_ATTR_SET_PRIORITY. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -194,6 +201,8 @@ static void rule_insert(struct ofproto *, struct rule *); static void rule_remove(struct ofproto *, struct rule *); static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason); +static void rule_get_stats(const struct rule *, uint64_t *packets, + uint64_t *bytes); /* An exact-match instantiation of an OpenFlow flow. */ struct facet { @@ -215,6 +224,13 @@ struct facet { uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ + uint64_t dp_packet_count; /* Last known packet count in the datapath. */ + uint64_t dp_byte_count; /* Last known byte count in the datapath. */ + + uint64_t rs_packet_count; /* Packets pushed to resubmit children. */ + uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */ + long long int rs_used; /* Used time pushed to resubmit children. */ + /* Number of bytes passed to account_cb. This may include bytes that can * currently obtained from the datapath (thus, it can be greater than * byte_count). */ @@ -250,6 +266,7 @@ static void facet_make_actions(struct ofproto *, struct facet *, const struct ofpbuf *packet); static void facet_update_stats(struct ofproto *, struct facet *, const struct dpif_flow_stats *); +static void facet_push_stats(struct ofproto *, struct facet *); /* ofproto supports two kinds of OpenFlow connections: * @@ -301,7 +318,8 @@ struct ofconn { /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ - struct pinsched *schedulers[2]; /* Indexed by reason code; see below. 
*/ +#define N_SCHEDULERS 2 + struct pinsched *schedulers[N_SCHEDULERS]; struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ int miss_send_len; /* Bytes to send of buffered packets. */ @@ -315,19 +333,9 @@ struct ofconn { enum nx_role role; /* Role. */ struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */ struct discovery *discovery; /* Controller discovery object, if enabled. */ - struct status_category *ss; /* Switch status category. */ enum ofproto_band band; /* In-band or out-of-band? */ }; -/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's - * "schedulers" array. Their values are 0 and 1, and their meanings and values - * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In - * case anything ever changes, check their values here. */ -#define N_SCHEDULERS 2 -BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); -BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); -BUILD_ASSERT_DECL(OFPR_ACTION == 1); -BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, enum ofconn_type); @@ -363,7 +371,6 @@ struct ofproto { uint32_t max_ports; /* Configuration. */ - struct switch_status *switch_status; struct fail_open *fail_open; struct netflow *netflow; struct ofproto_sflow *sflow; @@ -413,6 +420,9 @@ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static int ofproto_expire(struct ofproto *); +static void flow_push_stats(struct ofproto *, const struct rule *, + struct flow *, uint64_t packets, uint64_t bytes, + long long int used); static void handle_upcall(struct ofproto *, struct dpif_upcall *); @@ -444,7 +454,10 @@ ofproto_create(const char *datapath, const char *datapath_type, VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); + error = dpif_recv_set_mask(dpif, + ((1u << DPIF_UC_MISS) | + (1u << DPIF_UC_ACTION) | + (1u << DPIF_UC_SAMPLE))); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -472,7 +485,6 @@ ofproto_create(const char *datapath, const char *datapath_type, p->max_ports = dpif_get_max_ports(dpif); /* Initialize submodules. */ - p->switch_status = switch_status_create(p); p->fail_open = NULL; p->netflow = NULL; p->sflow = NULL; @@ -555,8 +567,7 @@ add_controller(struct ofproto *ofproto, const struct ofproto_controller *c) if (is_discovery_controller(c)) { int error = discovery_create(c->accept_re, c->update_resolv_conf, - ofproto->dpif, ofproto->switch_status, - &discovery); + ofproto->dpif, &discovery); if (error) { return; } @@ -667,8 +678,7 @@ update_in_band_remotes(struct ofproto *ofproto) * even before we know any remote addresses. */ if (n_addrs || discovery) { if (!ofproto->in_band) { - in_band_create(ofproto, ofproto->dpif, ofproto->switch_status, - &ofproto->in_band); + in_band_create(ofproto, ofproto->dpif, &ofproto->in_band); } if (ofproto->in_band) { in_band_set_remotes(ofproto->in_band, addrs, n_addrs); @@ -695,7 +705,7 @@ update_fail_open(struct ofproto *p) size_t n; if (!p->fail_open) { - p->fail_open = fail_open_create(p, p->switch_status); + p->fail_open = fail_open_create(p); } n = 0; @@ -720,7 +730,6 @@ ofproto_set_controllers(struct ofproto *p, struct shash new_controllers; struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice, *next_ofservice; - bool ss_exists; size_t i; /* Create newly configured controllers and services. 
@@ -748,7 +757,6 @@ ofproto_set_controllers(struct ofproto *p, /* Delete controllers that are no longer configured. * Update configuration of all now-existing controllers. */ - ss_exists = false; HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) { struct ofproto_controller *c; @@ -757,9 +765,6 @@ ofproto_set_controllers(struct ofproto *p, ofconn_destroy(ofconn); } else { update_controller(ofconn, c); - if (ofconn->ss) { - ss_exists = true; - } } } @@ -781,13 +786,6 @@ ofproto_set_controllers(struct ofproto *p, update_in_band_remotes(p); update_fail_open(p); - - if (!hmap_is_empty(&p->controllers) && !ss_exists) { - ofconn = CONTAINER_OF(hmap_first(&p->controllers), - struct ofconn, hmap_node); - ofconn->ss = switch_status_register(p->switch_status, "remote", - rconn_status_cb, ofconn->rconn); - } } void @@ -1062,7 +1060,6 @@ ofproto_destroy(struct ofproto *p) } shash_destroy(&p->port_by_name); - switch_status_destroy(p->switch_status); netflow_destroy(p->netflow); ofproto_sflow_destroy(p->sflow); @@ -1349,7 +1346,7 @@ ofproto_is_alive(const struct ofproto *p) } void -ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, +ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, struct shash *info) { const struct ofconn *ofconn; @@ -1358,6 +1355,9 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { const struct rconn *rconn = ofconn->rconn; + time_t now = time_now(); + time_t last_connection = rconn_get_last_connection(rconn); + time_t last_disconnect = rconn_get_last_disconnect(rconn); const int last_error = rconn_get_last_error(rconn); struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); @@ -1368,22 +1368,27 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, cinfo->pairs.n = 0; - if (last_error == EOF) { - cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; - cinfo->pairs.values[cinfo->pairs.n++] = xstrdup("End of file"); - } else if (last_error > 0) { + if (last_error) { cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(strerror(last_error)); + xstrdup(ovs_retval_to_string(last_error)); } cinfo->pairs.keys[cinfo->pairs.n] = "state"; cinfo->pairs.values[cinfo->pairs.n++] = xstrdup(rconn_get_state(rconn)); - cinfo->pairs.keys[cinfo->pairs.n] = "time_in_state"; - cinfo->pairs.values[cinfo->pairs.n++] = - xasprintf("%u", rconn_get_state_elapsed(rconn)); + if (last_connection != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_connection)); + } + + if (last_disconnect != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_disconnect)); + } } } @@ -1453,6 +1458,8 @@ ofproto_send_packet(struct ofproto *p, const struct flow *flow, struct ofpbuf *odp_actions; action_xlate_ctx_init(&ctx, p, flow, packet); + /* Always xlate packets originated in this function. */ + ctx.check_special = false; odp_actions = xlate_actions(&ctx, actions, n_actions); /* XXX Should we translate the dpif_execute() errno value into an OpenFlow @@ -1509,6 +1516,8 @@ ofproto_flush_flows(struct ofproto *ofproto) * individually since we are about to blow away all the facets with * dpif_flow_flush(). 
*/ facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; facet_remove(ofproto, facet); } @@ -1574,7 +1583,7 @@ make_ofport(const struct dpif_port *dpif_port) return NULL; } - ofport = xmalloc(sizeof *ofport); + ofport = xzalloc(sizeof *ofport); ofport->netdev = netdev; ofport->odp_port = dpif_port->port_no; ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no); @@ -1815,7 +1824,6 @@ ofconn_destroy(struct ofconn *ofconn) discovery_destroy(ofconn->discovery); list_remove(&ofconn->node); - switch_status_unregister(ofconn->ss); rconn_destroy(ofconn->rconn); rconn_packet_counter_destroy(ofconn->packet_in_counter); rconn_packet_counter_destroy(ofconn->reply_counter); @@ -1840,6 +1848,7 @@ ofconn_run(struct ofconn *ofconn) char *ofconn_name = ofconn_make_name(p, controller_name); rconn_connect(ofconn->rconn, controller_name, ofconn_name); free(ofconn_name); + free(controller_name); } else { rconn_disconnect(ofconn->rconn); } @@ -1929,8 +1938,7 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) if (rate > 0) { if (!*s) { - *s = pinsched_create(rate, burst, - ofconn->ofproto->switch_status); + *s = pinsched_create(rate, burst); } else { pinsched_set_limits(*s, rate, burst); } @@ -2093,13 +2101,13 @@ execute_odp_actions(struct ofproto *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODPAT_CONTROLLER) { + && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. */ struct dpif_upcall upcall; - upcall.type = _ODPL_ACTION_NR; + upcall.type = DPIF_UC_ACTION; upcall.packet = packet; upcall.key = NULL; upcall.key_len = 0; @@ -2141,12 +2149,10 @@ facet_execute(struct ofproto *ofproto, struct facet *facet, assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in)); flow_extract_stats(&facet->flow, packet, &stats); + stats.used = time_msec(); if (execute_odp_actions(ofproto, &facet->flow, facet->actions, facet->actions_len, packet)) { facet_update_stats(ofproto, facet, &stats); - facet->used = time_msec(); - netflow_flow_update_time(ofproto->netflow, - &facet->nf_flow, facet->used); } } @@ -2198,6 +2204,7 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port, rule->used = time_msec(); rule->packet_count++; rule->byte_count += size; + flow_push_stats(ofproto, rule, &flow, 1, size, rule->used); } ofpbuf_delete(odp_actions); } @@ -2318,6 +2325,8 @@ facet_put__(struct ofproto *ofproto, struct facet *facet, flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; if (stats) { flags |= DPIF_FP_ZERO_STATS; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; } ofpbuf_use_stack(&key, keybuf, sizeof keybuf); @@ -2378,6 +2387,11 @@ facet_uninstall(struct ofproto *p, struct facet *facet) facet_update_stats(p, facet, &stats); } facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } else { + assert(facet->dp_packet_count == 0); + assert(facet->dp_byte_count == 0); } } @@ -2394,10 +2408,16 @@ facet_is_controller_flow(struct facet *facet) } /* Folds all of 'facet''s statistics into its rule. Also updates the - * accounting ofhook and emits a NetFlow expiration if appropriate. */ + * accounting ofhook and emits a NetFlow expiration if appropriate. 
All of + * 'facet''s statistics in the datapath should have been zeroed and folded into + * its packet and byte counts before this function is called. */ static void facet_flush_stats(struct ofproto *ofproto, struct facet *facet) { + assert(!facet->dp_byte_count); + assert(!facet->dp_packet_count); + + facet_push_stats(ofproto, facet); facet_account(ofproto, facet, 0); if (ofproto->netflow && !facet_is_controller_flow(facet)) { @@ -2416,6 +2436,8 @@ facet_flush_stats(struct ofproto *ofproto, struct facet *facet) * reinstalled. */ facet->packet_count = 0; facet->byte_count = 0; + facet->rs_packet_count = 0; + facet->rs_byte_count = 0; facet->accounted_bytes = 0; netflow_flow_clear(&facet->nf_flow); @@ -2535,6 +2557,7 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) list_push_back(&new_rule->facets, &facet->list_node); facet->rule = new_rule; facet->used = new_rule->created; + facet->rs_used = facet->used; } ofpbuf_delete(odp_actions); @@ -2662,10 +2685,6 @@ handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) return 0; } -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a - * flow translation. */ -#define MAX_RESUBMIT_RECURSION 16 - static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); @@ -2687,7 +2706,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) */ } - nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, port); ctx->nf_output_iface = port; } @@ -2738,7 +2757,7 @@ flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { uint16_t odp_port = ofport->odp_port; if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); } } *nf_output_iface = NF_OUT_FLOOD; @@ -2778,7 +2797,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, &ctx->nf_output_iface, ctx->odp_actions); break; case OFPP_CONTROLLER: - nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); break; case OFPP_LOCAL: add_output_action(ctx, ODPP_LOCAL); @@ -2825,7 +2844,7 @@ static void add_pop_action(struct action_xlate_ctx *ctx) { if (ctx->odp_actions->size != ctx->last_pop_priority) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); ctx->last_pop_priority = ctx->odp_actions->size; } } @@ -2856,7 +2875,7 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, /* Add ODP actions. 
*/ remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); add_output_action(ctx, odp_port); add_pop_action(ctx); @@ -2884,7 +2903,7 @@ xlate_set_queue_action(struct action_xlate_ctx *ctx, } remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); } static void @@ -2892,9 +2911,9 @@ xlate_set_dl_tci(struct action_xlate_ctx *ctx) { ovs_be16 tci = ctx->flow.vlan_tci; if (!(tci & htons(VLAN_CFI))) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); } else { - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, tci & ~htons(VLAN_CFI)); } } @@ -2920,7 +2939,8 @@ update_reg_state(struct action_xlate_ctx *ctx, xlate_set_dl_tci(ctx); } if (ctx->flow.tun_id != state->tun_id) { - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, ctx->flow.tun_id); + nl_msg_put_be64(ctx->odp_actions, + ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id); } } @@ -2946,13 +2966,14 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL: nast = (const struct nx_action_set_tunnel *) nah; tun_id = htonll(ntohl(nast->tun_id)); - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; case NXAST_DROP_SPOOFED_ARP: if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP); + nl_msg_put_flag(ctx->odp_actions, + ODP_ACTION_ATTR_DROP_SPOOFED_ARP); } break; @@ -2985,7 +3006,7 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL64: tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id; - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; @@ -3049,44 +3070,44 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_DL_SRC: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, ia->nw_addr.nw_addr); ctx->flow.nw_src = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_DST: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST, ia->nw_addr.nw_addr); ctx->flow.nw_dst = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_TOS: - nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS, + nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, ia->nw_tos.nw_tos); ctx->flow.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, ia->tp_port.tp_port); ctx->flow.tp_src = 
ia->tp_port.tp_port; break; case OFPAT_SET_TP_DST: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST, ia->tp_port.tp_port); ctx->flow.tp_dst = ia->tp_port.tp_port; break; @@ -3115,6 +3136,7 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->flow = *flow; ctx->packet = packet; ctx->resubmit_hook = NULL; + ctx->check_special = true; } static struct ofpbuf * @@ -3129,7 +3151,16 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->nf_output_iface = NF_OUT_DROP; ctx->recurse = 0; ctx->last_pop_priority = -1; - do_xlate_actions(in, n_in, ctx); + + if (!ctx->check_special + || !ctx->ofproto->ofhooks->special_cb + || ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet, + ctx->ofproto->aux)) { + do_xlate_actions(in, n_in, ctx); + } else { + ctx->may_set_up_flow = false; + } + remove_pop_action(ctx); /* Check with in-band control to see if we're allowed to set up this @@ -3389,8 +3420,8 @@ handle_table_stats_request(struct ofconn *ofconn, ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL)); ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. */ ots->active_count = htonl(classifier_count(&p->cls)); - ots->lookup_count = htonll(0); /* XXX */ - ots->matched_count = htonll(0); /* XXX */ + put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */ + put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */ queue_tx(msg, ofconn, ofconn->reply_counter); return 0; @@ -3411,18 +3442,18 @@ append_port_stat(struct ofport *port, struct ofconn *ofconn, ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp); ops->port_no = htons(port->opp.port_no); memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + put_32aligned_be64(&ops->rx_packets, htonll(stats.rx_packets)); + put_32aligned_be64(&ops->tx_packets, htonll(stats.tx_packets)); + put_32aligned_be64(&ops->rx_bytes, htonll(stats.rx_bytes)); + put_32aligned_be64(&ops->tx_bytes, htonll(stats.tx_bytes)); + put_32aligned_be64(&ops->rx_dropped, htonll(stats.rx_dropped)); + put_32aligned_be64(&ops->tx_dropped, htonll(stats.tx_dropped)); + put_32aligned_be64(&ops->rx_errors, htonll(stats.rx_errors)); + put_32aligned_be64(&ops->tx_errors, htonll(stats.tx_errors)); + put_32aligned_be64(&ops->rx_frame_err, htonll(stats.rx_frame_errors)); + put_32aligned_be64(&ops->rx_over_err, htonll(stats.rx_over_errors)); + put_32aligned_be64(&ops->rx_crc_err, htonll(stats.rx_crc_errors)); + put_32aligned_be64(&ops->collisions, htonll(stats.collisions)); } static int @@ -3450,46 +3481,6 @@ handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) return 0; } -/* Obtains statistic counters for 'rule' within 'p' and stores them into - * '*packet_countp' and '*byte_countp'. The returned statistics include - * statistics for all of 'rule''s facets. 
*/ -static void -query_stats(struct ofproto *p, struct rule *rule, - uint64_t *packet_countp, uint64_t *byte_countp) -{ - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - uint64_t packet_count, byte_count; - struct facet *facet; - struct ofpbuf key; - - /* Start from historical data for 'rule' itself that are no longer tracked - * by the datapath. This counts, for example, facets that have expired. */ - packet_count = rule->packet_count; - byte_count = rule->byte_count; - - /* Ask the datapath for statistics on all of the rule's facets. - * - * Also, add any statistics that are not tracked by the datapath for each - * facet. This includes, for example, statistics for packets that were - * executed "by hand" by ofproto via dpif_execute() but must be accounted - * to a rule. */ - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - LIST_FOR_EACH (facet, list_node, &rule->facets) { - struct dpif_flow_stats stats; - - ofpbuf_clear(&key); - odp_flow_key_from_flow(&key, &facet->flow); - dpif_flow_get(p->dpif, key.data, key.size, NULL, &stats); - - packet_count += stats.n_packets + facet->packet_count; - byte_count += stats.n_bytes + facet->byte_count; - } - - /* Return the stats to the caller. */ - *packet_countp = packet_count; - *byte_countp = byte_count; -} - static void calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec) { @@ -3504,6 +3495,7 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, { struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; + ovs_be64 cookie; size_t act_len, len; if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) { @@ -3513,21 +3505,22 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, act_len = sizeof *rule->actions * rule->n_actions; len = offsetof(struct ofp_flow_stats, actions) + act_len; - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ofs = append_ofp_stats_reply(len, ofconn, replyp); ofs->length = htons(len); ofs->table_id = 0; ofs->pad = 0; ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match, - rule->flow_cookie, &ofs->cookie); + rule->flow_cookie, &cookie); + put_32aligned_be64(&ofs->cookie, cookie); calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec); ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); - ofs->packet_count = htonll(packet_count); - ofs->byte_count = htonll(byte_count); + put_32aligned_be64(&ofs->packet_count, htonll(packet_count)); + put_32aligned_be64(&ofs->byte_count, htonll(byte_count)); if (rule->n_actions > 0) { memcpy(ofs->actions, rule->actions, act_len); } @@ -3536,7 +3529,16 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, static bool is_valid_table(uint8_t table_id) { - return table_id == 0 || table_id == 0xff; + if (table_id == 0 || table_id == 0xff) { + return true; + } else { + /* It would probably be better to reply with an error but there doesn't + * seem to be any appropriate value, so that might just be + * confusing. 
*/ + VLOG_WARN_RL(&rl, "controller asked for invalid table %"PRIu8, + table_id); + return false; + } } static int @@ -3577,7 +3579,7 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, return; } - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); act_len = sizeof *rule->actions * rule->n_actions; @@ -3641,19 +3643,21 @@ handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) } static void -flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) +flow_stats_ds(struct rule *rule, struct ds *results) { uint64_t packet_count, byte_count; size_t act_len = sizeof *rule->actions * rule->n_actions; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ds_put_format(results, "duration=%llds, ", (time_msec() - rule->created) / 1000); + ds_put_format(results, "idle=%.3fs, ", (time_msec() - rule->used) / 1000.0); ds_put_format(results, "priority=%u, ", rule->cr.priority); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, results); + ds_put_char(results, ','); if (act_len > 0) { ofp_print_actions(results, &rule->actions->header, act_len); } else { @@ -3663,7 +3667,7 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) } /* Adds a pretty-printed description of all flows to 'results', including - * those marked hidden by secchan (e.g., by in-band control). */ + * hidden flows (e.g., set up by in-band control). */ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { @@ -3672,7 +3676,7 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results) cls_cursor_init(&cursor, &p->cls, NULL); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { - flow_stats_ds(p, rule, results); + flow_stats_ds(rule, results); } } @@ -3697,7 +3701,7 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, uint64_t packet_count; uint64_t byte_count; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); total_packets += packet_count; total_bytes += byte_count; @@ -3707,8 +3711,8 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, } oasr->flow_count = htonl(n_flows); - oasr->packet_count = htonll(total_packets); - oasr->byte_count = htonll(total_bytes); + put_32aligned_be64(&oasr->packet_count, htonll(total_packets)); + put_32aligned_be64(&oasr->byte_count, htonll(total_bytes)); memset(oasr->pad, 0, sizeof oasr->pad); } @@ -3781,9 +3785,9 @@ put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, reply->port_no = htons(cbdata->ofport->opp.port_no); memset(reply->pad, 0, sizeof reply->pad); reply->queue_id = htonl(queue_id); - reply->tx_bytes = htonll(stats->tx_bytes); - reply->tx_packets = htonll(stats->tx_packets); - reply->tx_errors = htonll(stats->tx_errors); + put_32aligned_be64(&reply->tx_bytes, htonll(stats->tx_bytes)); + put_32aligned_be64(&reply->tx_packets, htonll(stats->tx_packets)); + put_32aligned_be64(&reply->tx_errors, htonll(stats->tx_errors)); } static void @@ -3853,11 +3857,12 @@ handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) return 0; } +/* Updates 'facet''s used time. Caller is responsible for calling + * facet_push_stats() to update the flows which 'facet' resubmits into. 
*/ static void facet_update_time(struct ofproto *ofproto, struct facet *facet, - const struct dpif_flow_stats *stats) + long long int used) { - long long int used = stats->used; if (used > facet->used) { facet->used = used; if (used > facet->rule->used) { @@ -3877,14 +3882,74 @@ static void facet_update_stats(struct ofproto *ofproto, struct facet *facet, const struct dpif_flow_stats *stats) { - if (stats->n_packets) { - facet_update_time(ofproto, facet, stats); + if (stats->n_packets || stats->used > facet->used) { + facet_update_time(ofproto, facet, stats->used); facet->packet_count += stats->n_packets; facet->byte_count += stats->n_bytes; + facet_push_stats(ofproto, facet); netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags); } } +static void +facet_push_stats(struct ofproto *ofproto, struct facet *facet) +{ + uint64_t rs_packets, rs_bytes; + + assert(facet->packet_count >= facet->rs_packet_count); + assert(facet->byte_count >= facet->rs_byte_count); + assert(facet->used >= facet->rs_used); + + rs_packets = facet->packet_count - facet->rs_packet_count; + rs_bytes = facet->byte_count - facet->rs_byte_count; + + if (rs_packets || rs_bytes || facet->used > facet->rs_used) { + facet->rs_packet_count = facet->packet_count; + facet->rs_byte_count = facet->byte_count; + facet->rs_used = facet->used; + + flow_push_stats(ofproto, facet->rule, &facet->flow, + rs_packets, rs_bytes, facet->used); + } +} + +struct ofproto_push { + struct action_xlate_ctx ctx; + uint64_t packets; + uint64_t bytes; + long long int used; +}; + +static void +push_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) +{ + struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx); + + if (rule) { + rule->packet_count += push->packets; + rule->byte_count += push->bytes; + rule->used = MAX(push->used, rule->used); + } +} + +/* Pushes flow statistics to the rules which 'flow' resubmits into given + * 'rule''s actions. */ +static void +flow_push_stats(struct ofproto *ofproto, const struct rule *rule, + struct flow *flow, uint64_t packets, uint64_t bytes, + long long int used) +{ + struct ofproto_push push; + + push.packets = packets; + push.bytes = bytes; + push.used = used; + + action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL); + push.ctx.resubmit_hook = push_resubmit; + ofpbuf_delete(xlate_actions(&push.ctx, rule->actions, rule->n_actions)); +} + /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * @@ -4273,10 +4338,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) return 0; /* Nicira extension requests. */ - case OFPUTIL_NXT_STATUS_REQUEST: - return switch_status_handle_request( - ofconn->ofproto->switch_status, ofconn->rconn, oh); - case OFPUTIL_NXT_TUN_ID_FROM_COOKIE: return handle_tun_id_from_cookie(ofconn, oh); @@ -4332,7 +4393,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPUTIL_OFPST_PORT_REPLY: case OFPUTIL_OFPST_TABLE_REPLY: case OFPUTIL_OFPST_AGGREGATE_REPLY: - case OFPUTIL_NXT_STATUS_REPLY: case OFPUTIL_NXT_ROLE_REPLY: case OFPUTIL_NXT_FLOW_REMOVED: case OFPUTIL_NXST_FLOW_REPLY: @@ -4373,13 +4433,19 @@ handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Set header pointers in 'flow'. 
*/ flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow); + if (p->ofhooks->special_cb + && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) { + ofpbuf_delete(upcall->packet); + return; + } + /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. */ if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) { struct ofpbuf odp_actions; ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL); + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, ODPP_LOCAL); dpif_execute(p->dpif, odp_actions.data, odp_actions.size, upcall->packet); ofpbuf_uninit(&odp_actions); @@ -4439,13 +4505,13 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) struct flow flow; switch (upcall->type) { - case _ODPL_ACTION_NR: + case DPIF_UC_ACTION: COVERAGE_INC(ofproto_ctlr_action); odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); send_packet_in(p, upcall, &flow, false); break; - case _ODPL_SFLOW_NR: + case DPIF_UC_SAMPLE: if (p->sflow) { odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); ofproto_sflow_received(p->sflow, upcall, &flow); @@ -4453,10 +4519,11 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) ofpbuf_delete(upcall->packet); break; - case _ODPL_MISS_NR: + case DPIF_UC_MISS: handle_miss_upcall(p, upcall); break; + case DPIF_N_UC_TYPES: default: VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); break; @@ -4466,7 +4533,7 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Flow expiration. */ static int ofproto_dp_max_idle(const struct ofproto *); -static void ofproto_update_used(struct ofproto *); +static void ofproto_update_stats(struct ofproto *); static void rule_expire(struct ofproto *, struct rule *); static void ofproto_expire_facets(struct ofproto *, int dp_max_idle); @@ -4483,8 +4550,8 @@ ofproto_expire(struct ofproto *ofproto) struct cls_cursor cursor; int dp_max_idle; - /* Update 'used' for each flow in the datapath. */ - ofproto_update_used(ofproto); + /* Update stats for each flow in the datapath. */ + ofproto_update_stats(ofproto); /* Expire facets that have been idle too long. */ dp_max_idle = ofproto_dp_max_idle(ofproto); @@ -4507,9 +4574,19 @@ ofproto_expire(struct ofproto *ofproto) return MIN(dp_max_idle, 1000); } -/* Update 'used' member of installed facets. */ +/* Update 'packet_count', 'byte_count', and 'used' members of installed facets. + * + * This function also pushes statistics updates to rules which each facet + * resubmits into. Generally these statistics will be accurate. However, if a + * facet changes the rule it resubmits into at some time in between + * ofproto_update_stats() runs, it is possible that statistics accrued to the + * old rule will be incorrectly attributed to the new rule. This could be + * avoided by calling ofproto_update_stats() whenever rules are created or + * deleted. However, the performance impact of making so many calls to the + * datapath do not justify the benefit of having perfectly accurate statistics. 
+ */ static void -ofproto_update_used(struct ofproto *p) +ofproto_update_stats(struct ofproto *p) { const struct dpif_flow_stats *stats; struct dpif_flow_dump dump; @@ -4535,8 +4612,25 @@ ofproto_update_used(struct ofproto *p) facet = facet_find(p, &flow); if (facet && facet->installed) { - facet_update_time(p, facet, stats); + + if (stats->n_packets >= facet->dp_packet_count) { + facet->packet_count += stats->n_packets - facet->dp_packet_count; + } else { + VLOG_WARN_RL(&rl, "unexpected packet count from the datapath"); + } + + if (stats->n_bytes >= facet->dp_byte_count) { + facet->byte_count += stats->n_bytes - facet->dp_byte_count; + } else { + VLOG_WARN_RL(&rl, "unexpected byte count from datapath"); + } + + facet->dp_packet_count = stats->n_packets; + facet->dp_byte_count = stats->n_bytes; + + facet_update_time(p, facet, stats->used); facet_account(p, facet, stats->n_bytes); + facet_push_stats(p, facet); } else { /* There's a flow in the datapath that we know nothing about. * Delete it. */ @@ -4579,7 +4673,7 @@ ofproto_dp_max_idle(const struct ofproto *ofproto) * they receive additional data). * * This requires a second pass through the facets, in addition to the pass - * made by ofproto_update_used(), because the former function never looks + * made by ofproto_update_stats(), because the former function never looks * at uninstallable facets. */ enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; @@ -4783,6 +4877,31 @@ rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) } } +/* Obtains statistics for 'rule' and stores them in '*packets' and '*bytes'. + * The returned statistics include statistics for all of 'rule''s facets. */ +static void +rule_get_stats(const struct rule *rule, uint64_t *packets, uint64_t *bytes) +{ + uint64_t p, b; + struct facet *facet; + + /* Start from historical data for 'rule' itself that are no longer tracked + * in facets. This counts, for example, facets that have expired. */ + p = rule->packet_count; + b = rule->byte_count; + + /* Add any statistics that are tracked by facets. This includes + * statistical data recently updated by ofproto_update_stats() as well as + * stats for packets that were executed "by hand" via dpif_execute(). */ + LIST_FOR_EACH (facet, list_node, &rule->facets) { + p += facet->packet_count; + b += facet->byte_count; + } + + *packets = p; + *bytes = b; +} + /* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */ static void do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_) @@ -4809,9 +4928,10 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, int total_len, send_len; struct ofpbuf *packet; uint32_t buffer_id; + int idx; /* Get OpenFlow buffer_id. */ - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { buffer_id = UINT32_MAX; } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { buffer_id = pktbuf_get_null(); @@ -4826,7 +4946,7 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, if (buffer_id != UINT32_MAX) { send_len = MIN(send_len, ofconn->miss_send_len); } - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { send_len = MIN(send_len, upcall->userdata); } @@ -4845,22 +4965,24 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, opi->header.type = OFPT_PACKET_IN; opi->total_len = htons(total_len); opi->in_port = htons(odp_port_to_ofp_port(flow->in_port)); - opi->reason = upcall->type == _ODPL_MISS_NR ? 
OFPR_NO_MATCH : OFPR_ACTION; + opi->reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; opi->buffer_id = htonl(buffer_id); update_openflow_length(packet); /* Hand over to packet scheduler. It might immediately call into * do_send_packet_in() or it might buffer it for a while (until a later * call to pinsched_run()). */ - pinsched_send(ofconn->schedulers[opi->reason], flow->in_port, + idx = upcall->type == DPIF_UC_MISS ? 0 : 1; + pinsched_send(ofconn->schedulers[idx], flow->in_port, packet, do_send_packet_in, ofconn); } -/* Given 'upcall', of type _ODPL_ACTION_NR or _ODPL_MISS_NR, sends an +/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to * their individual configurations. * - * Takes ownership of 'packet'. */ + * If 'clone' is true, the caller retains ownership of 'upcall->packet'. + * Otherwise, ownership is transferred to this function. */ static void send_packet_in(struct ofproto *ofproto, struct dpif_upcall *upcall, const struct flow *flow, bool clone) @@ -4969,7 +5091,7 @@ trace_format_flow(struct ds *result, int level, const char *title, } static void -trace_resubmit(struct action_xlate_ctx *ctx, const struct rule *rule) +trace_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) { struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx); struct ds *result = trace->result; @@ -5114,7 +5236,7 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, nf_output_iface, odp_actions); } else if (out_port != flow->in_port) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port); *nf_output_iface = out_port; } else { /* Drop. */ @@ -5126,5 +5248,6 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, static const struct ofhooks default_ofhooks = { default_normal_ofhook_cb, NULL, + NULL, NULL };
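
Reviewer's note (not part of the patch): the heart of this change is the new per-facet statistics bookkeeping. The datapath reports absolute per-flow counters, so ofproto_update_stats() folds only the delta since the last poll into the facet (tracked by dp_packet_count/dp_byte_count), and facet_push_stats()/push_resubmit() then credit the not-yet-pushed portion (tracked by rs_packet_count/rs_byte_count) to the rules the flow resubmits through. Below is a minimal, self-contained sketch of that accounting scheme under simplifying assumptions: the struct and function names (mini_rule, mini_facet, facet_fold_dp_stats, facet_push) are invented stand-ins, not the real ofproto types, and a facet is assumed to resubmit into exactly one rule rather than a chain discovered during action translation.

/* Standalone illustration only; compile with any C99 compiler. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct mini_rule {
    uint64_t packet_count;      /* Stats attributed to this rule. */
    uint64_t byte_count;
};

struct mini_facet {
    struct mini_rule *rule;     /* Rule this facet resubmits into. */
    uint64_t packet_count;      /* Totals known to userspace. */
    uint64_t byte_count;
    uint64_t dp_packet_count;   /* Last counters seen in the datapath. */
    uint64_t dp_byte_count;
    uint64_t rs_packet_count;   /* Portion already pushed to the rule. */
    uint64_t rs_byte_count;
};

/* Fold the datapath's absolute counters into the facet as a delta, in the
 * spirit of ofproto_update_stats(): the datapath reports running totals, so
 * only the increase since the last poll is added. */
static void
facet_fold_dp_stats(struct mini_facet *facet,
                    uint64_t dp_packets, uint64_t dp_bytes)
{
    if (dp_packets >= facet->dp_packet_count) {
        facet->packet_count += dp_packets - facet->dp_packet_count;
    }
    if (dp_bytes >= facet->dp_byte_count) {
        facet->byte_count += dp_bytes - facet->dp_byte_count;
    }
    facet->dp_packet_count = dp_packets;
    facet->dp_byte_count = dp_bytes;
}

/* Push whatever has not yet been credited to the facet's rule, in the spirit
 * of facet_push_stats() plus push_resubmit(). */
static void
facet_push(struct mini_facet *facet)
{
    uint64_t packets = facet->packet_count - facet->rs_packet_count;
    uint64_t bytes = facet->byte_count - facet->rs_byte_count;

    facet->rs_packet_count = facet->packet_count;
    facet->rs_byte_count = facet->byte_count;
    facet->rule->packet_count += packets;
    facet->rule->byte_count += bytes;
}

int
main(void)
{
    struct mini_rule rule = { 0, 0 };
    struct mini_facet facet = { &rule, 0, 0, 0, 0, 0, 0 };

    facet_fold_dp_stats(&facet, 10, 1000);  /* First poll: +10 pkts, +1000 B. */
    facet_push(&facet);
    facet_fold_dp_stats(&facet, 25, 2500);  /* Second poll: +15 pkts, +1500 B. */
    facet_push(&facet);

    assert(rule.packet_count == 25 && rule.byte_count == 2500);
    printf("rule: %llu packets, %llu bytes\n",
           (unsigned long long) rule.packet_count,
           (unsigned long long) rule.byte_count);
    return 0;
}

In the patch itself the same idea is applied along the whole resubmit chain: push_resubmit() runs as a resubmit_hook during action translation, so every rule visited by NXAST_RESUBMIT is credited, and rule_get_stats() can then replace the old per-facet datapath queries in query_stats() with a simple sum over facets.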