X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=ffeb952484eb98e6840c4e4c73b54f18c003a77e;hb=9b45d7f5d;hp=d6ed160132da3ed68e9e2738d0401cec7fd4e2fc;hpb=856081f683d3e7d5b5fa07af4233d285eb205c47;p=sliver-openvswitch.git diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index d6ed16013..ffeb95248 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -54,11 +54,11 @@ #include "poll-loop.h" #include "rconn.h" #include "shash.h" -#include "status.h" #include "stream-ssl.h" #include "svec.h" #include "tag.h" #include "timeval.h" +#include "unaligned.h" #include "unixctl.h" #include "vconn.h" #include "vlog.h" @@ -90,7 +90,9 @@ COVERAGE_DEFINE(ofproto_unexpected_rule); COVERAGE_DEFINE(ofproto_uninstallable); COVERAGE_DEFINE(ofproto_update_port); -#include "sflow_api.h" +/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a + * flow translation. */ +#define MAX_RESUBMIT_RECURSION 16 struct rule; @@ -122,7 +124,12 @@ struct action_xlate_ctx { * * This is normally null so the client has to set it manually after * calling action_xlate_ctx_init(). */ - void (*resubmit_hook)(struct action_xlate_ctx *, const struct rule *); + void (*resubmit_hook)(struct action_xlate_ctx *, struct rule *); + + /* If true, the speciality of 'flow' should be checked before executing + * its actions. If special_cb returns false on 'flow' rendered + * uninstallable and no actions will be executed. */ + bool check_special; /* xlate_actions() initializes and uses these members. The client might want * to look at them after it returns. */ @@ -138,7 +145,7 @@ struct action_xlate_ctx { int recurse; /* Recursion level, via xlate_table_action. */ int last_pop_priority; /* Offset in 'odp_actions' just past most - * recently added ODPAT_SET_PRIORITY. */ + * recent ODP_ACTION_ATTR_SET_PRIORITY. */ }; static void action_xlate_ctx_init(struct action_xlate_ctx *, @@ -194,6 +201,8 @@ static void rule_insert(struct ofproto *, struct rule *); static void rule_remove(struct ofproto *, struct rule *); static void rule_send_removed(struct ofproto *, struct rule *, uint8_t reason); +static void rule_get_stats(const struct rule *, uint64_t *packets, + uint64_t *bytes); /* An exact-match instantiation of an OpenFlow flow. */ struct facet { @@ -206,7 +215,8 @@ struct facet { * * - Do include packets and bytes that were obtained from the datapath * when a flow was deleted (e.g. dpif_flow_del()) or when its - * statistics were reset (e.g. dpif_flow_put() with ODPPF_ZERO_STATS). + * statistics were reset (e.g. dpif_flow_put() with + * DPIF_FP_ZERO_STATS). * * - Do not include any packets or bytes that can currently be obtained * from the datapath by, e.g., dpif_flow_get(). @@ -214,6 +224,13 @@ struct facet { uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ + uint64_t dp_packet_count; /* Last known packet count in the datapath. */ + uint64_t dp_byte_count; /* Last known byte count in the datapath. */ + + uint64_t rs_packet_count; /* Packets pushed to resubmit children. */ + uint64_t rs_byte_count; /* Bytes pushed to resubmit children. */ + long long int rs_used; /* Used time pushed to resubmit children. */ + /* Number of bytes passed to account_cb. This may include bytes that can * currently obtained from the datapath (thus, it can be greater than * byte_count). 
*/ @@ -248,7 +265,8 @@ static void facet_flush_stats(struct ofproto *, struct facet *); static void facet_make_actions(struct ofproto *, struct facet *, const struct ofpbuf *packet); static void facet_update_stats(struct ofproto *, struct facet *, - const struct odp_flow_stats *); + const struct dpif_flow_stats *); +static void facet_push_stats(struct ofproto *, struct facet *); /* ofproto supports two kinds of OpenFlow connections: * @@ -300,7 +318,8 @@ struct ofconn { /* OFPT_PACKET_IN related data. */ struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ - struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */ +#define N_SCHEDULERS 2 + struct pinsched *schedulers[N_SCHEDULERS]; struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ int miss_send_len; /* Bytes to send of buffered packets. */ @@ -314,19 +333,9 @@ struct ofconn { enum nx_role role; /* Role. */ struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */ struct discovery *discovery; /* Controller discovery object, if enabled. */ - struct status_category *ss; /* Switch status category. */ enum ofproto_band band; /* In-band or out-of-band? */ }; -/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's - * "schedulers" array. Their values are 0 and 1, and their meanings and values - * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In - * case anything ever changes, check their values here. */ -#define N_SCHEDULERS 2 -BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); -BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); -BUILD_ASSERT_DECL(OFPR_ACTION == 1); -BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, enum ofconn_type); @@ -362,7 +371,6 @@ struct ofproto { uint32_t max_ports; /* Configuration. */ - struct switch_status *switch_status; struct fail_open *fail_open; struct netflow *netflow; struct ofproto_sflow *sflow; @@ -412,6 +420,9 @@ static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); static int ofproto_expire(struct ofproto *); +static void flow_push_stats(struct ofproto *, const struct rule *, + struct flow *, uint64_t packets, uint64_t bytes, + long long int used); static void handle_upcall(struct ofproto *, struct dpif_upcall *); @@ -429,7 +440,6 @@ ofproto_create(const char *datapath, const char *datapath_type, const struct ofhooks *ofhooks, void *aux, struct ofproto **ofprotop) { - struct odp_stats stats; struct ofproto *p; struct dpif *dpif; int error; @@ -444,14 +454,10 @@ ofproto_create(const char *datapath, const char *datapath_type, VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; } - error = dpif_get_dp_stats(dpif, &stats); - if (error) { - VLOG_ERR("failed to obtain stats for datapath %s: %s", - datapath, strerror(error)); - dpif_close(dpif); - return error; - } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); + error = dpif_recv_set_mask(dpif, + ((1u << DPIF_UC_MISS) | + (1u << DPIF_UC_ACTION) | + (1u << DPIF_UC_SAMPLE))); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -476,10 +482,9 @@ ofproto_create(const char *datapath, const char *datapath_type, p->netdev_monitor = netdev_monitor_create(); hmap_init(&p->ports); shash_init(&p->port_by_name); - p->max_ports = stats.max_ports; + p->max_ports = dpif_get_max_ports(dpif); /* Initialize submodules. 
*/ - p->switch_status = switch_status_create(p); p->fail_open = NULL; p->netflow = NULL; p->sflow = NULL; @@ -562,8 +567,7 @@ add_controller(struct ofproto *ofproto, const struct ofproto_controller *c) if (is_discovery_controller(c)) { int error = discovery_create(c->accept_re, c->update_resolv_conf, - ofproto->dpif, ofproto->switch_status, - &discovery); + ofproto->dpif, &discovery); if (error) { return; } @@ -674,8 +678,7 @@ update_in_band_remotes(struct ofproto *ofproto) * even before we know any remote addresses. */ if (n_addrs || discovery) { if (!ofproto->in_band) { - in_band_create(ofproto, ofproto->dpif, ofproto->switch_status, - &ofproto->in_band); + in_band_create(ofproto, ofproto->dpif, &ofproto->in_band); } if (ofproto->in_band) { in_band_set_remotes(ofproto->in_band, addrs, n_addrs); @@ -702,7 +705,7 @@ update_fail_open(struct ofproto *p) size_t n; if (!p->fail_open) { - p->fail_open = fail_open_create(p, p->switch_status); + p->fail_open = fail_open_create(p); } n = 0; @@ -727,7 +730,6 @@ ofproto_set_controllers(struct ofproto *p, struct shash new_controllers; struct ofconn *ofconn, *next_ofconn; struct ofservice *ofservice, *next_ofservice; - bool ss_exists; size_t i; /* Create newly configured controllers and services. @@ -755,7 +757,6 @@ ofproto_set_controllers(struct ofproto *p, /* Delete controllers that are no longer configured. * Update configuration of all now-existing controllers. */ - ss_exists = false; HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, hmap_node, &p->controllers) { struct ofproto_controller *c; @@ -764,9 +765,6 @@ ofproto_set_controllers(struct ofproto *p, ofconn_destroy(ofconn); } else { update_controller(ofconn, c); - if (ofconn->ss) { - ss_exists = true; - } } } @@ -788,13 +786,6 @@ ofproto_set_controllers(struct ofproto *p, update_in_band_remotes(p); update_fail_open(p); - - if (!hmap_is_empty(&p->controllers) && !ss_exists) { - ofconn = CONTAINER_OF(hmap_first(&p->controllers), - struct ofconn, hmap_node); - ofconn->ss = switch_status_register(p->switch_status, "remote", - rconn_status_cb, ofconn->rconn); - } } void @@ -1069,7 +1060,6 @@ ofproto_destroy(struct ofproto *p) } shash_destroy(&p->port_by_name); - switch_status_destroy(p->switch_status); netflow_destroy(p->netflow); ofproto_sflow_destroy(p->sflow); @@ -1356,7 +1346,7 @@ ofproto_is_alive(const struct ofproto *p) } void -ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, +ofproto_get_ofproto_controller_info(const struct ofproto *ofproto, struct shash *info) { const struct ofconn *ofconn; @@ -1365,6 +1355,9 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, HMAP_FOR_EACH (ofconn, hmap_node, &ofproto->controllers) { const struct rconn *rconn = ofconn->rconn; + time_t now = time_now(); + time_t last_connection = rconn_get_last_connection(rconn); + time_t last_disconnect = rconn_get_last_disconnect(rconn); const int last_error = rconn_get_last_error(rconn); struct ofproto_controller_info *cinfo = xmalloc(sizeof *cinfo); @@ -1375,22 +1368,27 @@ ofproto_get_ofproto_controller_info(const struct ofproto * ofproto, cinfo->pairs.n = 0; - if (last_error == EOF) { - cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; - cinfo->pairs.values[cinfo->pairs.n++] = xstrdup("End of file"); - } else if (last_error > 0) { + if (last_error) { cinfo->pairs.keys[cinfo->pairs.n] = "last_error"; cinfo->pairs.values[cinfo->pairs.n++] = - xstrdup(strerror(last_error)); + xstrdup(ovs_retval_to_string(last_error)); } cinfo->pairs.keys[cinfo->pairs.n] = "state"; 
cinfo->pairs.values[cinfo->pairs.n++] = xstrdup(rconn_get_state(rconn)); - cinfo->pairs.keys[cinfo->pairs.n] = "time_in_state"; - cinfo->pairs.values[cinfo->pairs.n++] = - xasprintf("%u", rconn_get_state_elapsed(rconn)); + if (last_connection != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_connect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_connection)); + } + + if (last_disconnect != TIME_MIN) { + cinfo->pairs.keys[cinfo->pairs.n] = "sec_since_disconnect"; + cinfo->pairs.values[cinfo->pairs.n++] + = xasprintf("%ld", (long int) (now - last_disconnect)); + } } } @@ -1460,6 +1458,8 @@ ofproto_send_packet(struct ofproto *p, const struct flow *flow, struct ofpbuf *odp_actions; action_xlate_ctx_init(&ctx, p, flow, packet); + /* Always xlate packets originated in this function. */ + ctx.check_special = false; odp_actions = xlate_actions(&ctx, actions, n_actions); /* XXX Should we translate the dpif_execute() errno value into an OpenFlow @@ -1516,6 +1516,8 @@ ofproto_flush_flows(struct ofproto *ofproto) * individually since we are about to blow away all the facets with * dpif_flow_flush(). */ facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; facet_remove(ofproto, facet); } @@ -1536,12 +1538,11 @@ ofproto_flush_flows(struct ofproto *ofproto) static void reinit_ports(struct ofproto *p) { + struct dpif_port_dump dump; struct shash_node *node; struct shash devnames; struct ofport *ofport; - struct odp_port *odp_ports; - size_t n_odp_ports; - size_t i; + struct dpif_port dpif_port; COVERAGE_INC(ofproto_reinit_ports); @@ -1549,11 +1550,9 @@ reinit_ports(struct ofproto *p) HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { shash_add_once (&devnames, ofport->opp.name, NULL); } - dpif_port_list(p->dpif, &odp_ports, &n_odp_ports); - for (i = 0; i < n_odp_ports; i++) { - shash_add_once (&devnames, odp_ports[i].devname, NULL); + DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) { + shash_add_once (&devnames, dpif_port.name, NULL); } - free(odp_ports); SHASH_FOR_EACH (node, &devnames) { update_port(p, node->name); @@ -1562,7 +1561,7 @@ reinit_ports(struct ofproto *p) } static struct ofport * -make_ofport(const struct odp_port *odp_port) +make_ofport(const struct dpif_port *dpif_port) { struct netdev_options netdev_options; enum netdev_flags flags; @@ -1571,27 +1570,25 @@ make_ofport(const struct odp_port *odp_port) int error; memset(&netdev_options, 0, sizeof netdev_options); - netdev_options.name = odp_port->devname; - netdev_options.type = odp_port->type; + netdev_options.name = dpif_port->name; + netdev_options.type = dpif_port->type; netdev_options.ethertype = NETDEV_ETH_TYPE_NONE; error = netdev_open(&netdev_options, &netdev); if (error) { VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s " "cannot be opened (%s)", - odp_port->devname, odp_port->port, - odp_port->devname, strerror(error)); + dpif_port->name, dpif_port->port_no, + dpif_port->name, strerror(error)); return NULL; } - ofport = xmalloc(sizeof *ofport); + ofport = xzalloc(sizeof *ofport); ofport->netdev = netdev; - ofport->odp_port = odp_port->port; - ofport->opp.port_no = odp_port_to_ofp_port(odp_port->port); + ofport->odp_port = dpif_port->port_no; + ofport->opp.port_no = odp_port_to_ofp_port(dpif_port->port_no); netdev_get_etheraddr(netdev, ofport->opp.hw_addr); - memcpy(ofport->opp.name, odp_port->devname, - MIN(sizeof ofport->opp.name, sizeof odp_port->devname)); - ofport->opp.name[sizeof ofport->opp.name - 1] = '\0'; + 
ovs_strlcpy(ofport->opp.name, dpif_port->name, sizeof ofport->opp.name); netdev_get_flags(netdev, &flags); ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN; @@ -1605,15 +1602,15 @@ make_ofport(const struct odp_port *odp_port) } static bool -ofport_conflicts(const struct ofproto *p, const struct odp_port *odp_port) +ofport_conflicts(const struct ofproto *p, const struct dpif_port *dpif_port) { - if (get_port(p, odp_port->port)) { + if (get_port(p, dpif_port->port_no)) { VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath", - odp_port->port); + dpif_port->port_no); return true; - } else if (shash_find(&p->port_by_name, odp_port->devname)) { + } else if (shash_find(&p->port_by_name, dpif_port->name)) { VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath", - odp_port->devname); + dpif_port->name); return true; } else { return false; @@ -1714,7 +1711,7 @@ get_port(const struct ofproto *ofproto, uint16_t odp_port) static void update_port(struct ofproto *p, const char *devname) { - struct odp_port odp_port; + struct dpif_port dpif_port; struct ofport *old_ofport; struct ofport *new_ofport; int error; @@ -1722,7 +1719,7 @@ update_port(struct ofproto *p, const char *devname) COVERAGE_INC(ofproto_update_port); /* Query the datapath for port information. */ - error = dpif_port_query_by_name(p->dpif, devname, &odp_port); + error = dpif_port_query_by_name(p->dpif, devname, &dpif_port); /* Find the old ofport. */ old_ofport = shash_find_data(&p->port_by_name, devname); @@ -1738,20 +1735,20 @@ update_port(struct ofproto *p, const char *devname) * reliably but more portably by comparing the old port's MAC * against the new port's MAC. However, this code isn't that smart * and always sends an OFPPR_MODIFY (XXX). */ - old_ofport = get_port(p, odp_port.port); + old_ofport = get_port(p, dpif_port.port_no); } } else if (error != ENOENT && error != ENODEV) { VLOG_WARN_RL(&rl, "dpif_port_query_by_name returned unexpected error " "%s", strerror(error)); - return; + goto exit; } /* Create a new ofport. */ - new_ofport = !error ? make_ofport(&odp_port) : NULL; + new_ofport = !error ? make_ofport(&dpif_port) : NULL; /* Eliminate a few pathological cases. */ if (!old_ofport && !new_ofport) { - return; + goto exit; } else if (old_ofport && new_ofport) { /* Most of the 'config' bits are OpenFlow soft state, but * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the @@ -1762,7 +1759,7 @@ update_port(struct ofproto *p, const char *devname) if (ofport_equal(old_ofport, new_ofport)) { /* False alarm--no change. */ ofport_free(new_ofport); - return; + goto exit; } } @@ -1778,31 +1775,26 @@ update_port(struct ofproto *p, const char *devname) : !new_ofport ? 
OFPPR_DELETE : OFPPR_MODIFY)); ofport_free(old_ofport); + +exit: + dpif_port_destroy(&dpif_port); } static int init_ports(struct ofproto *p) { - struct odp_port *ports; - size_t n_ports; - size_t i; - int error; + struct dpif_port_dump dump; + struct dpif_port dpif_port; - error = dpif_port_list(p->dpif, &ports, &n_ports); - if (error) { - return error; - } - - for (i = 0; i < n_ports; i++) { - const struct odp_port *odp_port = &ports[i]; - if (!ofport_conflicts(p, odp_port)) { - struct ofport *ofport = make_ofport(odp_port); + DPIF_PORT_FOR_EACH (&dpif_port, &dump, p->dpif) { + if (!ofport_conflicts(p, &dpif_port)) { + struct ofport *ofport = make_ofport(&dpif_port); if (ofport) { ofport_install(p, ofport); } } } - free(ports); + return 0; } @@ -1832,7 +1824,6 @@ ofconn_destroy(struct ofconn *ofconn) discovery_destroy(ofconn->discovery); list_remove(&ofconn->node); - switch_status_unregister(ofconn->ss); rconn_destroy(ofconn->rconn); rconn_packet_counter_destroy(ofconn->packet_in_counter); rconn_packet_counter_destroy(ofconn->reply_counter); @@ -1857,6 +1848,7 @@ ofconn_run(struct ofconn *ofconn) char *ofconn_name = ofconn_make_name(p, controller_name); rconn_connect(ofconn->rconn, controller_name, ofconn_name); free(ofconn_name); + free(controller_name); } else { rconn_disconnect(ofconn->rconn); } @@ -1946,8 +1938,7 @@ ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst) if (rate > 0) { if (!*s) { - *s = pinsched_create(rate, burst, - ofconn->ofproto->switch_status); + *s = pinsched_create(rate, burst); } else { pinsched_set_limits(*s, rate, burst); } @@ -2110,13 +2101,13 @@ execute_odp_actions(struct ofproto *ofproto, const struct flow *flow, struct ofpbuf *packet) { if (actions_len == NLA_ALIGN(NLA_HDRLEN + sizeof(uint64_t)) - && odp_actions->nla_type == ODPAT_CONTROLLER) { + && odp_actions->nla_type == ODP_ACTION_ATTR_CONTROLLER) { /* As an optimization, avoid a round-trip from userspace to kernel to * userspace. This also avoids possibly filling up kernel packet * buffers along the way. 
*/ struct dpif_upcall upcall; - upcall.type = _ODPL_ACTION_NR; + upcall.type = DPIF_UC_ACTION; upcall.packet = packet; upcall.key = NULL; upcall.key_len = 0; @@ -2153,17 +2144,15 @@ static void facet_execute(struct ofproto *ofproto, struct facet *facet, struct ofpbuf *packet) { - struct odp_flow_stats stats; + struct dpif_flow_stats stats; assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in)); flow_extract_stats(&facet->flow, packet, &stats); + stats.used = time_msec(); if (execute_odp_actions(ofproto, &facet->flow, facet->actions, facet->actions_len, packet)) { facet_update_stats(ofproto, facet, &stats); - facet->used = time_msec(); - netflow_flow_update_time(ofproto->netflow, - &facet->nf_flow, facet->used); } } @@ -2215,6 +2204,7 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, uint16_t in_port, rule->used = time_msec(); rule->packet_count++; rule->byte_count += size; + flow_push_stats(ofproto, rule, &flow, 1, size, rule->used); } ofpbuf_delete(odp_actions); } @@ -2324,24 +2314,27 @@ facet_make_actions(struct ofproto *p, struct facet *facet, } static int -facet_put__(struct ofproto *ofproto, struct facet *facet, int flags, - struct odp_flow_put *put) +facet_put__(struct ofproto *ofproto, struct facet *facet, + const struct nlattr *actions, size_t actions_len, + struct dpif_flow_stats *stats) { uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + enum dpif_flow_put_flags flags; struct ofpbuf key; + flags = DPIF_FP_CREATE | DPIF_FP_MODIFY; + if (stats) { + flags |= DPIF_FP_ZERO_STATS; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } + ofpbuf_use_stack(&key, keybuf, sizeof keybuf); odp_flow_key_from_flow(&key, &facet->flow); assert(key.base == keybuf); - memset(&put->flow.stats, 0, sizeof put->flow.stats); - put->flow.key = key.data; - put->flow.key_len = key.size; - put->flow.actions = facet->actions; - put->flow.actions_len = facet->actions_len; - put->flow.flags = 0; - put->flags = flags; - return dpif_flow_put(ofproto->dpif, put); + return dpif_flow_put(ofproto->dpif, flags, key.data, key.size, + actions, actions_len, stats); } /* If 'facet' is installable, inserts or re-inserts it into 'p''s datapath. If @@ -2350,17 +2343,12 @@ facet_put__(struct ofproto *ofproto, struct facet *facet, int flags, static void facet_install(struct ofproto *p, struct facet *facet, bool zero_stats) { - if (facet->may_install) { - struct odp_flow_put put; - int flags; + struct dpif_flow_stats stats; - flags = ODPPF_CREATE | ODPPF_MODIFY; - if (zero_stats) { - flags |= ODPPF_ZERO_STATS; - } - if (!facet_put__(p, facet, flags, &put)) { - facet->installed = true; - } + if (facet->may_install + && !facet_put__(p, facet, facet->actions, facet->actions_len, + zero_stats ? 
&stats : NULL)) { + facet->installed = true; } } @@ -2388,22 +2376,22 @@ facet_uninstall(struct ofproto *p, struct facet *facet) { if (facet->installed) { uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - struct odp_flow odp_flow; + struct dpif_flow_stats stats; struct ofpbuf key; ofpbuf_use_stack(&key, keybuf, sizeof keybuf); odp_flow_key_from_flow(&key, &facet->flow); assert(key.base == keybuf); - odp_flow.key = key.data; - odp_flow.key_len = key.size; - odp_flow.actions = NULL; - odp_flow.actions_len = 0; - odp_flow.flags = 0; - if (!dpif_flow_del(p->dpif, &odp_flow)) { - facet_update_stats(p, facet, &odp_flow.stats); + if (!dpif_flow_del(p->dpif, key.data, key.size, &stats)) { + facet_update_stats(p, facet, &stats); } facet->installed = false; + facet->dp_packet_count = 0; + facet->dp_byte_count = 0; + } else { + assert(facet->dp_packet_count == 0); + assert(facet->dp_byte_count == 0); } } @@ -2420,10 +2408,16 @@ facet_is_controller_flow(struct facet *facet) } /* Folds all of 'facet''s statistics into its rule. Also updates the - * accounting ofhook and emits a NetFlow expiration if appropriate. */ + * accounting ofhook and emits a NetFlow expiration if appropriate. All of + * 'facet''s statistics in the datapath should have been zeroed and folded into + * its packet and byte counts before this function is called. */ static void facet_flush_stats(struct ofproto *ofproto, struct facet *facet) { + assert(!facet->dp_byte_count); + assert(!facet->dp_packet_count); + + facet_push_stats(ofproto, facet); facet_account(ofproto, facet, 0); if (ofproto->netflow && !facet_is_controller_flow(facet)) { @@ -2442,6 +2436,8 @@ facet_flush_stats(struct ofproto *ofproto, struct facet *facet) * reinstalled. */ facet->packet_count = 0; facet->byte_count = 0; + facet->rs_packet_count = 0; + facet->rs_byte_count = 0; facet->accounted_bytes = 0; netflow_flow_clear(&facet->nf_flow); @@ -2530,25 +2526,13 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) /* If the ODP actions changed or the installability changed, then we need * to talk to the datapath. */ - if (actions_changed || facet->may_install != facet->installed) { - if (facet->may_install) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - struct odp_flow_put put; - struct ofpbuf key; - - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &facet->flow); - - memset(&put.flow.stats, 0, sizeof put.flow.stats); - put.flow.key = key.data; - put.flow.key_len = key.size; - put.flow.actions = odp_actions->data; - put.flow.actions_len = odp_actions->size; - put.flow.flags = 0; - put.flags = ODPPF_CREATE | ODPPF_MODIFY | ODPPF_ZERO_STATS; - dpif_flow_put(ofproto->dpif, &put); - - facet_update_stats(ofproto, facet, &put.flow.stats); + if (actions_changed || ctx.may_set_up_flow != facet->installed) { + if (ctx.may_set_up_flow) { + struct dpif_flow_stats stats; + + facet_put__(ofproto, facet, + odp_actions->data, odp_actions->size, &stats); + facet_update_stats(ofproto, facet, &stats); } else { facet_uninstall(ofproto, facet); } @@ -2573,6 +2557,7 @@ facet_revalidate(struct ofproto *ofproto, struct facet *facet) list_push_back(&new_rule->facets, &facet->list_node); facet->rule = new_rule; facet->used = new_rule->created; + facet->rs_used = facet->used; } ofpbuf_delete(odp_actions); @@ -2700,10 +2685,6 @@ handle_set_config(struct ofconn *ofconn, const struct ofp_switch_config *osc) return 0; } -/* Maximum depth of flow table recursion (due to NXAST_RESUBMIT actions) in a - * flow translation. 
*/ -#define MAX_RESUBMIT_RECURSION 16 - static void do_xlate_actions(const union ofp_action *in, size_t n_in, struct action_xlate_ctx *ctx); @@ -2725,7 +2706,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) */ } - nl_msg_put_u32(ctx->odp_actions, ODPAT_OUTPUT, port); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_OUTPUT, port); ctx->nf_output_iface = port; } @@ -2776,7 +2757,7 @@ flood_packets(struct ofproto *ofproto, uint16_t odp_in_port, uint32_t mask, HMAP_FOR_EACH (ofport, hmap_node, &ofproto->ports) { uint16_t odp_port = ofport->odp_port; if (odp_port != odp_in_port && !(ofport->opp.config & mask)) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, odp_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, odp_port); } } *nf_output_iface = NF_OUT_FLOOD; @@ -2816,7 +2797,7 @@ xlate_output_action__(struct action_xlate_ctx *ctx, &ctx->nf_output_iface, ctx->odp_actions); break; case OFPP_CONTROLLER: - nl_msg_put_u64(ctx->odp_actions, ODPAT_CONTROLLER, max_len); + nl_msg_put_u64(ctx->odp_actions, ODP_ACTION_ATTR_CONTROLLER, max_len); break; case OFPP_LOCAL: add_output_action(ctx, ODPP_LOCAL); @@ -2863,7 +2844,7 @@ static void add_pop_action(struct action_xlate_ctx *ctx) { if (ctx->odp_actions->size != ctx->last_pop_priority) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_POP_PRIORITY); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_POP_PRIORITY); ctx->last_pop_priority = ctx->odp_actions->size; } } @@ -2894,7 +2875,7 @@ xlate_enqueue_action(struct action_xlate_ctx *ctx, /* Add ODP actions. */ remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); add_output_action(ctx, odp_port); add_pop_action(ctx); @@ -2922,7 +2903,7 @@ xlate_set_queue_action(struct action_xlate_ctx *ctx, } remove_pop_action(ctx); - nl_msg_put_u32(ctx->odp_actions, ODPAT_SET_PRIORITY, priority); + nl_msg_put_u32(ctx->odp_actions, ODP_ACTION_ATTR_SET_PRIORITY, priority); } static void @@ -2930,9 +2911,9 @@ xlate_set_dl_tci(struct action_xlate_ctx *ctx) { ovs_be16 tci = ctx->flow.vlan_tci; if (!(tci & htons(VLAN_CFI))) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_STRIP_VLAN); + nl_msg_put_flag(ctx->odp_actions, ODP_ACTION_ATTR_STRIP_VLAN); } else { - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_DL_TCI, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_TCI, tci & ~htons(VLAN_CFI)); } } @@ -2958,7 +2939,8 @@ update_reg_state(struct action_xlate_ctx *ctx, xlate_set_dl_tci(ctx); } if (ctx->flow.tun_id != state->tun_id) { - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, ctx->flow.tun_id); + nl_msg_put_be64(ctx->odp_actions, + ODP_ACTION_ATTR_SET_TUNNEL, ctx->flow.tun_id); } } @@ -2984,13 +2966,14 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL: nast = (const struct nx_action_set_tunnel *) nah; tun_id = htonll(ntohl(nast->tun_id)); - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; case NXAST_DROP_SPOOFED_ARP: if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { - nl_msg_put_flag(ctx->odp_actions, ODPAT_DROP_SPOOFED_ARP); + nl_msg_put_flag(ctx->odp_actions, + ODP_ACTION_ATTR_DROP_SPOOFED_ARP); } break; @@ -3023,7 +3006,7 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, case NXAST_SET_TUNNEL64: tun_id = ((const struct nx_action_set_tunnel64 *) nah)->tun_id; - nl_msg_put_be64(ctx->odp_actions, ODPAT_SET_TUNNEL, tun_id); + 
nl_msg_put_be64(ctx->odp_actions, ODP_ACTION_ATTR_SET_TUNNEL, tun_id); ctx->flow.tun_id = tun_id; break; @@ -3087,44 +3070,44 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_DL_SRC: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_SRC, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_SRC, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oada = ((struct ofp_action_dl_addr *) ia); - nl_msg_put_unspec(ctx->odp_actions, ODPAT_SET_DL_DST, + nl_msg_put_unspec(ctx->odp_actions, ODP_ACTION_ATTR_SET_DL_DST, oada->dl_addr, ETH_ADDR_LEN); memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_SRC, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_SRC, ia->nw_addr.nw_addr); ctx->flow.nw_src = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_DST: - nl_msg_put_be32(ctx->odp_actions, ODPAT_SET_NW_DST, + nl_msg_put_be32(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_DST, ia->nw_addr.nw_addr); ctx->flow.nw_dst = ia->nw_addr.nw_addr; break; case OFPAT_SET_NW_TOS: - nl_msg_put_u8(ctx->odp_actions, ODPAT_SET_NW_TOS, + nl_msg_put_u8(ctx->odp_actions, ODP_ACTION_ATTR_SET_NW_TOS, ia->nw_tos.nw_tos); ctx->flow.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_SRC, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_SRC, ia->tp_port.tp_port); ctx->flow.tp_src = ia->tp_port.tp_port; break; case OFPAT_SET_TP_DST: - nl_msg_put_be16(ctx->odp_actions, ODPAT_SET_TP_DST, + nl_msg_put_be16(ctx->odp_actions, ODP_ACTION_ATTR_SET_TP_DST, ia->tp_port.tp_port); ctx->flow.tp_dst = ia->tp_port.tp_port; break; @@ -3153,6 +3136,7 @@ action_xlate_ctx_init(struct action_xlate_ctx *ctx, ctx->flow = *flow; ctx->packet = packet; ctx->resubmit_hook = NULL; + ctx->check_special = true; } static struct ofpbuf * @@ -3167,7 +3151,16 @@ xlate_actions(struct action_xlate_ctx *ctx, ctx->nf_output_iface = NF_OUT_DROP; ctx->recurse = 0; ctx->last_pop_priority = -1; - do_xlate_actions(in, n_in, ctx); + + if (!ctx->check_special + || !ctx->ofproto->ofhooks->special_cb + || ctx->ofproto->ofhooks->special_cb(&ctx->flow, ctx->packet, + ctx->ofproto->aux)) { + do_xlate_actions(in, n_in, ctx); + } else { + ctx->may_set_up_flow = false; + } + remove_pop_action(ctx); /* Check with in-band control to see if we're allowed to set up this @@ -3427,8 +3420,8 @@ handle_table_stats_request(struct ofconn *ofconn, ? htonl(OFPFW_ALL) : htonl(OVSFW_ALL)); ots->max_entries = htonl(1024 * 1024); /* An arbitrary big number. 
*/ ots->active_count = htonl(classifier_count(&p->cls)); - ots->lookup_count = htonll(0); /* XXX */ - ots->matched_count = htonll(0); /* XXX */ + put_32aligned_be64(&ots->lookup_count, htonll(0)); /* XXX */ + put_32aligned_be64(&ots->matched_count, htonll(0)); /* XXX */ queue_tx(msg, ofconn, ofconn->reply_counter); return 0; @@ -3449,18 +3442,18 @@ append_port_stat(struct ofport *port, struct ofconn *ofconn, ops = append_ofp_stats_reply(sizeof *ops, ofconn, msgp); ops->port_no = htons(port->opp.port_no); memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + put_32aligned_be64(&ops->rx_packets, htonll(stats.rx_packets)); + put_32aligned_be64(&ops->tx_packets, htonll(stats.tx_packets)); + put_32aligned_be64(&ops->rx_bytes, htonll(stats.rx_bytes)); + put_32aligned_be64(&ops->tx_bytes, htonll(stats.tx_bytes)); + put_32aligned_be64(&ops->rx_dropped, htonll(stats.rx_dropped)); + put_32aligned_be64(&ops->tx_dropped, htonll(stats.tx_dropped)); + put_32aligned_be64(&ops->rx_errors, htonll(stats.rx_errors)); + put_32aligned_be64(&ops->tx_errors, htonll(stats.tx_errors)); + put_32aligned_be64(&ops->rx_frame_err, htonll(stats.rx_frame_errors)); + put_32aligned_be64(&ops->rx_over_err, htonll(stats.rx_over_errors)); + put_32aligned_be64(&ops->rx_crc_err, htonll(stats.rx_crc_errors)); + put_32aligned_be64(&ops->collisions, htonll(stats.collisions)); } static int @@ -3488,57 +3481,6 @@ handle_port_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) return 0; } -/* Obtains statistic counters for 'rule' within 'p' and stores them into - * '*packet_countp' and '*byte_countp'. The returned statistics include - * statistics for all of 'rule''s facets. */ -static void -query_stats(struct ofproto *p, struct rule *rule, - uint64_t *packet_countp, uint64_t *byte_countp) -{ - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - uint64_t packet_count, byte_count; - struct facet *facet; - struct ofpbuf key; - - /* Start from historical data for 'rule' itself that are no longer tracked - * by the datapath. This counts, for example, facets that have expired. */ - packet_count = rule->packet_count; - byte_count = rule->byte_count; - - /* Ask the datapath for statistics on all of the rule's facets. (We could - * batch up statistics requests using dpif_flow_get_multiple(), but that is - * not yet implemented.) - * - * Also, add any statistics that are not tracked by the datapath for each - * facet. This includes, for example, statistics for packets that were - * executed "by hand" by ofproto via dpif_execute() but must be accounted - * to a rule. 
*/ - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - LIST_FOR_EACH (facet, list_node, &rule->facets) { - struct odp_flow odp_flow; - - ofpbuf_clear(&key); - odp_flow_key_from_flow(&key, &facet->flow); - - odp_flow.key = key.data; - odp_flow.key_len = key.size; - odp_flow.actions = NULL; - odp_flow.actions_len = 0; - odp_flow.flags = 0; - if (!dpif_flow_get(p->dpif, &odp_flow)) { - packet_count += odp_flow.stats.n_packets; - byte_count += odp_flow.stats.n_bytes; - } - - packet_count += facet->packet_count; - byte_count += facet->byte_count; - } - - /* Return the stats to the caller. */ - *packet_countp = packet_count; - *byte_countp = byte_count; -} - static void calc_flow_duration(long long int start, ovs_be32 *sec, ovs_be32 *nsec) { @@ -3553,6 +3495,7 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, { struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; + ovs_be64 cookie; size_t act_len, len; if (rule_is_hidden(rule) || !rule_has_out_port(rule, out_port)) { @@ -3562,21 +3505,22 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, act_len = sizeof *rule->actions * rule->n_actions; len = offsetof(struct ofp_flow_stats, actions) + act_len; - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ofs = append_ofp_stats_reply(len, ofconn, replyp); ofs->length = htons(len); ofs->table_id = 0; ofs->pad = 0; ofputil_cls_rule_to_match(&rule->cr, ofconn->flow_format, &ofs->match, - rule->flow_cookie, &ofs->cookie); + rule->flow_cookie, &cookie); + put_32aligned_be64(&ofs->cookie, cookie); calc_flow_duration(rule->created, &ofs->duration_sec, &ofs->duration_nsec); ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); memset(ofs->pad2, 0, sizeof ofs->pad2); - ofs->packet_count = htonll(packet_count); - ofs->byte_count = htonll(byte_count); + put_32aligned_be64(&ofs->packet_count, htonll(packet_count)); + put_32aligned_be64(&ofs->byte_count, htonll(byte_count)); if (rule->n_actions > 0) { memcpy(ofs->actions, rule->actions, act_len); } @@ -3585,7 +3529,16 @@ put_ofp_flow_stats(struct ofconn *ofconn, struct rule *rule, static bool is_valid_table(uint8_t table_id) { - return table_id == 0 || table_id == 0xff; + if (table_id == 0 || table_id == 0xff) { + return true; + } else { + /* It would probably be better to reply with an error but there doesn't + * seem to be any appropriate value, so that might just be + * confusing. 
*/ + VLOG_WARN_RL(&rl, "controller asked for invalid table %"PRIu8, + table_id); + return false; + } } static int @@ -3626,7 +3579,7 @@ put_nx_flow_stats(struct ofconn *ofconn, struct rule *rule, return; } - query_stats(ofconn->ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); act_len = sizeof *rule->actions * rule->n_actions; @@ -3690,19 +3643,21 @@ handle_nxst_flow(struct ofconn *ofconn, const struct ofp_header *oh) } static void -flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) +flow_stats_ds(struct rule *rule, struct ds *results) { uint64_t packet_count, byte_count; size_t act_len = sizeof *rule->actions * rule->n_actions; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); ds_put_format(results, "duration=%llds, ", (time_msec() - rule->created) / 1000); + ds_put_format(results, "idle=%.3fs, ", (time_msec() - rule->used) / 1000.0); ds_put_format(results, "priority=%u, ", rule->cr.priority); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, results); + ds_put_char(results, ','); if (act_len > 0) { ofp_print_actions(results, &rule->actions->header, act_len); } else { @@ -3712,7 +3667,7 @@ flow_stats_ds(struct ofproto *ofproto, struct rule *rule, struct ds *results) } /* Adds a pretty-printed description of all flows to 'results', including - * those marked hidden by secchan (e.g., by in-band control). */ + * hidden flows (e.g., set up by in-band control). */ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) { @@ -3721,7 +3676,7 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results) cls_cursor_init(&cursor, &p->cls, NULL); CLS_CURSOR_FOR_EACH (rule, cr, &cursor) { - flow_stats_ds(p, rule, results); + flow_stats_ds(rule, results); } } @@ -3746,7 +3701,7 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, uint64_t packet_count; uint64_t byte_count; - query_stats(ofproto, rule, &packet_count, &byte_count); + rule_get_stats(rule, &packet_count, &byte_count); total_packets += packet_count; total_bytes += byte_count; @@ -3756,8 +3711,8 @@ query_aggregate_stats(struct ofproto *ofproto, struct cls_rule *target, } oasr->flow_count = htonl(n_flows); - oasr->packet_count = htonll(total_packets); - oasr->byte_count = htonll(total_bytes); + put_32aligned_be64(&oasr->packet_count, htonll(total_packets)); + put_32aligned_be64(&oasr->byte_count, htonll(total_bytes)); memset(oasr->pad, 0, sizeof oasr->pad); } @@ -3830,9 +3785,9 @@ put_queue_stats(struct queue_stats_cbdata *cbdata, uint32_t queue_id, reply->port_no = htons(cbdata->ofport->opp.port_no); memset(reply->pad, 0, sizeof reply->pad); reply->queue_id = htonl(queue_id); - reply->tx_bytes = htonll(stats->tx_bytes); - reply->tx_packets = htonll(stats->tx_packets); - reply->tx_errors = htonll(stats->tx_errors); + put_32aligned_be64(&reply->tx_bytes, htonll(stats->tx_bytes)); + put_32aligned_be64(&reply->tx_packets, htonll(stats->tx_packets)); + put_32aligned_be64(&reply->tx_errors, htonll(stats->tx_errors)); } static void @@ -3902,17 +3857,12 @@ handle_queue_stats_request(struct ofconn *ofconn, const struct ofp_header *oh) return 0; } -static long long int -msec_from_nsec(uint64_t sec, uint32_t nsec) -{ - return !sec ? 0 : sec * 1000 + nsec / 1000000; -} - +/* Updates 'facet''s used time. 
Caller is responsible for calling + * facet_push_stats() to update the flows which 'facet' resubmits into. */ static void facet_update_time(struct ofproto *ofproto, struct facet *facet, - const struct odp_flow_stats *stats) + long long int used) { - long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec); if (used > facet->used) { facet->used = used; if (used > facet->rule->used) { @@ -3930,16 +3880,76 @@ facet_update_time(struct ofproto *ofproto, struct facet *facet, * cleared out of the datapath. */ static void facet_update_stats(struct ofproto *ofproto, struct facet *facet, - const struct odp_flow_stats *stats) + const struct dpif_flow_stats *stats) { - if (stats->n_packets) { - facet_update_time(ofproto, facet, stats); + if (stats->n_packets || stats->used > facet->used) { + facet_update_time(ofproto, facet, stats->used); facet->packet_count += stats->n_packets; facet->byte_count += stats->n_bytes; + facet_push_stats(ofproto, facet); netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags); } } +static void +facet_push_stats(struct ofproto *ofproto, struct facet *facet) +{ + uint64_t rs_packets, rs_bytes; + + assert(facet->packet_count >= facet->rs_packet_count); + assert(facet->byte_count >= facet->rs_byte_count); + assert(facet->used >= facet->rs_used); + + rs_packets = facet->packet_count - facet->rs_packet_count; + rs_bytes = facet->byte_count - facet->rs_byte_count; + + if (rs_packets || rs_bytes || facet->used > facet->rs_used) { + facet->rs_packet_count = facet->packet_count; + facet->rs_byte_count = facet->byte_count; + facet->rs_used = facet->used; + + flow_push_stats(ofproto, facet->rule, &facet->flow, + rs_packets, rs_bytes, facet->used); + } +} + +struct ofproto_push { + struct action_xlate_ctx ctx; + uint64_t packets; + uint64_t bytes; + long long int used; +}; + +static void +push_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) +{ + struct ofproto_push *push = CONTAINER_OF(ctx, struct ofproto_push, ctx); + + if (rule) { + rule->packet_count += push->packets; + rule->byte_count += push->bytes; + rule->used = MAX(push->used, rule->used); + } +} + +/* Pushes flow statistics to the rules which 'flow' resubmits into given + * 'rule''s actions. */ +static void +flow_push_stats(struct ofproto *ofproto, const struct rule *rule, + struct flow *flow, uint64_t packets, uint64_t bytes, + long long int used) +{ + struct ofproto_push push; + + push.packets = packets; + push.bytes = bytes; + push.used = used; + + action_xlate_ctx_init(&push.ctx, ofproto, flow, NULL); + push.ctx.resubmit_hook = push_resubmit; + ofpbuf_delete(xlate_actions(&push.ctx, rule->actions, rule->n_actions)); +} + /* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT * in which no matching flow already exists in the flow table. * @@ -4328,10 +4338,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) return 0; /* Nicira extension requests. 
*/ - case OFPUTIL_NXT_STATUS_REQUEST: - return switch_status_handle_request( - ofconn->ofproto->switch_status, ofconn->rconn, oh); - case OFPUTIL_NXT_TUN_ID_FROM_COOKIE: return handle_tun_id_from_cookie(ofconn, oh); @@ -4387,7 +4393,6 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPUTIL_OFPST_PORT_REPLY: case OFPUTIL_OFPST_TABLE_REPLY: case OFPUTIL_OFPST_AGGREGATE_REPLY: - case OFPUTIL_NXT_STATUS_REPLY: case OFPUTIL_NXT_ROLE_REPLY: case OFPUTIL_NXT_FLOW_REMOVED: case OFPUTIL_NXST_FLOW_REPLY: @@ -4428,13 +4433,19 @@ handle_miss_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Set header pointers in 'flow'. */ flow_extract(upcall->packet, flow.tun_id, flow.in_port, &flow); + if (p->ofhooks->special_cb + && !p->ofhooks->special_cb(&flow, upcall->packet, p->aux)) { + ofpbuf_delete(upcall->packet); + return; + } + /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. */ if (in_band_msg_in_hook(p->in_band, &flow, upcall->packet)) { struct ofpbuf odp_actions; ofpbuf_init(&odp_actions, 32); - nl_msg_put_u32(&odp_actions, ODPAT_OUTPUT, ODPP_LOCAL); + nl_msg_put_u32(&odp_actions, ODP_ACTION_ATTR_OUTPUT, ODPP_LOCAL); dpif_execute(p->dpif, odp_actions.data, odp_actions.size, upcall->packet); ofpbuf_uninit(&odp_actions); @@ -4494,13 +4505,13 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) struct flow flow; switch (upcall->type) { - case _ODPL_ACTION_NR: + case DPIF_UC_ACTION: COVERAGE_INC(ofproto_ctlr_action); odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); send_packet_in(p, upcall, &flow, false); break; - case _ODPL_SFLOW_NR: + case DPIF_UC_SAMPLE: if (p->sflow) { odp_flow_key_to_flow(upcall->key, upcall->key_len, &flow); ofproto_sflow_received(p->sflow, upcall, &flow); @@ -4508,10 +4519,11 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) ofpbuf_delete(upcall->packet); break; - case _ODPL_MISS_NR: + case DPIF_UC_MISS: handle_miss_upcall(p, upcall); break; + case DPIF_N_UC_TYPES: default: VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32, upcall->type); break; @@ -4521,7 +4533,7 @@ handle_upcall(struct ofproto *p, struct dpif_upcall *upcall) /* Flow expiration. */ static int ofproto_dp_max_idle(const struct ofproto *); -static void ofproto_update_used(struct ofproto *); +static void ofproto_update_stats(struct ofproto *); static void rule_expire(struct ofproto *, struct rule *); static void ofproto_expire_facets(struct ofproto *, int dp_max_idle); @@ -4538,8 +4550,8 @@ ofproto_expire(struct ofproto *ofproto) struct cls_cursor cursor; int dp_max_idle; - /* Update 'used' for each flow in the datapath. */ - ofproto_update_used(ofproto); + /* Update stats for each flow in the datapath. */ + ofproto_update_stats(ofproto); /* Expire facets that have been idle too long. */ dp_max_idle = ofproto_dp_max_idle(ofproto); @@ -4562,35 +4574,35 @@ ofproto_expire(struct ofproto *ofproto) return MIN(dp_max_idle, 1000); } -/* Update 'used' member of installed facets. */ +/* Update 'packet_count', 'byte_count', and 'used' members of installed facets. + * + * This function also pushes statistics updates to rules which each facet + * resubmits into. Generally these statistics will be accurate. However, if a + * facet changes the rule it resubmits into at some time in between + * ofproto_update_stats() runs, it is possible that statistics accrued to the + * old rule will be incorrectly attributed to the new rule. 
This could be + * avoided by calling ofproto_update_stats() whenever rules are created or + * deleted. However, the performance impact of making so many calls to the + * datapath do not justify the benefit of having perfectly accurate statistics. + */ static void -ofproto_update_used(struct ofproto *p) +ofproto_update_stats(struct ofproto *p) { + const struct dpif_flow_stats *stats; struct dpif_flow_dump dump; + const struct nlattr *key; + size_t key_len; dpif_flow_dump_start(&dump, p->dpif); - for (;;) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; + while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) { struct facet *facet; - struct odp_flow f; struct flow flow; - memset(&f, 0, sizeof f); - f.key = (struct nlattr *) keybuf; - f.key_len = sizeof keybuf; - if (!dpif_flow_dump_next(&dump, &f)) { - break; - } - - if (f.key_len > sizeof keybuf) { - VLOG_WARN_RL(&rl, "ODP flow key overflowed buffer"); - continue; - } - if (odp_flow_key_to_flow(f.key, f.key_len, &flow)) { + if (odp_flow_key_to_flow(key, key_len, &flow)) { struct ds s; ds_init(&s); - odp_flow_key_format(f.key, f.key_len, &s); + odp_flow_key_format(key, key_len, &s); VLOG_WARN_RL(&rl, "failed to convert ODP flow key to flow: %s", ds_cstr(&s)); ds_destroy(&s); @@ -4600,13 +4612,30 @@ ofproto_update_used(struct ofproto *p) facet = facet_find(p, &flow); if (facet && facet->installed) { - facet_update_time(p, facet, &f.stats); - facet_account(p, facet, f.stats.n_bytes); + + if (stats->n_packets >= facet->dp_packet_count) { + facet->packet_count += stats->n_packets - facet->dp_packet_count; + } else { + VLOG_WARN_RL(&rl, "unexpected packet count from the datapath"); + } + + if (stats->n_bytes >= facet->dp_byte_count) { + facet->byte_count += stats->n_bytes - facet->dp_byte_count; + } else { + VLOG_WARN_RL(&rl, "unexpected byte count from datapath"); + } + + facet->dp_packet_count = stats->n_packets; + facet->dp_byte_count = stats->n_bytes; + + facet_update_time(p, facet, stats->used); + facet_account(p, facet, stats->n_bytes); + facet_push_stats(p, facet); } else { /* There's a flow in the datapath that we know nothing about. * Delete it. */ COVERAGE_INC(ofproto_unexpected_rule); - dpif_flow_del(p->dpif, &f); + dpif_flow_del(p->dpif, key, key_len, NULL); } } dpif_flow_dump_done(&dump); @@ -4644,7 +4673,7 @@ ofproto_dp_max_idle(const struct ofproto *ofproto) * they receive additional data). * * This requires a second pass through the facets, in addition to the pass - * made by ofproto_update_used(), because the former function never looks + * made by ofproto_update_stats(), because the former function never looks * at uninstallable facets. */ enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) }; @@ -4708,39 +4737,19 @@ facet_active_timeout(struct ofproto *ofproto, struct facet *facet) if (ofproto->netflow && !facet_is_controller_flow(facet) && netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) { struct ofexpired expired; - struct odp_flow odp_flow; - /* Get updated flow stats. - * - * XXX We could avoid this call entirely if (1) ofproto_update_used() - * updated TCP flags and (2) the dpif_flow_list_all() in - * ofproto_update_used() zeroed TCP flags. 
*/ - memset(&odp_flow, 0, sizeof odp_flow); if (facet->installed) { - uint32_t keybuf[ODPUTIL_FLOW_KEY_U32S]; - struct ofpbuf key; - - ofpbuf_use_stack(&key, keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &facet->flow); + struct dpif_flow_stats stats; - odp_flow.key = key.data; - odp_flow.key_len = key.size; - odp_flow.flags = ODPFF_ZERO_TCP_FLAGS; - dpif_flow_get(ofproto->dpif, &odp_flow); - - if (odp_flow.stats.n_packets) { - facet_update_time(ofproto, facet, &odp_flow.stats); - netflow_flow_update_flags(&facet->nf_flow, - odp_flow.stats.tcp_flags); - } + facet_put__(ofproto, facet, facet->actions, facet->actions_len, + &stats); + facet_update_stats(ofproto, facet, &stats); } expired.flow = facet->flow; - expired.packet_count = facet->packet_count + - odp_flow.stats.n_packets; - expired.byte_count = facet->byte_count + odp_flow.stats.n_bytes; + expired.packet_count = facet->packet_count; + expired.byte_count = facet->byte_count; expired.used = facet->used; - netflow_expire(ofproto->netflow, &facet->nf_flow, &expired); } } @@ -4868,6 +4877,31 @@ rule_send_removed(struct ofproto *p, struct rule *rule, uint8_t reason) } } +/* Obtains statistics for 'rule' and stores them in '*packets' and '*bytes'. + * The returned statistics include statistics for all of 'rule''s facets. */ +static void +rule_get_stats(const struct rule *rule, uint64_t *packets, uint64_t *bytes) +{ + uint64_t p, b; + struct facet *facet; + + /* Start from historical data for 'rule' itself that are no longer tracked + * in facets. This counts, for example, facets that have expired. */ + p = rule->packet_count; + b = rule->byte_count; + + /* Add any statistics that are tracked by facets. This includes + * statistical data recently updated by ofproto_update_stats() as well as + * stats for packets that were executed "by hand" via dpif_execute(). */ + LIST_FOR_EACH (facet, list_node, &rule->facets) { + p += facet->packet_count; + b += facet->byte_count; + } + + *packets = p; + *bytes = b; +} + /* pinsched callback for sending 'ofp_packet_in' on 'ofconn'. */ static void do_send_packet_in(struct ofpbuf *ofp_packet_in, void *ofconn_) @@ -4894,9 +4928,10 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, int total_len, send_len; struct ofpbuf *packet; uint32_t buffer_id; + int idx; /* Get OpenFlow buffer_id. */ - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { buffer_id = UINT32_MAX; } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { buffer_id = pktbuf_get_null(); @@ -4911,7 +4946,7 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, if (buffer_id != UINT32_MAX) { send_len = MIN(send_len, ofconn->miss_send_len); } - if (upcall->type == _ODPL_ACTION_NR) { + if (upcall->type == DPIF_UC_ACTION) { send_len = MIN(send_len, upcall->userdata); } @@ -4930,22 +4965,24 @@ schedule_packet_in(struct ofconn *ofconn, struct dpif_upcall *upcall, opi->header.type = OFPT_PACKET_IN; opi->total_len = htons(total_len); opi->in_port = htons(odp_port_to_ofp_port(flow->in_port)); - opi->reason = upcall->type == _ODPL_MISS_NR ? OFPR_NO_MATCH : OFPR_ACTION; + opi->reason = upcall->type == DPIF_UC_MISS ? OFPR_NO_MATCH : OFPR_ACTION; opi->buffer_id = htonl(buffer_id); update_openflow_length(packet); /* Hand over to packet scheduler. It might immediately call into * do_send_packet_in() or it might buffer it for a while (until a later * call to pinsched_run()). 
*/ - pinsched_send(ofconn->schedulers[opi->reason], flow->in_port, + idx = upcall->type == DPIF_UC_MISS ? 0 : 1; + pinsched_send(ofconn->schedulers[idx], flow->in_port, packet, do_send_packet_in, ofconn); } -/* Given 'upcall', of type _ODPL_ACTION_NR or _ODPL_MISS_NR, sends an +/* Given 'upcall', of type DPIF_UC_ACTION or DPIF_UC_MISS, sends an * OFPT_PACKET_IN message to each OpenFlow controller as necessary according to * their individual configurations. * - * Takes ownership of 'packet'. */ + * If 'clone' is true, the caller retains ownership of 'upcall->packet'. + * Otherwise, ownership is transferred to this function. */ static void send_packet_in(struct ofproto *ofproto, struct dpif_upcall *upcall, const struct flow *flow, bool clone) @@ -5054,7 +5091,7 @@ trace_format_flow(struct ds *result, int level, const char *title, } static void -trace_resubmit(struct action_xlate_ctx *ctx, const struct rule *rule) +trace_resubmit(struct action_xlate_ctx *ctx, struct rule *rule) { struct ofproto_trace *trace = CONTAINER_OF(ctx, struct ofproto_trace, ctx); struct ds *result = trace->result; @@ -5199,7 +5236,7 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, flood_packets(ofproto, flow->in_port, OFPPC_NO_FLOOD, nf_output_iface, odp_actions); } else if (out_port != flow->in_port) { - nl_msg_put_u32(odp_actions, ODPAT_OUTPUT, out_port); + nl_msg_put_u32(odp_actions, ODP_ACTION_ATTR_OUTPUT, out_port); *nf_output_iface = out_port; } else { /* Drop. */ @@ -5211,5 +5248,6 @@ default_normal_ofhook_cb(const struct flow *flow, const struct ofpbuf *packet, static const struct ofhooks default_ofhooks = { default_normal_ofhook_cb, NULL, + NULL, NULL };
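
The statistics handling in this patch replaces one-shot datapath queries (the removed query_stats()) with per-facet bookkeeping: each facet remembers the last packet and byte counts reported by the datapath (dp_packet_count, dp_byte_count), and ofproto_update_stats() folds only the increase since the previous poll into the facet's cumulative counters. The short, self-contained C sketch below illustrates that delta logic in isolation; it is not part of the patch, and the names sample_facet and fold_dp_reading are invented for illustration.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the dp_*_count bookkeeping a facet carries. */
struct sample_facet {
    uint64_t packet_count;     /* Cumulative packets attributed to the facet. */
    uint64_t dp_packet_count;  /* Last packet count reported by the datapath. */
};

/* Mirrors the update performed in ofproto_update_stats(): attribute only the
 * packets seen since the previous reading, and treat a counter that moved
 * backward as unexpected (deliberate resets clear dp_packet_count first, as
 * facet_put__() does when it passes DPIF_FP_ZERO_STATS). */
static void
fold_dp_reading(struct sample_facet *f, uint64_t dp_reading)
{
    if (dp_reading >= f->dp_packet_count) {
        f->packet_count += dp_reading - f->dp_packet_count;
    } else {
        fprintf(stderr, "unexpected packet count from the datapath\n");
    }
    f->dp_packet_count = dp_reading;
}

int
main(void)
{
    struct sample_facet f = { 0, 0 };

    fold_dp_reading(&f, 10);    /* First poll: 10 packets total.     -> 10 */
    fold_dp_reading(&f, 25);    /* 15 more packets since last poll.  -> 25 */

    f.dp_packet_count = 0;      /* Simulate a re-put that zeroed the
                                 * datapath stats (DPIF_FP_ZERO_STATS). */
    fold_dp_reading(&f, 3);     /* 3 packets since the reset.        -> 28 */

    printf("packet_count=%"PRIu64"\n", f.packet_count);
    return 0;
}

These cumulative counters are what rule_get_stats() now sums over a rule's facets, and what facet_push_stats()/flow_push_stats() credit to the rules that a facet resubmits into.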