X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=dcf8683ff37ea007a41d75e1fc375c2b1e0e71e6;hb=85444c3f79f594c838d4b3e72ff27e263514f194;hp=eb8a7a9119235bdc5825b97581be9bd5eaecc943;hpb=3f355f47f8e7343e909ccfa854454d667baf3c38;p=sliver-openvswitch.git diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c index eb8a7a911..dcf8683ff 100644 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@ -1,5 +1,6 @@ /* - * Copyright (c) 2009 Nicira Networks. + * Copyright (c) 2009, 2010 Nicira Networks. + * Copyright (c) 2010 Jean Tourrilhes - HP-Labs. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +28,6 @@ #include "discovery.h" #include "dpif.h" #include "dynamic-string.h" -#include "executer.h" #include "fail-open.h" #include "in-band.h" #include "mac-learning.h" @@ -35,10 +35,10 @@ #include "netflow.h" #include "odp-util.h" #include "ofp-print.h" +#include "ofproto-sflow.h" #include "ofpbuf.h" #include "openflow/nicira-ext.h" #include "openflow/openflow.h" -#include "openflow/openflow-mgmt.h" #include "openvswitch/datapath-protocol.h" #include "packets.h" #include "pinsched.h" @@ -49,21 +49,18 @@ #include "shash.h" #include "status.h" #include "stp.h" +#include "stream-ssl.h" #include "svec.h" #include "tag.h" #include "timeval.h" #include "unixctl.h" #include "vconn.h" -#include "vconn-ssl.h" #include "xtoxll.h" #define THIS_MODULE VLM_ofproto #include "vlog.h" -enum { - DP_GROUP_FLOOD = 0, - DP_GROUP_ALL = 1 -}; +#include "sflow_api.h" enum { TABLEID_HASH = 0, @@ -82,21 +79,23 @@ static int xlate_actions(const union ofp_action *in, size_t n_in, const flow_t *flow, struct ofproto *ofproto, const struct ofpbuf *packet, struct odp_actions *out, tag_type *tags, - bool *may_setup_flow); + bool *may_set_up_flow, uint16_t *nf_output_iface); struct rule { struct cls_rule cr; + uint64_t flow_cookie; /* Controller-issued identifier. + (Kept in network-byte order.) */ uint16_t idle_timeout; /* In seconds from time of last use. */ uint16_t hard_timeout; /* In seconds from time of creation. */ + bool send_flow_removed; /* Send a flow removed message? */ long long int used; /* Last-used time (0 if never used). */ long long int created; /* Creation time. */ uint64_t packet_count; /* Number of packets received. */ uint64_t byte_count; /* Number of bytes received. */ uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */ - uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */ - uint8_t ip_tos; /* Last-seen IP type-of-service. */ tag_type tags; /* Tags (set only by hooks). */ + struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */ /* If 'super' is non-NULL, this rule is a subrule, that is, it is an * exact-match rule (having cr.wc.wildcards of 0) generated from the @@ -110,6 +109,9 @@ struct rule { struct list list; /* OpenFlow actions. + * + * 'n_actions' is the number of elements in the 'actions' array. A single + * action may take up more more than one element's worth of space. * * A subrule has no actions (it uses the super-rule's actions). 
*/ int n_actions; @@ -145,9 +147,10 @@ rule_is_hidden(const struct rule *rule) return false; } -static struct rule *rule_create(struct rule *super, const union ofp_action *, - size_t n_actions, uint16_t idle_timeout, - uint16_t hard_timeout); +static struct rule *rule_create(struct ofproto *, struct rule *super, + const union ofp_action *, size_t n_actions, + uint16_t idle_timeout, uint16_t hard_timeout, + uint64_t flow_cookie, bool send_flow_removed); static void rule_free(struct rule *); static void rule_destroy(struct ofproto *, struct rule *); static struct rule *rule_from_cls_rule(const struct cls_rule *); @@ -160,38 +163,83 @@ static void rule_install(struct ofproto *, struct rule *, struct rule *displaced_rule); static void rule_uninstall(struct ofproto *, struct rule *); static void rule_post_uninstall(struct ofproto *, struct rule *); +static void send_flow_removed(struct ofproto *p, struct rule *rule, + long long int now, uint8_t reason); -struct ofconn { - struct list node; - struct rconn *rconn; - struct pktbuf *pktbuf; - bool send_flow_exp; - int miss_send_len; - - struct rconn_packet_counter *packet_in_counter; +/* ofproto supports two kinds of OpenFlow connections: + * + * - "Controller connections": Connections to ordinary OpenFlow controllers. + * ofproto maintains persistent connections to these controllers and by + * default sends them asynchronous messages such as packet-ins. + * + * - "Transient connections", e.g. from ovs-ofctl. When these connections + * drop, it is the other side's responsibility to reconnect them if + * necessary. ofproto does not send them asynchronous messages by default. + */ +enum ofconn_type { + OFCONN_CONTROLLER, /* An OpenFlow controller. */ + OFCONN_TRANSIENT /* A transient connection. */ +}; - /* Number of OpenFlow messages queued as replies to OpenFlow requests, and - * the maximum number before we stop reading OpenFlow requests. */ +/* An OpenFlow connection. */ +struct ofconn { + struct ofproto *ofproto; /* The ofproto that owns this connection. */ + struct list node; /* In struct ofproto's "all_conns" list. */ + struct rconn *rconn; /* OpenFlow connection. */ + enum ofconn_type type; /* Type. */ + + /* OFPT_PACKET_IN related data. */ + struct rconn_packet_counter *packet_in_counter; /* # queued on 'rconn'. */ + struct pinsched *schedulers[2]; /* Indexed by reason code; see below. */ + struct pktbuf *pktbuf; /* OpenFlow packet buffers. */ + int miss_send_len; /* Bytes to send of buffered packets. */ + + /* Number of OpenFlow messages queued on 'rconn' as replies to OpenFlow + * requests, and the maximum number before we stop reading OpenFlow + * requests. */ #define OFCONN_REPLY_MAX 100 struct rconn_packet_counter *reply_counter; + + /* type == OFCONN_CONTROLLER only. */ + enum nx_role role; /* Role. */ + struct hmap_node hmap_node; /* In struct ofproto's "controllers" map. */ + struct discovery *discovery; /* Controller discovery object, if enabled. */ + struct status_category *ss; /* Switch status category. */ + enum ofproto_band band; /* In-band or out-of-band? */ }; -static struct ofconn *ofconn_create(struct ofproto *, struct rconn *); -static void ofconn_destroy(struct ofconn *, struct ofproto *); +/* We use OFPR_NO_MATCH and OFPR_ACTION as indexes into struct ofconn's + * "schedulers" array. Their values are 0 and 1, and their meanings and values + * coincide with _ODPL_MISS_NR and _ODPL_ACTION_NR, so this is convenient. In + * case anything ever changes, check their values here. 
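The comment above spells out the invariant behind the new per-connection packet-in rate limiters: the OpenFlow reason codes OFPR_NO_MATCH and OFPR_ACTION are used directly as indexes into the two-slot schedulers[] array, which is only safe because their values (0 and 1) coincide with the datapath's _ODPL_MISS_NR and _ODPL_ACTION_NR. As a standalone illustration of that idea (the constants below are stand-ins, not the real headers; the patch itself relies on BUILD_ASSERT_DECL for the compile-time check):

#include <assert.h>
#include <stdio.h>

/* Stand-in values for the constants the patch pulls in from openflow.h and
 * datapath-protocol.h. */
#define OFPR_NO_MATCH 0               /* Packet had no matching flow entry. */
#define OFPR_ACTION   1               /* An action sent the packet up. */
#define N_SCHEDULERS  2

/* Poor man's BUILD_ASSERT_DECL: the array gets a negative size, and the
 * build breaks, if a reason code stops being a valid scheduler index. */
typedef char reason_codes_index_schedulers[
    (OFPR_NO_MATCH < N_SCHEDULERS && OFPR_ACTION < N_SCHEDULERS) ? 1 : -1];

struct pinsched;                      /* Opaque rate limiter, one per reason. */

static struct pinsched *
scheduler_for_reason(struct pinsched *schedulers[N_SCHEDULERS], int reason)
{
    assert(reason == OFPR_NO_MATCH || reason == OFPR_ACTION);
    return schedulers[reason];        /* The reason code doubles as the index. */
}

int
main(void)
{
    struct pinsched *schedulers[N_SCHEDULERS] = { NULL, NULL };
    printf("miss scheduler slot: %p\n",
           (void *) scheduler_for_reason(schedulers, OFPR_NO_MATCH));
    return 0;
}

If the enum values ever diverged, the negative-size array would break the build, which is the same effect the BUILD_ASSERT_DECL lines below achieve.
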
*/ +#define N_SCHEDULERS 2 +BUILD_ASSERT_DECL(OFPR_NO_MATCH == 0); +BUILD_ASSERT_DECL(OFPR_NO_MATCH == _ODPL_MISS_NR); +BUILD_ASSERT_DECL(OFPR_ACTION == 1); +BUILD_ASSERT_DECL(OFPR_ACTION == _ODPL_ACTION_NR); + +static struct ofconn *ofconn_create(struct ofproto *, struct rconn *, + enum ofconn_type); +static void ofconn_destroy(struct ofconn *); static void ofconn_run(struct ofconn *, struct ofproto *); static void ofconn_wait(struct ofconn *); +static bool ofconn_receives_async_msgs(const struct ofconn *); + static void queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn, struct rconn_packet_counter *counter); +static void send_packet_in(struct ofproto *, struct ofpbuf *odp_msg); +static void do_send_packet_in(struct ofpbuf *odp_msg, void *ofconn); + struct ofproto { /* Settings. */ uint64_t datapath_id; /* Datapath ID. */ uint64_t fallback_dpid; /* Datapath ID if no better choice found. */ - uint64_t mgmt_id; /* Management channel identifier. */ - char *manufacturer; /* Manufacturer. */ - char *hardware; /* Hardware. */ - char *software; /* Software version. */ - char *serial; /* Serial number. */ + char *mfr_desc; /* Manufacturer. */ + char *hw_desc; /* Hardware. */ + char *sw_desc; /* Software version. */ + char *serial_desc; /* Serial number. */ + char *dp_desc; /* Datapath description. */ /* Datapath. */ struct dpif *dpif; @@ -203,23 +251,26 @@ struct ofproto { /* Configuration. */ struct switch_status *switch_status; - struct status_category *ss_cat; - struct in_band *in_band; - struct discovery *discovery; struct fail_open *fail_open; - struct pinsched *miss_sched, *action_sched; - struct executer *executer; struct netflow *netflow; + struct ofproto_sflow *sflow; + + /* In-band control. */ + struct in_band *in_band; + long long int next_in_band_update; + struct sockaddr_in *extra_in_band_remotes; + size_t n_extra_remotes; /* Flow table. */ struct classifier cls; bool need_revalidate; long long int next_expiration; struct tag_set revalidate_set; + bool tun_id_from_cookie; /* OpenFlow connections. */ - struct list all_conns; - struct ofconn *controller; + struct hmap controllers; /* Controller "struct ofconn"s. */ + struct list all_conns; /* Contains "struct ofconn"s. 
*/ struct pvconn **listeners; size_t n_listeners; struct pvconn **snoops; @@ -239,11 +290,12 @@ static const struct ofhooks default_ofhooks; static uint64_t pick_datapath_id(const struct ofproto *); static uint64_t pick_fallback_dpid(void); -static void send_packet_in_miss(struct ofpbuf *, void *ofproto); -static void send_packet_in_action(struct ofpbuf *, void *ofproto); + static void update_used(struct ofproto *); -static void update_stats(struct rule *, const struct odp_flow_stats *); +static void update_stats(struct ofproto *, struct rule *, + const struct odp_flow_stats *); static void expire_rule(struct cls_rule *, void *ofproto); +static void active_timeout(struct ofproto *ofproto, struct rule *rule); static bool revalidate_rule(struct ofproto *p, struct rule *rule); static void revalidate_cb(struct cls_rule *rule_, void *p_); @@ -252,13 +304,15 @@ static void handle_odp_msg(struct ofproto *, struct ofpbuf *); static void handle_openflow(struct ofconn *, struct ofproto *, struct ofpbuf *); -static void refresh_port_group(struct ofproto *, unsigned int group); +static void refresh_port_groups(struct ofproto *); + static void update_port(struct ofproto *, const char *devname); static int init_ports(struct ofproto *); static void reinit_ports(struct ofproto *); int -ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, +ofproto_create(const char *datapath, const char *datapath_type, + const struct ofhooks *ofhooks, void *aux, struct ofproto **ofprotop) { struct odp_stats stats; @@ -269,7 +323,7 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, *ofprotop = NULL; /* Connect to datapath and start listening for messages. */ - error = dpif_open(datapath, &dpif); + error = dpif_open(datapath, datapath_type, &dpif); if (error) { VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error)); return error; @@ -281,7 +335,7 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, dpif_close(dpif); return error; } - error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION); + error = dpif_recv_set_mask(dpif, ODPL_MISS | ODPL_ACTION | ODPL_SFLOW); if (error) { VLOG_ERR("failed to listen on datapath %s: %s", datapath, strerror(error)); @@ -292,13 +346,14 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, dpif_recv_purge(dpif); /* Initialize settings. */ - p = xcalloc(1, sizeof *p); + p = xzalloc(sizeof *p); p->fallback_dpid = pick_fallback_dpid(); p->datapath_id = p->fallback_dpid; - p->manufacturer = xstrdup("Nicira Networks, Inc."); - p->hardware = xstrdup("Reference Implementation"); - p->software = xstrdup(VERSION BUILDNR); - p->serial = xstrdup("None"); + p->mfr_desc = xstrdup(DEFAULT_MFR_DESC); + p->hw_desc = xstrdup(DEFAULT_HW_DESC); + p->sw_desc = xstrdup(DEFAULT_SW_DESC); + p->serial_desc = xstrdup(DEFAULT_SERIAL_DESC); + p->dp_desc = xstrdup(DEFAULT_DP_DESC); /* Initialize datapath. */ p->dpif = dpif; @@ -310,11 +365,9 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, /* Initialize submodules. */ p->switch_status = switch_status_create(p); p->in_band = NULL; - p->discovery = NULL; p->fail_open = NULL; - p->miss_sched = p->action_sched = NULL; - p->executer = NULL; p->netflow = NULL; + p->sflow = NULL; /* Initialize flow table. */ classifier_init(&p->cls); @@ -324,9 +377,7 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, /* Initialize OpenFlow connections. 
*/ list_init(&p->all_conns); - p->controller = ofconn_create(p, rconn_create(5, 8)); - p->controller->pktbuf = pktbuf_create(); - p->controller->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN; + hmap_init(&p->controllers); p->listeners = NULL; p->n_listeners = 0; p->snoops = NULL; @@ -343,20 +394,9 @@ ofproto_create(const char *datapath, const struct ofhooks *ofhooks, void *aux, p->ml = mac_learning_create(); } - /* Register switch status category. */ - p->ss_cat = switch_status_register(p->switch_status, "remote", - rconn_status_cb, p->controller->rconn); - - /* Almost done... */ - error = init_ports(p); - if (error) { - ofproto_destroy(p); - return error; - } - /* Pick final datapath ID. */ p->datapath_id = pick_datapath_id(p); - VLOG_INFO("using datapath ID %012"PRIx64, p->datapath_id); + VLOG_INFO("using datapath ID %016"PRIx64, p->datapath_id); *ofprotop = p; return 0; @@ -368,116 +408,340 @@ ofproto_set_datapath_id(struct ofproto *p, uint64_t datapath_id) uint64_t old_dpid = p->datapath_id; p->datapath_id = datapath_id ? datapath_id : pick_datapath_id(p); if (p->datapath_id != old_dpid) { - VLOG_INFO("datapath ID changed to %012"PRIx64, p->datapath_id); - rconn_reconnect(p->controller->rconn); + struct ofconn *ofconn; + + VLOG_INFO("datapath ID changed to %016"PRIx64, p->datapath_id); + + /* Force all active connections to reconnect, since there is no way to + * notify a controller that the datapath ID has changed. */ + LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) { + rconn_reconnect(ofconn->rconn); + } } } -void -ofproto_set_mgmt_id(struct ofproto *p, uint64_t mgmt_id) +static bool +is_discovery_controller(const struct ofproto_controller *c) { - p->mgmt_id = mgmt_id; + return !strcmp(c->target, "discover"); } -void -ofproto_set_probe_interval(struct ofproto *p, int probe_interval) +static bool +is_in_band_controller(const struct ofproto_controller *c) { - probe_interval = probe_interval ? MAX(probe_interval, 5) : 0; - rconn_set_probe_interval(p->controller->rconn, probe_interval); - if (p->fail_open) { - int trigger_duration = probe_interval ? probe_interval * 3 : 15; - fail_open_set_trigger_duration(p->fail_open, trigger_duration); - } + return is_discovery_controller(c) || c->band == OFPROTO_IN_BAND; } -void -ofproto_set_max_backoff(struct ofproto *p, int max_backoff) +/* Creates a new controller in 'ofproto'. Some of the settings are initially + * drawn from 'c', but update_controller() needs to be called later to finish + * the new ofconn's configuration. */ +static void +add_controller(struct ofproto *ofproto, const struct ofproto_controller *c) { - rconn_set_max_backoff(p->controller->rconn, max_backoff); + struct discovery *discovery; + struct ofconn *ofconn; + + if (is_discovery_controller(c)) { + int error = discovery_create(c->accept_re, c->update_resolv_conf, + ofproto->dpif, ofproto->switch_status, + &discovery); + if (error) { + return; + } + } else { + discovery = NULL; + } + + ofconn = ofconn_create(ofproto, rconn_create(5, 8), OFCONN_CONTROLLER); + ofconn->pktbuf = pktbuf_create(); + ofconn->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN; + if (discovery) { + ofconn->discovery = discovery; + } else { + rconn_connect(ofconn->rconn, c->target); + } + hmap_insert(&ofproto->controllers, &ofconn->hmap_node, + hash_string(c->target, 0)); } -void -ofproto_set_desc(struct ofproto *p, - const char *manufacturer, const char *hardware, - const char *software, const char *serial) +/* Reconfigures 'ofconn' to match 'c'. 
This function cannot update an ofconn's + * target or turn discovery on or off (these are done by creating new ofconns + * and deleting old ones), but it can update the rest of an ofconn's + * settings. */ +static void +update_controller(struct ofconn *ofconn, const struct ofproto_controller *c) { - if (manufacturer) { - free(p->manufacturer); - p->manufacturer = xstrdup(manufacturer); - } - if (hardware) { - free(p->hardware); - p->hardware = xstrdup(hardware); + struct ofproto *ofproto = ofconn->ofproto; + int probe_interval; + int i; + + ofconn->band = (is_in_band_controller(c) + ? OFPROTO_IN_BAND : OFPROTO_OUT_OF_BAND); + + rconn_set_max_backoff(ofconn->rconn, c->max_backoff); + + probe_interval = c->probe_interval ? MAX(c->probe_interval, 5) : 0; + rconn_set_probe_interval(ofconn->rconn, probe_interval); + + if (ofconn->discovery) { + discovery_set_update_resolv_conf(ofconn->discovery, + c->update_resolv_conf); + discovery_set_accept_controller_re(ofconn->discovery, c->accept_re); } - if (software) { - free(p->software); - p->software = xstrdup(software); + + for (i = 0; i < N_SCHEDULERS; i++) { + struct pinsched **s = &ofconn->schedulers[i]; + + if (c->rate_limit > 0) { + if (!*s) { + *s = pinsched_create(c->rate_limit, c->burst_limit, + ofproto->switch_status); + } else { + pinsched_set_limits(*s, c->rate_limit, c->burst_limit); + } + } else { + pinsched_destroy(*s); + *s = NULL; + } } - if (serial) { - free(p->serial); - p->serial = xstrdup(serial); +} + +static const char * +ofconn_get_target(const struct ofconn *ofconn) +{ + return ofconn->discovery ? "discover" : rconn_get_name(ofconn->rconn); +} + +static struct ofconn * +find_controller_by_target(struct ofproto *ofproto, const char *target) +{ + struct ofconn *ofconn; + + HMAP_FOR_EACH_WITH_HASH (ofconn, struct ofconn, hmap_node, + hash_string(target, 0), &ofproto->controllers) { + if (!strcmp(ofconn_get_target(ofconn), target)) { + return ofconn; + } } + return NULL; } -int -ofproto_set_in_band(struct ofproto *p, bool in_band) +static void +update_in_band_remotes(struct ofproto *ofproto) { - if (in_band != (p->in_band != NULL)) { - if (in_band) { - return in_band_create(p, p->dpif, p->switch_status, - p->controller->rconn, &p->in_band); - } else { - ofproto_set_discovery(p, false, NULL, true); - in_band_destroy(p->in_band); - p->in_band = NULL; + const struct ofconn *ofconn; + struct sockaddr_in *addrs; + size_t max_addrs, n_addrs; + bool discovery; + size_t i; + + /* Allocate enough memory for as many remotes as we could possibly have. */ + max_addrs = ofproto->n_extra_remotes + hmap_count(&ofproto->controllers); + addrs = xmalloc(max_addrs * sizeof *addrs); + n_addrs = 0; + + /* Add all the remotes. */ + discovery = false; + HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &ofproto->controllers) { + struct sockaddr_in *sin = &addrs[n_addrs]; + + sin->sin_addr.s_addr = rconn_get_remote_ip(ofconn->rconn); + if (sin->sin_addr.s_addr) { + sin->sin_port = rconn_get_remote_port(ofconn->rconn); + n_addrs++; + } + if (ofconn->discovery) { + discovery = true; } - rconn_reconnect(p->controller->rconn); } - return 0; + for (i = 0; i < ofproto->n_extra_remotes; i++) { + addrs[n_addrs++] = ofproto->extra_in_band_remotes[i]; + } + + /* Create or update or destroy in-band. + * + * Ordinarily we only enable in-band if there's at least one remote + * address, but discovery needs the in-band rules for DHCP to be installed + * even before we know any remote addresses. 
*/ + if (n_addrs || discovery) { + if (!ofproto->in_band) { + in_band_create(ofproto, ofproto->dpif, ofproto->switch_status, + &ofproto->in_band); + } + in_band_set_remotes(ofproto->in_band, addrs, n_addrs); + ofproto->next_in_band_update = time_msec() + 1000; + } else { + in_band_destroy(ofproto->in_band); + ofproto->in_band = NULL; + } + + /* Clean up. */ + free(addrs); } -int -ofproto_set_discovery(struct ofproto *p, bool discovery, - const char *re, bool update_resolv_conf) -{ - if (discovery != (p->discovery != NULL)) { - if (discovery) { - int error = ofproto_set_in_band(p, true); - if (error) { - return error; +void +ofproto_set_controllers(struct ofproto *p, + const struct ofproto_controller *controllers, + size_t n_controllers) +{ + struct shash new_controllers; + enum ofproto_fail_mode fail_mode; + struct ofconn *ofconn, *next; + bool ss_exists; + size_t i; + + shash_init(&new_controllers); + for (i = 0; i < n_controllers; i++) { + const struct ofproto_controller *c = &controllers[i]; + + shash_add_once(&new_controllers, c->target, &controllers[i]); + if (!find_controller_by_target(p, c->target)) { + add_controller(p, c); + } + } + + fail_mode = OFPROTO_FAIL_STANDALONE; + ss_exists = false; + HMAP_FOR_EACH_SAFE (ofconn, next, struct ofconn, hmap_node, + &p->controllers) { + struct ofproto_controller *c; + + c = shash_find_data(&new_controllers, ofconn_get_target(ofconn)); + if (!c) { + ofconn_destroy(ofconn); + } else { + update_controller(ofconn, c); + if (ofconn->ss) { + ss_exists = true; } - error = discovery_create(re, update_resolv_conf, - p->dpif, p->switch_status, - &p->discovery); - if (error) { - return error; + if (c->fail == OFPROTO_FAIL_SECURE) { + fail_mode = OFPROTO_FAIL_SECURE; } - } else { - discovery_destroy(p->discovery); - p->discovery = NULL; } - rconn_disconnect(p->controller->rconn); - } else if (discovery) { - discovery_set_update_resolv_conf(p->discovery, update_resolv_conf); - return discovery_set_accept_controller_re(p->discovery, re); } - return 0; + shash_destroy(&new_controllers); + + update_in_band_remotes(p); + + if (!hmap_is_empty(&p->controllers) + && fail_mode == OFPROTO_FAIL_STANDALONE) { + struct rconn **rconns; + size_t n; + + if (!p->fail_open) { + p->fail_open = fail_open_create(p, p->switch_status); + } + + n = 0; + rconns = xmalloc(hmap_count(&p->controllers) * sizeof *rconns); + HMAP_FOR_EACH (ofconn, struct ofconn, hmap_node, &p->controllers) { + rconns[n++] = ofconn->rconn; + } + + fail_open_set_controllers(p->fail_open, rconns, n); + /* p->fail_open takes ownership of 'rconns'. 
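Most of ofproto_set_controllers() is visible above: it reconciles the existing "controllers" hmap against the caller's array, creating, updating, or destroying ofconns until the two match, and enables fail-open only when some controller asks for standalone fail mode. For orientation, a hypothetical caller might fill in that array as below; the field names are the ones this patch reads (target, band, max_backoff, probe_interval, accept_re, update_resolv_conf, rate_limit, burst_limit, fail), but the exact field types and the concrete values are assumptions based on ofproto.h, not taken from the patch.

#include <stdbool.h>
#include <string.h>

#include "ofproto.h"                  /* assumed to declare the types used below */

void
configure_two_controllers(struct ofproto *p)
{
    struct ofproto_controller c[2];

    memset(c, 0, sizeof c);

    /* Primary controller, reached in-band through the datapath. */
    c[0].target = "tcp:10.0.0.1:6633";       /* illustrative address */
    c[0].band = OFPROTO_IN_BAND;
    c[0].max_backoff = 8;                    /* seconds between retries */
    c[0].probe_interval = 5;                 /* echo keepalive, in seconds */
    c[0].rate_limit = 100;                   /* packet-ins per second */
    c[0].burst_limit = 25;
    c[0].fail = OFPROTO_FAIL_STANDALONE;

    /* Backup located via controller discovery, which implies in-band. */
    c[1].target = "discover";
    c[1].accept_re = "tcp:.*";
    c[1].update_resolv_conf = true;
    c[1].fail = OFPROTO_FAIL_SECURE;

    ofproto_set_controllers(p, c, 2);
}
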
*/ + } else { + fail_open_destroy(p->fail_open); + p->fail_open = NULL; + } + + if (!hmap_is_empty(&p->controllers) && !ss_exists) { + ofconn = CONTAINER_OF(hmap_first(&p->controllers), + struct ofconn, hmap_node); + ofconn->ss = switch_status_register(p->switch_status, "remote", + rconn_status_cb, ofconn->rconn); + } } -int -ofproto_set_controller(struct ofproto *ofproto, const char *controller) +static bool +any_extras_changed(const struct ofproto *ofproto, + const struct sockaddr_in *extras, size_t n) { - if (ofproto->discovery) { - return EINVAL; - } else if (controller) { - if (strcmp(rconn_get_name(ofproto->controller->rconn), controller)) { - return rconn_connect(ofproto->controller->rconn, controller); - } else { - return 0; + size_t i; + + if (n != ofproto->n_extra_remotes) { + return true; + } + + for (i = 0; i < n; i++) { + const struct sockaddr_in *old = &ofproto->extra_in_band_remotes[i]; + const struct sockaddr_in *new = &extras[i]; + + if (old->sin_addr.s_addr != new->sin_addr.s_addr || + old->sin_port != new->sin_port) { + return true; } - } else { - rconn_disconnect(ofproto->controller->rconn); - return 0; + } + + return false; +} + +/* Sets the 'n' TCP port addresses in 'extras' as ones to which 'ofproto''s + * in-band control should guarantee access, in the same way that in-band + * control guarantees access to OpenFlow controllers. */ +void +ofproto_set_extra_in_band_remotes(struct ofproto *ofproto, + const struct sockaddr_in *extras, size_t n) +{ + if (!any_extras_changed(ofproto, extras, n)) { + return; + } + + free(ofproto->extra_in_band_remotes); + ofproto->n_extra_remotes = n; + ofproto->extra_in_band_remotes = xmemdup(extras, n * sizeof *extras); + + update_in_band_remotes(ofproto); +} + +void +ofproto_set_desc(struct ofproto *p, + const char *mfr_desc, const char *hw_desc, + const char *sw_desc, const char *serial_desc, + const char *dp_desc) +{ + struct ofp_desc_stats *ods; + + if (mfr_desc) { + if (strlen(mfr_desc) >= sizeof ods->mfr_desc) { + VLOG_WARN("truncating mfr_desc, must be less than %zu characters", + sizeof ods->mfr_desc); + } + free(p->mfr_desc); + p->mfr_desc = xstrdup(mfr_desc); + } + if (hw_desc) { + if (strlen(hw_desc) >= sizeof ods->hw_desc) { + VLOG_WARN("truncating hw_desc, must be less than %zu characters", + sizeof ods->hw_desc); + } + free(p->hw_desc); + p->hw_desc = xstrdup(hw_desc); + } + if (sw_desc) { + if (strlen(sw_desc) >= sizeof ods->sw_desc) { + VLOG_WARN("truncating sw_desc, must be less than %zu characters", + sizeof ods->sw_desc); + } + free(p->sw_desc); + p->sw_desc = xstrdup(sw_desc); + } + if (serial_desc) { + if (strlen(serial_desc) >= sizeof ods->serial_num) { + VLOG_WARN("truncating serial_desc, must be less than %zu " + "characters", + sizeof ods->serial_num); + } + free(p->serial_desc); + p->serial_desc = xstrdup(serial_desc); + } + if (dp_desc) { + if (strlen(dp_desc) >= sizeof ods->dp_desc) { + VLOG_WARN("truncating dp_desc, must be less than %zu characters", + sizeof ods->dp_desc); + } + free(p->dp_desc); + p->dp_desc = xstrdup(dp_desc); } } @@ -532,16 +796,14 @@ ofproto_set_snoops(struct ofproto *ofproto, const struct svec *snoops) } int -ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors, - uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface) +ofproto_set_netflow(struct ofproto *ofproto, + const struct netflow_options *nf_options) { - if (collectors && collectors->n) { + if (nf_options && nf_options->collectors.n) { if (!ofproto->netflow) { ofproto->netflow = netflow_create(); } - 
netflow_set_engine(ofproto->netflow, engine_type, engine_id, - add_id_to_iface); - return netflow_set_collectors(ofproto->netflow, collectors); + return netflow_set_options(ofproto->netflow, nf_options); } else { netflow_destroy(ofproto->netflow); ofproto->netflow = NULL; @@ -550,50 +812,31 @@ ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors, } void -ofproto_set_failure(struct ofproto *ofproto, bool fail_open) -{ - if (fail_open) { - struct rconn *rconn = ofproto->controller->rconn; - int trigger_duration = rconn_get_probe_interval(rconn) * 3; - if (!ofproto->fail_open) { - ofproto->fail_open = fail_open_create(ofproto, trigger_duration, - ofproto->switch_status, - rconn); - } else { - fail_open_set_trigger_duration(ofproto->fail_open, - trigger_duration); - } - } else { - fail_open_destroy(ofproto->fail_open); - ofproto->fail_open = NULL; - } -} - -void -ofproto_set_rate_limit(struct ofproto *ofproto, - int rate_limit, int burst_limit) -{ - if (rate_limit > 0) { - if (!ofproto->miss_sched) { - ofproto->miss_sched = pinsched_create(rate_limit, burst_limit, - ofproto->switch_status); - ofproto->action_sched = pinsched_create(rate_limit, burst_limit, - NULL); - } else { - pinsched_set_limits(ofproto->miss_sched, rate_limit, burst_limit); - pinsched_set_limits(ofproto->action_sched, - rate_limit, burst_limit); +ofproto_set_sflow(struct ofproto *ofproto, + const struct ofproto_sflow_options *oso) +{ + struct ofproto_sflow *os = ofproto->sflow; + if (oso) { + if (!os) { + struct ofport *ofport; + unsigned int odp_port; + + os = ofproto->sflow = ofproto_sflow_create(ofproto->dpif); + refresh_port_groups(ofproto); + PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, odp_port) { + ofproto_sflow_add_port(os, odp_port, + netdev_get_name(ofport->netdev)); + } } + ofproto_sflow_set_options(os, oso); } else { - pinsched_destroy(ofproto->miss_sched); - ofproto->miss_sched = NULL; - pinsched_destroy(ofproto->action_sched); - ofproto->action_sched = NULL; + ofproto_sflow_destroy(os); + ofproto->sflow = NULL; } } int -ofproto_set_stp(struct ofproto *ofproto UNUSED, bool enable_stp) +ofproto_set_stp(struct ofproto *ofproto OVS_UNUSED, bool enable_stp) { /* XXX */ if (enable_stp) { @@ -604,64 +847,16 @@ ofproto_set_stp(struct ofproto *ofproto UNUSED, bool enable_stp) } } -int -ofproto_set_remote_execution(struct ofproto *ofproto, const char *command_acl, - const char *command_dir) -{ - if (command_acl) { - if (!ofproto->executer) { - return executer_create(command_acl, command_dir, - &ofproto->executer); - } else { - executer_set_acl(ofproto->executer, command_acl, command_dir); - } - } else { - executer_destroy(ofproto->executer); - ofproto->executer = NULL; - } - return 0; -} - uint64_t ofproto_get_datapath_id(const struct ofproto *ofproto) { return ofproto->datapath_id; } -uint64_t -ofproto_get_mgmt_id(const struct ofproto *ofproto) -{ - return ofproto->mgmt_id; -} - -int -ofproto_get_probe_interval(const struct ofproto *ofproto) -{ - return rconn_get_probe_interval(ofproto->controller->rconn); -} - -int -ofproto_get_max_backoff(const struct ofproto *ofproto) -{ - return rconn_get_max_backoff(ofproto->controller->rconn); -} - -bool -ofproto_get_in_band(const struct ofproto *ofproto) -{ - return ofproto->in_band != NULL; -} - bool -ofproto_get_discovery(const struct ofproto *ofproto) -{ - return ofproto->discovery != NULL; -} - -const char * -ofproto_get_controller(const struct ofproto *ofproto) +ofproto_has_controller(const struct ofproto *ofproto) { - return 
rconn_get_name(ofproto->controller->rconn); + return !hmap_is_empty(&ofproto->controllers); } void @@ -696,13 +891,22 @@ ofproto_destroy(struct ofproto *p) return; } + /* Destroy fail-open and in-band early, since they touch the classifier. */ + fail_open_destroy(p->fail_open); + p->fail_open = NULL; + + in_band_destroy(p->in_band); + p->in_band = NULL; + free(p->extra_in_band_remotes); + ofproto_flush_flows(p); classifier_destroy(&p->cls); LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node, &p->all_conns) { - ofconn_destroy(ofconn, p); + ofconn_destroy(ofconn); } + hmap_destroy(&p->controllers); dpif_close(p->dpif); netdev_monitor_destroy(p->netdev_monitor); @@ -712,15 +916,8 @@ ofproto_destroy(struct ofproto *p) shash_destroy(&p->port_by_name); switch_status_destroy(p->switch_status); - in_band_destroy(p->in_band); - discovery_destroy(p->discovery); - fail_open_destroy(p->fail_open); - pinsched_destroy(p->miss_sched); - pinsched_destroy(p->action_sched); - executer_destroy(p->executer); netflow_destroy(p->netflow); - - switch_status_unregister(p->ss_cat); + ofproto_sflow_destroy(p->sflow); for (i = 0; i < p->n_listeners; i++) { pvconn_close(p->listeners[i]); @@ -734,6 +931,14 @@ ofproto_destroy(struct ofproto *p) mac_learning_destroy(p->ml); + free(p->mfr_desc); + free(p->hw_desc); + free(p->sw_desc); + free(p->serial_desc); + free(p->dp_desc); + + port_array_destroy(&p->ports); + free(p); } @@ -758,6 +963,49 @@ process_port_change(struct ofproto *ofproto, int error, char *devname) } } +/* Returns a "preference level" for snooping 'ofconn'. A higher return value + * means that 'ofconn' is more interesting for monitoring than a lower return + * value. */ +static int +snoop_preference(const struct ofconn *ofconn) +{ + switch (ofconn->role) { + case NX_ROLE_MASTER: + return 3; + case NX_ROLE_OTHER: + return 2; + case NX_ROLE_SLAVE: + return 1; + default: + /* Shouldn't happen. */ + return 0; + } +} + +/* One of ofproto's "snoop" pvconns has accepted a new connection on 'vconn'. + * Connects this vconn to a controller. */ +static void +add_snooper(struct ofproto *ofproto, struct vconn *vconn) +{ + struct ofconn *ofconn, *best; + + /* Pick a controller for monitoring. */ + best = NULL; + LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) { + if (ofconn->type == OFCONN_CONTROLLER + && (!best || snoop_preference(ofconn) > snoop_preference(best))) { + best = ofconn; + } + } + + if (best) { + rconn_add_monitor(best->rconn, vconn); + } else { + VLOG_INFO_RL(&rl, "no controller connection to snoop"); + vconn_close(vconn); + } +} + int ofproto_run1(struct ofproto *p) { @@ -766,6 +1014,10 @@ ofproto_run1(struct ofproto *p) int error; int i; + if (shash_is_empty(&p->port_by_name)) { + init_ports(p); + } + for (i = 0; i < 50; i++) { struct ofpbuf *buf; int error; @@ -776,8 +1028,8 @@ ofproto_run1(struct ofproto *p) /* Someone destroyed the datapath behind our back. The caller * better destroy us and give up, because we're just going to * spin from here on out. 
*/ - static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5); - VLOG_ERR_RL(&rl, "%s: datapath was destroyed externally", + static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5); + VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally", dpif_name(p->dpif)); return ENODEV; } @@ -796,25 +1048,10 @@ ofproto_run1(struct ofproto *p) } if (p->in_band) { - in_band_run(p->in_band); - } - if (p->discovery) { - char *controller_name; - if (rconn_is_connectivity_questionable(p->controller->rconn)) { - discovery_question_connectivity(p->discovery); - } - if (discovery_run(p->discovery, &controller_name)) { - if (controller_name) { - rconn_connect(p->controller->rconn, controller_name); - } else { - rconn_disconnect(p->controller->rconn); - } + if (time_msec() >= p->next_in_band_update) { + update_in_band_remotes(p); } - } - pinsched_run(p->miss_sched, send_packet_in_miss, p); - pinsched_run(p->action_sched, send_packet_in_action, p); - if (p->executer) { - executer_run(p->executer); + in_band_run(p->in_band); } LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node, @@ -834,7 +1071,8 @@ ofproto_run1(struct ofproto *p) retval = pvconn_accept(p->listeners[i], OFP_VERSION, &vconn); if (!retval) { - ofconn_create(p, rconn_new_from_vconn("passive", vconn)); + ofconn_create(p, rconn_new_from_vconn("passive", vconn), + OFCONN_TRANSIENT); } else if (retval != EAGAIN) { VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval)); } @@ -846,7 +1084,7 @@ ofproto_run1(struct ofproto *p) retval = pvconn_accept(p->snoops[i], OFP_VERSION, &vconn); if (!retval) { - rconn_add_monitor(p->controller->rconn, vconn); + add_snooper(p, vconn); } else if (retval != EAGAIN) { VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval)); } @@ -871,6 +1109,9 @@ ofproto_run1(struct ofproto *p) if (p->netflow) { netflow_run(p->netflow); } + if (p->sflow) { + ofproto_sflow_run(p->sflow); + } return 0; } @@ -914,18 +1155,14 @@ ofproto_wait(struct ofproto *p) ofconn_wait(ofconn); } if (p->in_band) { + poll_timer_wait(p->next_in_band_update - time_msec()); in_band_wait(p->in_band); } - if (p->discovery) { - discovery_wait(p->discovery); - } if (p->fail_open) { fail_open_wait(p->fail_open); } - pinsched_wait(p->miss_sched); - pinsched_wait(p->action_sched); - if (p->executer) { - executer_wait(p->executer); + if (p->sflow) { + ofproto_sflow_wait(p->sflow); } if (!tag_set_is_empty(&p->revalidate_set)) { poll_immediate_wake(); @@ -960,7 +1197,7 @@ ofproto_get_revalidate_set(struct ofproto *ofproto) bool ofproto_is_alive(const struct ofproto *p) { - return p->discovery || rconn_is_alive(p->controller->rconn); + return !hmap_is_empty(&p->controllers); } int @@ -972,7 +1209,7 @@ ofproto_send_packet(struct ofproto *p, const flow_t *flow, int error; error = xlate_actions(actions, n_actions, flow, p, packet, &odp_actions, - NULL, NULL); + NULL, NULL, NULL); if (error) { return error; } @@ -991,9 +1228,10 @@ ofproto_add_flow(struct ofproto *p, int idle_timeout) { struct rule *rule; - rule = rule_create(NULL, actions, n_actions, - idle_timeout >= 0 ? idle_timeout : 5 /* XXX */, 0); - cls_rule_from_flow(&rule->cr, flow, wildcards, priority); + rule = rule_create(p, NULL, actions, n_actions, + idle_timeout >= 0 ? 
idle_timeout : 5 /* XXX */, + 0, 0, false); + cls_rule_from_flow(flow, wildcards, priority, &rule->cr); rule_insert(p, rule, NULL, 0); } @@ -1067,7 +1305,7 @@ reinit_ports(struct ofproto *p) svec_destroy(&devnames); } -static void +static size_t refresh_port_group(struct ofproto *p, unsigned int group) { uint16_t *ports; @@ -1086,25 +1324,36 @@ refresh_port_group(struct ofproto *p, unsigned int group) } dpif_port_group_set(p->dpif, group, ports, n_ports); free(ports); + + return n_ports; } static void refresh_port_groups(struct ofproto *p) { - refresh_port_group(p, DP_GROUP_FLOOD); - refresh_port_group(p, DP_GROUP_ALL); + size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD); + size_t n_all = refresh_port_group(p, DP_GROUP_ALL); + if (p->sflow) { + ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all); + } } static struct ofport * make_ofport(const struct odp_port *odp_port) { + struct netdev_options netdev_options; enum netdev_flags flags; struct ofport *ofport; struct netdev *netdev; bool carrier; int error; - error = netdev_open(odp_port->devname, NETDEV_ETH_TYPE_NONE, &netdev); + memset(&netdev_options, 0, sizeof netdev_options); + netdev_options.name = odp_port->devname; + netdev_options.ethertype = NETDEV_ETH_TYPE_NONE; + netdev_options.may_open = true; + + error = netdev_open(&netdev_options, &netdev); if (error) { VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s " "cannot be opened (%s)", @@ -1177,6 +1426,10 @@ send_port_status(struct ofproto *p, const struct ofport *ofport, struct ofp_port_status *ops; struct ofpbuf *b; + if (!ofconn_receives_async_msgs(ofconn)) { + continue; + } + ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b); ops->reason = reason; ops->desc = ofport->opp; @@ -1191,19 +1444,29 @@ send_port_status(struct ofproto *p, const struct ofport *ofport, static void ofport_install(struct ofproto *p, struct ofport *ofport) { + uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no); + const char *netdev_name = (const char *) ofport->opp.name; + netdev_monitor_add(p->netdev_monitor, ofport->netdev); - port_array_set(&p->ports, ofp_port_to_odp_port(ofport->opp.port_no), - ofport); - shash_add(&p->port_by_name, (char *) ofport->opp.name, ofport); + port_array_set(&p->ports, odp_port, ofport); + shash_add(&p->port_by_name, netdev_name, ofport); + if (p->sflow) { + ofproto_sflow_add_port(p->sflow, odp_port, netdev_name); + } } static void ofport_remove(struct ofproto *p, struct ofport *ofport) { + uint16_t odp_port = ofp_port_to_odp_port(ofport->opp.port_no); + netdev_monitor_remove(p->netdev_monitor, ofport->netdev); - port_array_set(&p->ports, ofp_port_to_odp_port(ofport->opp.port_no), NULL); + port_array_set(&p->ports, odp_port, NULL); shash_delete(&p->port_by_name, shash_find(&p->port_by_name, (char *) ofport->opp.name)); + if (p->sflow) { + ofproto_sflow_del_port(p->sflow, odp_port); + } } static void @@ -1315,27 +1578,31 @@ init_ports(struct ofproto *p) } static struct ofconn * -ofconn_create(struct ofproto *p, struct rconn *rconn) +ofconn_create(struct ofproto *p, struct rconn *rconn, enum ofconn_type type) { - struct ofconn *ofconn = xmalloc(sizeof *ofconn); + struct ofconn *ofconn = xzalloc(sizeof *ofconn); + ofconn->ofproto = p; list_push_back(&p->all_conns, &ofconn->node); ofconn->rconn = rconn; + ofconn->type = type; + ofconn->role = NX_ROLE_OTHER; + ofconn->packet_in_counter = rconn_packet_counter_create (); ofconn->pktbuf = NULL; - ofconn->send_flow_exp = false; ofconn->miss_send_len = 0; - ofconn->packet_in_counter = 
rconn_packet_counter_create (); ofconn->reply_counter = rconn_packet_counter_create (); return ofconn; } static void -ofconn_destroy(struct ofconn *ofconn, struct ofproto *p) +ofconn_destroy(struct ofconn *ofconn) { - if (p->executer) { - executer_rconn_closing(p->executer, ofconn->rconn); + if (ofconn->type == OFCONN_CONTROLLER) { + hmap_remove(&ofconn->ofproto->controllers, &ofconn->hmap_node); } + discovery_destroy(ofconn->discovery); list_remove(&ofconn->node); + switch_status_unregister(ofconn->ss); rconn_destroy(ofconn->rconn); rconn_packet_counter_destroy(ofconn->packet_in_counter); rconn_packet_counter_destroy(ofconn->reply_counter); @@ -1347,6 +1614,25 @@ static void ofconn_run(struct ofconn *ofconn, struct ofproto *p) { int iteration; + size_t i; + + if (ofconn->discovery) { + char *controller_name; + if (rconn_is_connectivity_questionable(ofconn->rconn)) { + discovery_question_connectivity(ofconn->discovery); + } + if (discovery_run(ofconn->discovery, &controller_name)) { + if (controller_name) { + rconn_connect(ofconn->rconn, controller_name); + } else { + rconn_disconnect(ofconn->rconn); + } + } + } + + for (i = 0; i < N_SCHEDULERS; i++) { + pinsched_run(ofconn->schedulers[i], do_send_packet_in, ofconn); + } rconn_run(ofconn->rconn); @@ -1366,14 +1652,22 @@ ofconn_run(struct ofconn *ofconn, struct ofproto *p) } } - if (ofconn != p->controller && !rconn_is_alive(ofconn->rconn)) { - ofconn_destroy(ofconn, p); + if (!ofconn->discovery && !rconn_is_alive(ofconn->rconn)) { + ofconn_destroy(ofconn); } } static void ofconn_wait(struct ofconn *ofconn) { + int i; + + if (ofconn->discovery) { + discovery_wait(ofconn->discovery); + } + for (i = 0; i < N_SCHEDULERS; i++) { + pinsched_wait(ofconn->schedulers[i]); + } rconn_run_wait(ofconn->rconn); if (rconn_packet_counter_read (ofconn->reply_counter) < OFCONN_REPLY_MAX) { rconn_recv_wait(ofconn->rconn); @@ -1381,18 +1675,37 @@ ofconn_wait(struct ofconn *ofconn) COVERAGE_INC(ofproto_ofconn_stuck); } } - -/* Caller is responsible for initializing the 'cr' member of the returned + +/* Returns true if 'ofconn' should receive asynchronous messages. */ +static bool +ofconn_receives_async_msgs(const struct ofconn *ofconn) +{ + if (ofconn->type == OFCONN_CONTROLLER) { + /* Ordinary controllers always get asynchronous messages unless they + * have configured themselves as "slaves". */ + return ofconn->role != NX_ROLE_SLAVE; + } else { + /* Transient connections don't get asynchronous messages unless they + * have explicitly asked for them by setting a nonzero miss send + * length. */ + return ofconn->miss_send_len > 0; + } +} + +/* Caller is responsible for initializing the 'cr' member of the returned * rule. 
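ofconn_receives_async_msgs(), shown just above, is the gate the rest of the patch applies before queueing asynchronous OpenFlow messages: full controllers receive them unless they have set the slave role, and transient connections only if they configured a nonzero miss send length. A sketch of the resulting broadcast pattern follows; make_example_async_msg() is a placeholder rather than a real ofproto function, and the loop mirrors the test the patch adds to send_port_status().

/* Placeholder constructor -- not a real ofproto function. */
struct ofpbuf *make_example_async_msg(void);

static void
broadcast_async_message(struct ofproto *p)
{
    struct ofconn *ofconn;

    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        /* Slave controllers, and transient connections that never asked for
         * packets, are skipped. */
        if (!ofconn_receives_async_msgs(ofconn)) {
            continue;
        }

        /* No packet counter attached here; a caller that needs to bound the
         * queue would pass one of the ofconn's counters instead of NULL. */
        queue_tx(make_example_async_msg(), ofconn, NULL);
    }
}
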
*/ static struct rule * -rule_create(struct rule *super, +rule_create(struct ofproto *ofproto, struct rule *super, const union ofp_action *actions, size_t n_actions, - uint16_t idle_timeout, uint16_t hard_timeout) + uint16_t idle_timeout, uint16_t hard_timeout, + uint64_t flow_cookie, bool send_flow_removed) { - struct rule *rule = xcalloc(1, sizeof *rule); + struct rule *rule = xzalloc(sizeof *rule); rule->idle_timeout = idle_timeout; rule->hard_timeout = hard_timeout; + rule->flow_cookie = flow_cookie; rule->used = rule->created = time_msec(); + rule->send_flow_removed = send_flow_removed; rule->super = super; if (super) { list_push_back(&super->list, &rule->list); @@ -1401,6 +1714,9 @@ rule_create(struct rule *super, } rule->n_actions = n_actions; rule->actions = xmemdup(actions, n_actions * sizeof *actions); + netflow_flow_clear(&rule->nf_flow); + netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created); + return rule; } @@ -1489,7 +1805,7 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) { struct rule *super = rule->super ? rule->super : rule; if (xlate_actions(super->actions, super->n_actions, flow, ofproto, - packet, &a, NULL, 0)) { + packet, &a, NULL, 0, NULL)) { return; } actions = a.actions; @@ -1504,8 +1820,9 @@ rule_execute(struct ofproto *ofproto, struct rule *rule, actions, n_actions, packet)) { struct odp_flow_stats stats; flow_extract_stats(flow, packet, &stats); - update_stats(rule, &stats); + update_stats(ofproto, rule, &stats); rule->used = time_msec(); + netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used); } } @@ -1524,7 +1841,7 @@ rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet, /* Send the packet and credit it to the rule. */ if (packet) { flow_t flow; - flow_extract(packet, in_port, &flow); + flow_extract(packet, 0, in_port, &flow); rule_execute(p, rule, packet, &flow); } @@ -1547,12 +1864,12 @@ static struct rule * rule_create_subrule(struct ofproto *ofproto, struct rule *rule, const flow_t *flow) { - struct rule *subrule = rule_create(rule, NULL, 0, - rule->idle_timeout, rule->hard_timeout); + struct rule *subrule = rule_create(ofproto, rule, NULL, 0, + rule->idle_timeout, rule->hard_timeout, + 0, false); COVERAGE_INC(ofproto_subrule_create); - cls_rule_from_flow(&subrule->cr, flow, 0, - (rule->cr.priority <= UINT16_MAX ? UINT16_MAX - : rule->cr.priority)); + cls_rule_from_flow(flow, 0, (rule->cr.priority <= UINT16_MAX ? UINT16_MAX + : rule->cr.priority), &subrule->cr); classifier_insert_exact(&ofproto->cls, &subrule->cr); return subrule; @@ -1585,7 +1902,8 @@ rule_make_actions(struct ofproto *p, struct rule *rule, super = rule->super ? 
rule->super : rule; rule->tags = 0; xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p, - packet, &a, &rule->tags, &rule->may_install); + packet, &a, &rule->tags, &rule->may_install, + &rule->nf_flow.output_iface); actions_len = a.n_actions * sizeof *a.actions; if (rule->n_odp_actions != a.n_actions @@ -1608,6 +1926,7 @@ do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags, put->flow.key = rule->cr.flow; put->flow.actions = rule->odp_actions; put->flow.n_actions = rule->n_odp_actions; + put->flow.flags = 0; put->flags = flags; return dpif_flow_put(ofproto->dpif, put); } @@ -1624,7 +1943,7 @@ rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule) &put)) { rule->installed = true; if (displaced_rule) { - update_stats(rule, &put.flow.stats); + update_stats(p, displaced_rule, &put.flow.stats); rule_post_uninstall(p, displaced_rule); } } @@ -1648,14 +1967,27 @@ rule_reinstall(struct ofproto *ofproto, struct rule *rule) static void rule_update_actions(struct ofproto *ofproto, struct rule *rule) { - bool actions_changed = rule_make_actions(ofproto, rule, NULL); + bool actions_changed; + uint16_t new_out_iface, old_out_iface; + + old_out_iface = rule->nf_flow.output_iface; + actions_changed = rule_make_actions(ofproto, rule, NULL); + if (rule->may_install) { if (rule->installed) { if (actions_changed) { - /* XXX should really do rule_post_uninstall() for the *old* set - * of actions, and distinguish the old stats from the new. */ struct odp_flow_put put; - do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY, &put); + do_put_flow(ofproto, rule, ODPPF_CREATE | ODPPF_MODIFY + | ODPPF_ZERO_STATS, &put); + update_stats(ofproto, rule, &put.flow.stats); + + /* Temporarily set the old output iface so that NetFlow + * messages have the correct output interface for the old + * stats. */ + new_out_iface = rule->nf_flow.output_iface; + rule->nf_flow.output_iface = old_out_iface; + rule_post_uninstall(ofproto, rule); + rule->nf_flow.output_iface = new_out_iface; } } else { rule_install(ofproto, rule, NULL); @@ -1690,8 +2022,9 @@ rule_uninstall(struct ofproto *p, struct rule *rule) odp_flow.key = rule->cr.flow; odp_flow.actions = NULL; odp_flow.n_actions = 0; + odp_flow.flags = 0; if (!dpif_flow_del(p->dpif, &odp_flow)) { - update_stats(rule, &odp_flow.stats); + update_stats(p, rule, &odp_flow.stats); } rule->installed = false; @@ -1699,39 +2032,51 @@ rule_uninstall(struct ofproto *p, struct rule *rule) } } +static bool +is_controller_rule(struct rule *rule) +{ + /* If the only action is send to the controller then don't report + * NetFlow expiration messages since it is just part of the control + * logic for the network and not real traffic. 
*/ + + if (rule && rule->super) { + struct rule *super = rule->super; + + return super->n_actions == 1 && + super->actions[0].type == htons(OFPAT_OUTPUT) && + super->actions[0].output.port == htons(OFPP_CONTROLLER); + } + + return false; +} + static void rule_post_uninstall(struct ofproto *ofproto, struct rule *rule) { struct rule *super = rule->super; rule_account(ofproto, rule, 0); - if (ofproto->netflow && rule->byte_count) { + + if (ofproto->netflow && !is_controller_rule(rule)) { struct ofexpired expired; expired.flow = rule->cr.flow; expired.packet_count = rule->packet_count; expired.byte_count = rule->byte_count; expired.used = rule->used; - expired.created = rule->created; - expired.tcp_flags = rule->tcp_flags; - expired.ip_tos = rule->ip_tos; - netflow_expire(ofproto->netflow, &expired); + netflow_expire(ofproto->netflow, &rule->nf_flow, &expired); } if (super) { super->packet_count += rule->packet_count; super->byte_count += rule->byte_count; - super->tcp_flags |= rule->tcp_flags; - if (rule->packet_count) { - super->ip_tos = rule->ip_tos; - } - } - /* Reset counters to prevent double counting if the rule ever gets - * reinstalled. */ - rule->packet_count = 0; - rule->byte_count = 0; - rule->accounted_bytes = 0; - rule->tcp_flags = 0; - rule->ip_tos = 0; + /* Reset counters to prevent double counting if the rule ever gets + * reinstalled. */ + rule->packet_count = 0; + rule->byte_count = 0; + rule->accounted_bytes = 0; + + netflow_flow_clear(&rule->nf_flow); + } } static void @@ -1808,7 +2153,7 @@ handle_features_request(struct ofproto *p, struct ofconn *ofconn, osf->n_buffers = htonl(pktbuf_capacity()); osf->n_tables = 2; osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS | - OFPC_PORT_STATS | OFPC_MULTI_PHY_TX); + OFPC_PORT_STATS | OFPC_ARP_MATCH_IP); osf->actions = htonl((1u << OFPAT_OUTPUT) | (1u << OFPAT_SET_VLAN_VID) | (1u << OFPAT_SET_VLAN_PCP) | @@ -1817,6 +2162,7 @@ handle_features_request(struct ofproto *p, struct ofconn *ofconn, (1u << OFPAT_SET_DL_DST) | (1u << OFPAT_SET_NW_SRC) | (1u << OFPAT_SET_NW_DST) | + (1u << OFPAT_SET_NW_TOS) | (1u << OFPAT_SET_TP_SRC) | (1u << OFPAT_SET_TP_DST)); @@ -1840,9 +2186,6 @@ handle_get_config_request(struct ofproto *p, struct ofconn *ofconn, /* Figure out flags. */ dpif_get_drop_frags(p->dpif, &drop_frags); flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL; - if (ofconn->send_flow_exp) { - flags |= OFPC_SEND_FLOW_EXP; - } /* Send reply. 
*/ osc = make_openflow_xid(sizeof *osc, OFPT_GET_CONFIG_REPLY, oh->xid, &buf); @@ -1866,9 +2209,7 @@ handle_set_config(struct ofproto *p, struct ofconn *ofconn, } flags = ntohs(osc->flags); - ofconn->send_flow_exp = (flags & OFPC_SEND_FLOW_EXP) != 0; - - if (ofconn == p->controller) { + if (ofconn->type == OFCONN_CONTROLLER && ofconn->role != NX_ROLE_SLAVE) { switch (flags & OFPC_FRAG_MASK) { case OFPC_FRAG_NORMAL: dpif_set_drop_frags(p->dpif, false); @@ -1883,23 +2224,20 @@ handle_set_config(struct ofproto *p, struct ofconn *ofconn, } } - if ((ntohs(osc->miss_send_len) != 0) != (ofconn->miss_send_len != 0)) { - if (ntohs(osc->miss_send_len) != 0) { - ofconn->pktbuf = pktbuf_create(); - } else { - pktbuf_destroy(ofconn->pktbuf); - } - } - ofconn->miss_send_len = ntohs(osc->miss_send_len); return 0; } static void -add_output_group_action(struct odp_actions *actions, uint16_t group) +add_output_group_action(struct odp_actions *actions, uint16_t group, + uint16_t *nf_output_iface) { odp_actions_add(actions, ODPAT_OUTPUT_GROUP)->output_group.group = group; + + if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) { + *nf_output_iface = NF_OUT_FLOOD; + } } static void @@ -1907,12 +2245,12 @@ add_controller_action(struct odp_actions *actions, const struct ofp_action_output *oao) { union odp_action *a = odp_actions_add(actions, ODPAT_CONTROLLER); - a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX; + a->controller.arg = ntohs(oao->max_len); } struct action_xlate_ctx { /* Input. */ - const flow_t *flow; /* Flow to which these actions correspond. */ + flow_t flow; /* Flow to which these actions correspond. */ int recurse; /* Recursion level, via xlate_table_action. */ struct ofproto *ofproto; const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a @@ -1922,8 +2260,9 @@ struct action_xlate_ctx { /* Output. */ struct odp_actions *out; /* Datapath actions. */ tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */ - bool may_setup_flow; /* True ordinarily; false if the actions must + bool may_set_up_flow; /* True ordinarily; false if the actions must * be reassessed for every packet. */ + uint16_t nf_output_iface; /* Output interface index for NetFlow. */ }; static void do_xlate_actions(const union ofp_action *in, size_t n_in, @@ -1948,6 +2287,7 @@ add_output_action(struct action_xlate_ctx *ctx, uint16_t port) } odp_actions_add(ctx->out, ODPAT_OUTPUT)->output.port = port; + ctx->nf_output_iface = port; } static struct rule * @@ -1973,13 +2313,17 @@ static void xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port) { if (!ctx->recurse) { + uint16_t old_in_port; struct rule *rule; - flow_t flow; - flow = *ctx->flow; - flow.in_port = in_port; + /* Look up a flow with 'in_port' as the input port. Then restore the + * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will + * have surprising behavior). 
*/ + old_in_port = ctx->flow.in_port; + ctx->flow.in_port = in_port; + rule = lookup_valid_rule(ctx->ofproto, &ctx->flow); + ctx->flow.in_port = old_in_port; - rule = lookup_valid_rule(ctx->ofproto, &flow); if (rule) { if (rule->super) { rule = rule->super; @@ -1997,27 +2341,32 @@ xlate_output_action(struct action_xlate_ctx *ctx, const struct ofp_action_output *oao) { uint16_t odp_port; + uint16_t prev_nf_output_iface = ctx->nf_output_iface; + + ctx->nf_output_iface = NF_OUT_DROP; switch (ntohs(oao->port)) { case OFPP_IN_PORT: - add_output_action(ctx, ctx->flow->in_port); + add_output_action(ctx, ctx->flow.in_port); break; case OFPP_TABLE: - xlate_table_action(ctx, ctx->flow->in_port); + xlate_table_action(ctx, ctx->flow.in_port); break; case OFPP_NORMAL: - if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet, + if (!ctx->ofproto->ofhooks->normal_cb(&ctx->flow, ctx->packet, ctx->out, ctx->tags, + &ctx->nf_output_iface, ctx->ofproto->aux)) { COVERAGE_INC(ofproto_uninstallable); - ctx->may_setup_flow = false; + ctx->may_set_up_flow = false; } break; case OFPP_FLOOD: - add_output_group_action(ctx->out, DP_GROUP_FLOOD); + add_output_group_action(ctx->out, DP_GROUP_FLOOD, + &ctx->nf_output_iface); break; case OFPP_ALL: - add_output_group_action(ctx->out, DP_GROUP_ALL); + add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface); break; case OFPP_CONTROLLER: add_controller_action(ctx->out, oao); @@ -2027,11 +2376,20 @@ xlate_output_action(struct action_xlate_ctx *ctx, break; default: odp_port = ofp_port_to_odp_port(ntohs(oao->port)); - if (odp_port != ctx->flow->in_port) { + if (odp_port != ctx->flow.in_port) { add_output_action(ctx, odp_port); } break; } + + if (prev_nf_output_iface == NF_OUT_FLOOD) { + ctx->nf_output_iface = NF_OUT_FLOOD; + } else if (ctx->nf_output_iface == NF_OUT_DROP) { + ctx->nf_output_iface = prev_nf_output_iface; + } else if (prev_nf_output_iface != NF_OUT_DROP && + ctx->nf_output_iface != NF_OUT_FLOOD) { + ctx->nf_output_iface = NF_OUT_MULTI; + } } static void @@ -2039,6 +2397,8 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, const struct nx_action_header *nah) { const struct nx_action_resubmit *nar; + const struct nx_action_set_tunnel *nast; + union odp_action *oa; int subtype = ntohs(nah->subtype); assert(nah->vendor == htonl(NX_VENDOR_ID)); @@ -2048,6 +2408,15 @@ xlate_nicira_action(struct action_xlate_ctx *ctx, xlate_table_action(ctx, ofp_port_to_odp_port(ntohs(nar->in_port))); break; + case NXAST_SET_TUNNEL: + nast = (const struct nx_action_set_tunnel *) nah; + oa = odp_actions_add(ctx->out, ODPAT_SET_TUNNEL); + ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id; + break; + + /* If you add a new action here that modifies flow data, don't forget to + * update the flow key in ctx->flow in the same key. */ + default: VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype); break; @@ -2062,9 +2431,9 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, const union ofp_action *ia; const struct ofport *port; - port = port_array_get(&ctx->ofproto->ports, ctx->flow->in_port); + port = port_array_get(&ctx->ofproto->ports, ctx->flow.in_port); if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) && - port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr) + port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, stp_eth_addr) ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) { /* Drop this flow. 
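A few hunks above, xlate_output_action() starts tracking the NetFlow output interface: each output action proposes a value (a specific port, NF_OUT_FLOOD for the flood/all groups, or NF_OUT_DROP if it produced no output), and successive actions are folded together so that any flood wins, a no-output action is ignored, and two distinct outputs collapse to NF_OUT_MULTI. The same folding rule, restated as a small standalone function with stand-in constants (the real NF_OUT_* values come from netflow.h):

#include <stdint.h>
#include <stdio.h>

#define NF_OUT_FLOOD  UINT16_MAX        /* stand-in value */
#define NF_OUT_MULTI  (UINT16_MAX - 1)  /* stand-in value */
#define NF_OUT_DROP   (UINT16_MAX - 2)  /* stand-in value */

static uint16_t
combine_nf_output(uint16_t prev, uint16_t cur)
{
    if (prev == NF_OUT_FLOOD) {
        return NF_OUT_FLOOD;            /* Flooding swallows everything else. */
    } else if (cur == NF_OUT_DROP) {
        return prev;                    /* This action added no output. */
    } else if (prev != NF_OUT_DROP && cur != NF_OUT_FLOOD) {
        return NF_OUT_MULTI;            /* More than one distinct output so far. */
    } else {
        return cur;                     /* First output, or a new flood. */
    }
}

int
main(void)
{
    uint16_t iface = NF_OUT_DROP;       /* No output actions seen yet. */
    iface = combine_nf_output(iface, 3);            /* output:3  -> 3 */
    iface = combine_nf_output(iface, 5);            /* output:5  -> MULTI */
    iface = combine_nf_output(iface, NF_OUT_FLOOD); /* flood     -> FLOOD */
    printf("final NetFlow output iface code: %u\n", iface);
    return 0;
}
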
*/ return; @@ -2081,38 +2450,59 @@ do_xlate_actions(const union ofp_action *in, size_t n_in, case OFPAT_SET_VLAN_VID: oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_VID); - oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid; + ctx->flow.dl_vlan = oa->vlan_vid.vlan_vid = ia->vlan_vid.vlan_vid; break; case OFPAT_SET_VLAN_PCP: oa = odp_actions_add(ctx->out, ODPAT_SET_VLAN_PCP); - oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp; + ctx->flow.dl_vlan_pcp = oa->vlan_pcp.vlan_pcp = ia->vlan_pcp.vlan_pcp; break; case OFPAT_STRIP_VLAN: odp_actions_add(ctx->out, ODPAT_STRIP_VLAN); + ctx->flow.dl_vlan = OFP_VLAN_NONE; + ctx->flow.dl_vlan_pcp = 0; break; case OFPAT_SET_DL_SRC: oa = odp_actions_add(ctx->out, ODPAT_SET_DL_SRC); memcpy(oa->dl_addr.dl_addr, ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + memcpy(ctx->flow.dl_src, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_DL_DST: oa = odp_actions_add(ctx->out, ODPAT_SET_DL_DST); memcpy(oa->dl_addr.dl_addr, ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + memcpy(ctx->flow.dl_dst, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); break; case OFPAT_SET_NW_SRC: oa = odp_actions_add(ctx->out, ODPAT_SET_NW_SRC); - oa->nw_addr.nw_addr = ia->nw_addr.nw_addr; + ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr; + break; + + case OFPAT_SET_NW_DST: + oa = odp_actions_add(ctx->out, ODPAT_SET_NW_DST); + ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr; + break; + + case OFPAT_SET_NW_TOS: + oa = odp_actions_add(ctx->out, ODPAT_SET_NW_TOS); + ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos; break; case OFPAT_SET_TP_SRC: oa = odp_actions_add(ctx->out, ODPAT_SET_TP_SRC); - oa->tp_port.tp_port = ia->tp_port.tp_port; + ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port; + break; + + case OFPAT_SET_TP_DST: + oa = odp_actions_add(ctx->out, ODPAT_SET_TP_DST); + ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port; break; case OFPAT_VENDOR: @@ -2130,29 +2520,34 @@ static int xlate_actions(const union ofp_action *in, size_t n_in, const flow_t *flow, struct ofproto *ofproto, const struct ofpbuf *packet, - struct odp_actions *out, tag_type *tags, bool *may_setup_flow) + struct odp_actions *out, tag_type *tags, bool *may_set_up_flow, + uint16_t *nf_output_iface) { tag_type no_tags = 0; struct action_xlate_ctx ctx; COVERAGE_INC(ofproto_ofp2odp); odp_actions_init(out); - ctx.flow = flow; + ctx.flow = *flow; ctx.recurse = 0; ctx.ofproto = ofproto; ctx.packet = packet; ctx.out = out; ctx.tags = tags ? tags : &no_tags; - ctx.may_setup_flow = true; + ctx.may_set_up_flow = true; + ctx.nf_output_iface = NF_OUT_DROP; do_xlate_actions(in, n_in, &ctx); - /* Check with in-band control to see if we're allowed to setup this + /* Check with in-band control to see if we're allowed to set up this * flow. */ if (!in_band_rule_check(ofproto->in_band, flow, out)) { - ctx.may_setup_flow = false; + ctx.may_set_up_flow = false; } - if (may_setup_flow) { - *may_setup_flow = ctx.may_setup_flow; + if (may_set_up_flow) { + *may_set_up_flow = ctx.may_set_up_flow; + } + if (nf_output_iface) { + *nf_output_iface = ctx.nf_output_iface; } if (odp_actions_overflow(out)) { odp_actions_init(out); @@ -2161,6 +2556,29 @@ xlate_actions(const union ofp_action *in, size_t n_in, return 0; } +/* Checks whether 'ofconn' is a slave controller. If so, returns an OpenFlow + * error message code (composed with ofp_mkerr()) for the caller to propagate + * upward. Otherwise, returns 0. 
+ * + * 'oh' is used to make log messages more informative. */ +static int +reject_slave_controller(struct ofconn *ofconn, const struct ofp_header *oh) +{ + if (ofconn->type == OFCONN_CONTROLLER && ofconn->role == NX_ROLE_SLAVE) { + static struct vlog_rate_limit perm_rl = VLOG_RATE_LIMIT_INIT(1, 5); + char *type_name; + + type_name = ofp_message_type_to_string(oh->type); + VLOG_WARN_RL(&perm_rl, "rejecting %s message from slave controller", + type_name); + free(type_name); + + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); + } else { + return 0; + } +} + static int handle_packet_out(struct ofproto *p, struct ofconn *ofconn, struct ofp_header *oh) @@ -2173,6 +2591,11 @@ handle_packet_out(struct ofproto *p, struct ofconn *ofconn, flow_t flow; int error; + error = reject_slave_controller(ofconn, oh); + if (error) { + return error; + } + error = check_ofp_packet_out(oh, &payload, &n_actions, p->max_ports); if (error) { return error; @@ -2191,9 +2614,9 @@ handle_packet_out(struct ofproto *p, struct ofconn *ofconn, buffer = NULL; } - flow_extract(&payload, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow); + flow_extract(&payload, 0, ofp_port_to_odp_port(ntohs(opo->in_port)), &flow); error = xlate_actions((const union ofp_action *) opo->actions, n_actions, - &flow, p, &payload, &actions, NULL, NULL); + &flow, p, &payload, &actions, NULL, NULL, NULL); if (error) { return error; } @@ -2226,7 +2649,7 @@ update_port_config(struct ofproto *p, struct ofport *port, #undef REVALIDATE_BITS if (mask & OFPPC_NO_FLOOD) { port->opp.config ^= OFPPC_NO_FLOOD; - refresh_port_group(p, DP_GROUP_FLOOD); + refresh_port_groups(p); } if (mask & OFPPC_NO_PACKET_IN) { port->opp.config ^= OFPPC_NO_PACKET_IN; @@ -2234,12 +2657,17 @@ update_port_config(struct ofproto *p, struct ofport *port, } static int -handle_port_mod(struct ofproto *p, struct ofp_header *oh) +handle_port_mod(struct ofproto *p, struct ofconn *ofconn, + struct ofp_header *oh) { const struct ofp_port_mod *opm; struct ofport *port; int error; + error = reject_slave_controller(ofconn, oh); + if (error) { + return error; + } error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm); if (error) { return error; @@ -2303,10 +2731,12 @@ handle_desc_stats_request(struct ofproto *p, struct ofconn *ofconn, msg = start_stats_reply(request, sizeof *ods); ods = append_stats_reply(sizeof *ods, ofconn, &msg); - strncpy(ods->mfr_desc, p->manufacturer, sizeof ods->mfr_desc); - strncpy(ods->hw_desc, p->hardware, sizeof ods->hw_desc); - strncpy(ods->sw_desc, p->software, sizeof ods->sw_desc); - strncpy(ods->serial_num, p->serial, sizeof ods->serial_num); + memset(ods, 0, sizeof *ods); + ovs_strlcpy(ods->mfr_desc, p->mfr_desc, sizeof ods->mfr_desc); + ovs_strlcpy(ods->hw_desc, p->hw_desc, sizeof ods->hw_desc); + ovs_strlcpy(ods->sw_desc, p->sw_desc, sizeof ods->sw_desc); + ovs_strlcpy(ods->serial_num, p->serial_desc, sizeof ods->serial_num); + ovs_strlcpy(ods->dp_desc, p->dp_desc, sizeof ods->dp_desc); queue_tx(msg, ofconn, ofconn->reply_counter); return 0; @@ -2358,7 +2788,8 @@ handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn, memset(ots, 0, sizeof *ots); ots->table_id = TABLEID_CLASSIFIER; strcpy(ots->name, "classifier"); - ots->wildcards = htonl(OFPFW_ALL); + ots->wildcards = p->tun_id_from_cookie ? 
htonl(OVSFW_ALL) + : htonl(OFPFW_ALL); ots->max_entries = htonl(65536); ots->active_count = htonl(n_wild); ots->lookup_count = htonll(0); /* XXX */ @@ -2368,39 +2799,62 @@ handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn, return 0; } +static void +append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn, + struct ofpbuf **msgp) +{ + struct netdev_stats stats; + struct ofp_port_stats *ops; + + /* Intentionally ignore return value, since errors will set + * 'stats' to all-1s, which is correct for OpenFlow, and + * netdev_get_stats() will log errors. */ + netdev_get_stats(port->netdev, &stats); + + ops = append_stats_reply(sizeof *ops, ofconn, msgp); + ops->port_no = htons(odp_port_to_ofp_port(port_no)); + memset(ops->pad, 0, sizeof ops->pad); + ops->rx_packets = htonll(stats.rx_packets); + ops->tx_packets = htonll(stats.tx_packets); + ops->rx_bytes = htonll(stats.rx_bytes); + ops->tx_bytes = htonll(stats.tx_bytes); + ops->rx_dropped = htonll(stats.rx_dropped); + ops->tx_dropped = htonll(stats.tx_dropped); + ops->rx_errors = htonll(stats.rx_errors); + ops->tx_errors = htonll(stats.tx_errors); + ops->rx_frame_err = htonll(stats.rx_frame_errors); + ops->rx_over_err = htonll(stats.rx_over_errors); + ops->rx_crc_err = htonll(stats.rx_crc_errors); + ops->collisions = htonll(stats.collisions); +} + static int handle_port_stats_request(struct ofproto *p, struct ofconn *ofconn, - struct ofp_stats_request *request) + struct ofp_stats_request *osr, + size_t arg_size) { + struct ofp_port_stats_request *psr; struct ofp_port_stats *ops; struct ofpbuf *msg; struct ofport *port; unsigned int port_no; - msg = start_stats_reply(request, sizeof *ops * 16); - PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) { - struct netdev_stats stats; - - /* Intentionally ignore return value, since errors will set 'stats' to - * all-1s, which is correct for OpenFlow, and netdev_get_stats() will - * log errors. */ - netdev_get_stats(port->netdev, &stats); - - ops = append_stats_reply(sizeof *ops, ofconn, &msg); - ops->port_no = htons(odp_port_to_ofp_port(port_no)); - memset(ops->pad, 0, sizeof ops->pad); - ops->rx_packets = htonll(stats.rx_packets); - ops->tx_packets = htonll(stats.tx_packets); - ops->rx_bytes = htonll(stats.rx_bytes); - ops->tx_bytes = htonll(stats.tx_bytes); - ops->rx_dropped = htonll(stats.rx_dropped); - ops->tx_dropped = htonll(stats.tx_dropped); - ops->rx_errors = htonll(stats.rx_errors); - ops->tx_errors = htonll(stats.tx_errors); - ops->rx_frame_err = htonll(stats.rx_frame_errors); - ops->rx_over_err = htonll(stats.rx_over_errors); - ops->rx_crc_err = htonll(stats.rx_crc_errors); - ops->collisions = htonll(stats.collisions); + if (arg_size != sizeof *psr) { + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); + } + psr = (struct ofp_port_stats_request *) osr->body; + + msg = start_stats_reply(osr, sizeof *ops * 16); + if (psr->port_no != htons(OFPP_NONE)) { + port = port_array_get(&p->ports, + ofp_port_to_odp_port(ntohs(psr->port_no))); + if (port) { + append_port_stat(port, ntohs(psr->port_no), ofconn, &msg); + } + } else { + PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) { + append_port_stat(port, port_no, ofconn, &msg); + } } queue_tx(msg, ofconn, ofconn->reply_counter); @@ -2414,6 +2868,9 @@ struct flow_stats_cbdata { struct ofpbuf *msg; }; +/* Obtains statistic counters for 'rule' within 'p' and stores them into + * '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the + * returned statistic include statistics for all of 'rule''s subrules. 
*/ static void query_stats(struct ofproto *p, struct rule *rule, uint64_t *packet_countp, uint64_t *byte_countp) @@ -2423,11 +2880,21 @@ query_stats(struct ofproto *p, struct rule *rule, struct odp_flow *odp_flows; size_t n_odp_flows; + /* Start from historical data for 'rule' itself that are no longer tracked + * by the datapath. This counts, for example, subrules that have + * expired. */ packet_count = rule->packet_count; byte_count = rule->byte_count; + /* Prepare to ask the datapath for statistics on 'rule', or if it is + * wildcarded then on all of its subrules. + * + * Also, add any statistics that are not tracked by the datapath for each + * subrule. This includes, for example, statistics for packets that were + * executed "by hand" by ofproto via dpif_execute() but must be accounted + * to a flow. */ n_odp_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1; - odp_flows = xcalloc(1, n_odp_flows * sizeof *odp_flows); + odp_flows = xzalloc(n_odp_flows * sizeof *odp_flows); if (rule->cr.wc.wildcards) { size_t i = 0; LIST_FOR_EACH (subrule, struct rule, list, &rule->list) { @@ -2439,8 +2906,7 @@ query_stats(struct ofproto *p, struct rule *rule, odp_flows[0].key = rule->cr.flow; } - packet_count = rule->packet_count; - byte_count = rule->byte_count; + /* Fetch up-to-date statistics from the datapath and add them in. */ if (!dpif_flow_get_multiple(p->dpif, odp_flows, n_odp_flows)) { size_t i; for (i = 0; i < n_odp_flows; i++) { @@ -2451,6 +2917,7 @@ query_stats(struct ofproto *p, struct rule *rule, } free(odp_flows); + /* Return the stats to the caller. */ *packet_countp = packet_count; *byte_countp = byte_count; } @@ -2463,6 +2930,9 @@ flow_stats_cb(struct cls_rule *rule_, void *cbdata_) struct ofp_flow_stats *ofs; uint64_t packet_count, byte_count; size_t act_len, len; + long long int tdiff = time_msec() - rule->created; + uint32_t sec = tdiff / 1000; + uint32_t msec = tdiff - (sec * 1000); if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) { return; @@ -2477,8 +2947,11 @@ flow_stats_cb(struct cls_rule *rule_, void *cbdata_) ofs->length = htons(len); ofs->table_id = rule->cr.wc.wildcards ? 
TABLEID_CLASSIFIER : TABLEID_HASH; ofs->pad = 0; - flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match); - ofs->duration = htonl((time_msec() - rule->created) / 1000); + flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, + cbdata->ofproto->tun_id_from_cookie, &ofs->match); + ofs->duration_sec = htonl(sec); + ofs->duration_nsec = htonl(msec * 1000000); + ofs->cookie = rule->flow_cookie; ofs->priority = htons(rule->cr.priority); ofs->idle_timeout = htons(rule->idle_timeout); ofs->hard_timeout = htons(rule->hard_timeout); @@ -2507,7 +2980,7 @@ handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn, struct cls_rule target; if (arg_size != sizeof *fsr) { - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); } fsr = (struct ofp_flow_stats_request *) osr->body; @@ -2516,7 +2989,7 @@ handle_flow_stats_request(struct ofproto *p, struct ofconn *ofconn, cbdata.ofconn = ofconn; cbdata.out_port = fsr->out_port; cbdata.msg = start_stats_reply(osr, 1024); - cls_rule_from_match(&target, &fsr->match, 0); + cls_rule_from_match(&fsr->match, 0, false, 0, &target); classifier_for_each_match(&p->cls, &target, table_id_to_include(fsr->table_id), flow_stats_cb, &cbdata); @@ -2545,7 +3018,8 @@ flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_) } query_stats(cbdata->ofproto, rule, &packet_count, &byte_count); - flow_to_ovs_match(&rule->cr.flow, rule->cr.wc.wildcards, &match); + flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, + cbdata->ofproto->tun_id_from_cookie, &match); ds_put_format(results, "duration=%llds, ", (time_msec() - rule->created) / 1000); @@ -2567,12 +3041,12 @@ ofproto_get_all_flows(struct ofproto *p, struct ds *results) struct flow_stats_ds_cbdata cbdata; memset(&match, 0, sizeof match); - match.wildcards = htonl(OFPFW_ALL); + match.wildcards = htonl(OVSFW_ALL); cbdata.ofproto = p; cbdata.results = results; - cls_rule_from_match(&target, &match, 0); + cls_rule_from_match(&match, 0, false, 0, &target); classifier_for_each_match(&p->cls, &target, CLS_INC_ALL, flow_stats_ds_cb, &cbdata); } @@ -2615,7 +3089,7 @@ handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn, struct ofpbuf *msg; if (arg_size != sizeof *asr) { - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); } asr = (struct ofp_aggregate_stats_request *) osr->body; @@ -2625,7 +3099,7 @@ handle_aggregate_stats_request(struct ofproto *p, struct ofconn *ofconn, cbdata.packet_count = 0; cbdata.byte_count = 0; cbdata.n_flows = 0; - cls_rule_from_match(&target, &asr->match, 0); + cls_rule_from_match(&asr->match, 0, false, 0, &target); classifier_for_each_match(&p->cls, &target, table_id_to_include(asr->table_id), aggregate_stats_cb, &cbdata); @@ -2668,7 +3142,7 @@ handle_stats_request(struct ofproto *p, struct ofconn *ofconn, return handle_table_stats_request(p, ofconn, osr); case OFPST_PORT: - return handle_port_stats_request(p, ofconn, osr); + return handle_port_stats_request(p, ofconn, osr, arg_size); case OFPST_VENDOR: return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR); @@ -2685,45 +3159,76 @@ msec_from_nsec(uint64_t sec, uint32_t nsec) } static void -update_time(struct rule *rule, const struct odp_flow_stats *stats) +update_time(struct ofproto *ofproto, struct rule *rule, + const struct odp_flow_stats *stats) { long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec); if (used > rule->used) { rule->used = used; + if (rule->super && used > 
rule->super->used) { + rule->super->used = used; + } + netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used); } } static void -update_stats(struct rule *rule, const struct odp_flow_stats *stats) +update_stats(struct ofproto *ofproto, struct rule *rule, + const struct odp_flow_stats *stats) { - update_time(rule, stats); - rule->packet_count += stats->n_packets; - rule->byte_count += stats->n_bytes; - rule->tcp_flags |= stats->tcp_flags; if (stats->n_packets) { - rule->ip_tos = stats->ip_tos; + update_time(ofproto, rule, stats); + rule->packet_count += stats->n_packets; + rule->byte_count += stats->n_bytes; + netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos, + stats->tcp_flags); } } +/* Implements OFPFC_ADD and the cases for OFPFC_MODIFY and OFPFC_MODIFY_STRICT + * in which no matching flow already exists in the flow table. + * + * Adds the flow specified by 'ofm', which is followed by 'n_actions' + * ofp_actions, to 'p''s flow table. Returns 0 on success or an OpenFlow error + * code as encoded by ofp_mkerr() on failure. + * + * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, + * if any. */ static int add_flow(struct ofproto *p, struct ofconn *ofconn, - struct ofp_flow_mod *ofm, size_t n_actions) + const struct ofp_flow_mod *ofm, size_t n_actions) { struct ofpbuf *packet; struct rule *rule; uint16_t in_port; int error; - rule = rule_create(NULL, (const union ofp_action *) ofm->actions, + if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) { + flow_t flow; + uint32_t wildcards; + + flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie, + &flow, &wildcards); + if (classifier_rule_overlaps(&p->cls, &flow, wildcards, + ntohs(ofm->priority))) { + return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP); + } + } + + rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions, n_actions, ntohs(ofm->idle_timeout), - ntohs(ofm->hard_timeout)); - cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority)); + ntohs(ofm->hard_timeout), ofm->cookie, + ofm->flags & htons(OFPFF_SEND_FLOW_REM)); + cls_rule_from_match(&ofm->match, ntohs(ofm->priority), + p->tun_id_from_cookie, ofm->cookie, &rule->cr); - packet = NULL; error = 0; if (ofm->buffer_id != htonl(UINT32_MAX)) { error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id), &packet, &in_port); + } else { + packet = NULL; + in_port = UINT16_MAX; } rule_insert(p, rule, packet, in_port); @@ -2731,109 +3236,227 @@ add_flow(struct ofproto *p, struct ofconn *ofconn, return error; } -static int -modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm, - size_t n_actions, uint16_t command, struct rule *rule) +static struct rule * +find_flow_strict(struct ofproto *p, const struct ofp_flow_mod *ofm) { - if (rule_is_hidden(rule)) { - return 0; - } - - if (command == OFPFC_DELETE) { - rule_remove(p, rule); - } else { - size_t actions_len = n_actions * sizeof *rule->actions; - - if (n_actions == rule->n_actions - && !memcmp(ofm->actions, rule->actions, actions_len)) - { - return 0; - } - - free(rule->actions); - rule->actions = xmemdup(ofm->actions, actions_len); - rule->n_actions = n_actions; - - if (rule->cr.wc.wildcards) { - COVERAGE_INC(ofproto_mod_wc_flow); - p->need_revalidate = true; - } else { - rule_update_actions(p, rule); - } - } + uint32_t wildcards; + flow_t flow; - return 0; + flow_from_match(&ofm->match, p->tun_id_from_cookie, ofm->cookie, + &flow, &wildcards); + return rule_from_cls_rule(classifier_find_rule_exactly( + &p->cls, &flow, wildcards, + ntohs(ofm->priority))); } 
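The NetFlow bookkeeping added to xlate_output_action() earlier in this hunk is the least obvious part of the translation changes: each OFPAT_OUTPUT action folds its own result into ctx->nf_output_iface. A minimal sketch of that combining rule, written as a hypothetical helper and assuming only the NF_OUT_* constants already referenced by the patch:

/* Hypothetical helper, not part of the patch: combines the NetFlow output
 * interface recorded so far ('prev') with the result of translating one more
 * output action ('cur', which is NF_OUT_DROP, NF_OUT_FLOOD, or an ODP port
 * number).  Mirrors the if/else chain at the end of xlate_output_action(). */
static uint16_t
combine_nf_output_iface(uint16_t prev, uint16_t cur)
{
    if (prev == NF_OUT_FLOOD) {
        return NF_OUT_FLOOD;        /* A flood already covers every port. */
    } else if (cur == NF_OUT_DROP) {
        return prev;                /* This action produced no output. */
    } else if (prev != NF_OUT_DROP && cur != NF_OUT_FLOOD) {
        return NF_OUT_MULTI;        /* Two or more distinct outputs. */
    } else {
        return cur;                 /* First output seen, or a flood. */
    }
}

So a single output records that port, a second distinct output collapses to NF_OUT_MULTI, and a flood wins over everything, which matches what a single NetFlow output-interface field can express.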
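Both the flow stats reply and the OFPT_FLOW_REMOVED message built later in this patch split a millisecond age into the duration_sec/duration_nsec pair. A small sketch of that arithmetic, with an illustrative helper name:

/* Illustrative helper, not in the patch: splits a rule's age in milliseconds
 * into the second/nanosecond pair carried by OpenFlow.  For example,
 * 75250 ms becomes 75 s and 250000000 ns. */
static void
split_duration(long long int age_msec, uint32_t *sec, uint32_t *nsec)
{
    *sec = age_msec / 1000;
    *nsec = (age_msec - (long long int) *sec * 1000) * 1000000;
}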
static int -modify_flows_strict(struct ofproto *p, const struct ofp_flow_mod *ofm, - size_t n_actions, uint16_t command) +send_buffered_packet(struct ofproto *ofproto, struct ofconn *ofconn, + struct rule *rule, const struct ofp_flow_mod *ofm) { - struct rule *rule; - uint32_t wildcards; + struct ofpbuf *packet; + uint16_t in_port; flow_t flow; + int error; - flow_from_match(&flow, &wildcards, &ofm->match); - rule = rule_from_cls_rule(classifier_find_rule_exactly( - &p->cls, &flow, wildcards, - ntohs(ofm->priority))); - - if (rule) { - if (command == OFPFC_DELETE - && ofm->out_port != htons(OFPP_NONE) - && !rule_has_out_port(rule, ofm->out_port)) { - return 0; - } + if (ofm->buffer_id == htonl(UINT32_MAX)) { + return 0; + } - modify_flow(p, ofm, n_actions, command, rule); + error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id), + &packet, &in_port); + if (error) { + return error; } + + flow_extract(packet, 0, in_port, &flow); + rule_execute(ofproto, rule, packet, &flow); + ofpbuf_delete(packet); + return 0; } + +/* OFPFC_MODIFY and OFPFC_MODIFY_STRICT. */ struct modify_flows_cbdata { struct ofproto *ofproto; const struct ofp_flow_mod *ofm; - uint16_t out_port; size_t n_actions; - uint16_t command; + struct rule *match; }; +static int modify_flow(struct ofproto *, const struct ofp_flow_mod *, + size_t n_actions, struct rule *); +static void modify_flows_cb(struct cls_rule *, void *cbdata_); + +/* Implements OFPFC_MODIFY. Returns 0 on success or an OpenFlow error code as + * encoded by ofp_mkerr() on failure. + * + * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, + * if any. */ +static int +modify_flows_loose(struct ofproto *p, struct ofconn *ofconn, + const struct ofp_flow_mod *ofm, size_t n_actions) +{ + struct modify_flows_cbdata cbdata; + struct cls_rule target; + + cbdata.ofproto = p; + cbdata.ofm = ofm; + cbdata.n_actions = n_actions; + cbdata.match = NULL; + + cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie, + &target); + + classifier_for_each_match(&p->cls, &target, CLS_INC_ALL, + modify_flows_cb, &cbdata); + if (cbdata.match) { + /* This credits the packet to whichever flow happened to happened to + * match last. That's weird. Maybe we should do a lookup for the + * flow that actually matches the packet? Who knows. */ + send_buffered_packet(p, ofconn, cbdata.match, ofm); + return 0; + } else { + return add_flow(p, ofconn, ofm, n_actions); + } +} + +/* Implements OFPFC_MODIFY_STRICT. Returns 0 on success or an OpenFlow error + * code as encoded by ofp_mkerr() on failure. + * + * 'ofconn' is used to retrieve the packet buffer specified in ofm->buffer_id, + * if any. */ +static int +modify_flow_strict(struct ofproto *p, struct ofconn *ofconn, + struct ofp_flow_mod *ofm, size_t n_actions) +{ + struct rule *rule = find_flow_strict(p, ofm); + if (rule && !rule_is_hidden(rule)) { + modify_flow(p, ofm, n_actions, rule); + return send_buffered_packet(p, ofconn, rule, ofm); + } else { + return add_flow(p, ofconn, ofm, n_actions); + } +} + +/* Callback for modify_flows_loose(). 
*/ static void modify_flows_cb(struct cls_rule *rule_, void *cbdata_) { struct rule *rule = rule_from_cls_rule(rule_); struct modify_flows_cbdata *cbdata = cbdata_; - if (cbdata->out_port != htons(OFPP_NONE) - && !rule_has_out_port(rule, cbdata->out_port)) { - return; + if (!rule_is_hidden(rule)) { + cbdata->match = rule; + modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions, rule); } - - modify_flow(cbdata->ofproto, cbdata->ofm, cbdata->n_actions, - cbdata->command, rule); } +/* Implements core of OFPFC_MODIFY and OFPFC_MODIFY_STRICT where 'rule' has + * been identified as a flow in 'p''s flow table to be modified, by changing + * the rule's actions to match those in 'ofm' (which is followed by 'n_actions' + * ofp_action[] structures). */ static int -modify_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm, - size_t n_actions, uint16_t command) +modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm, + size_t n_actions, struct rule *rule) { - struct modify_flows_cbdata cbdata; + size_t actions_len = n_actions * sizeof *rule->actions; + + rule->flow_cookie = ofm->cookie; + + /* If the actions are the same, do nothing. */ + if (n_actions == rule->n_actions + && !memcmp(ofm->actions, rule->actions, actions_len)) + { + return 0; + } + + /* Replace actions. */ + free(rule->actions); + rule->actions = xmemdup(ofm->actions, actions_len); + rule->n_actions = n_actions; + + /* Make sure that the datapath gets updated properly. */ + if (rule->cr.wc.wildcards) { + COVERAGE_INC(ofproto_mod_wc_flow); + p->need_revalidate = true; + } else { + rule_update_actions(p, rule); + } + + return 0; +} + +/* OFPFC_DELETE implementation. */ + +struct delete_flows_cbdata { + struct ofproto *ofproto; + uint16_t out_port; +}; + +static void delete_flows_cb(struct cls_rule *, void *cbdata_); +static void delete_flow(struct ofproto *, struct rule *, uint16_t out_port); + +/* Implements OFPFC_DELETE. */ +static void +delete_flows_loose(struct ofproto *p, const struct ofp_flow_mod *ofm) +{ + struct delete_flows_cbdata cbdata; struct cls_rule target; cbdata.ofproto = p; - cbdata.ofm = ofm; - cbdata.out_port = (command == OFPFC_DELETE ? ofm->out_port - : htons(OFPP_NONE)); - cbdata.n_actions = n_actions; - cbdata.command = command; + cbdata.out_port = ofm->out_port; - cls_rule_from_match(&target, &ofm->match, 0); + cls_rule_from_match(&ofm->match, 0, p->tun_id_from_cookie, ofm->cookie, + &target); classifier_for_each_match(&p->cls, &target, CLS_INC_ALL, - modify_flows_cb, &cbdata); - return 0; + delete_flows_cb, &cbdata); } +/* Implements OFPFC_DELETE_STRICT. */ +static void +delete_flow_strict(struct ofproto *p, struct ofp_flow_mod *ofm) +{ + struct rule *rule = find_flow_strict(p, ofm); + if (rule) { + delete_flow(p, rule, ofm->out_port); + } +} + +/* Callback for delete_flows_loose(). */ +static void +delete_flows_cb(struct cls_rule *rule_, void *cbdata_) +{ + struct rule *rule = rule_from_cls_rule(rule_); + struct delete_flows_cbdata *cbdata = cbdata_; + + delete_flow(cbdata->ofproto, rule, cbdata->out_port); +} + +/* Implements core of OFPFC_DELETE and OFPFC_DELETE_STRICT where 'rule' has + * been identified as a flow to delete from 'p''s flow table, by deleting the + * flow and sending out a OFPT_FLOW_REMOVED message to any interested + * controller. + * + * Will not delete 'rule' if it is hidden. Will delete 'rule' only if + * 'out_port' is htons(OFPP_NONE) or if 'rule' actually outputs to the + * specified 'out_port'. 
*/ +static void +delete_flow(struct ofproto *p, struct rule *rule, uint16_t out_port) +{ + if (rule_is_hidden(rule)) { + return; + } + + if (out_port != htons(OFPP_NONE) && !rule_has_out_port(rule, out_port)) { + return; + } + + send_flow_removed(p, rule, time_msec(), OFPRR_DELETE); + rule_remove(p, rule); +} + static int handle_flow_mod(struct ofproto *p, struct ofconn *ofconn, struct ofp_flow_mod *ofm) @@ -2841,12 +3464,24 @@ handle_flow_mod(struct ofproto *p, struct ofconn *ofconn, size_t n_actions; int error; + error = reject_slave_controller(ofconn, &ofm->header); + if (error) { + return error; + } error = check_ofp_message_array(&ofm->header, OFPT_FLOW_MOD, sizeof *ofm, sizeof *ofm->actions, &n_actions); if (error) { return error; } + /* We do not support the emergency flow cache. It will hopefully + * get dropped from OpenFlow in the near future. */ + if (ofm->flags & htons(OFPFF_EMERG)) { + /* There isn't a good fit for an error code, so just state that the + * flow table is full. */ + return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL); + } + normalize_match(&ofm->match); if (!ofm->match.wildcards) { ofm->priority = htons(UINT16_MAX); @@ -2863,72 +3498,89 @@ handle_flow_mod(struct ofproto *p, struct ofconn *ofconn, return add_flow(p, ofconn, ofm, n_actions); case OFPFC_MODIFY: - return modify_flows_loose(p, ofm, n_actions, OFPFC_MODIFY); + return modify_flows_loose(p, ofconn, ofm, n_actions); case OFPFC_MODIFY_STRICT: - return modify_flows_strict(p, ofm, n_actions, OFPFC_MODIFY); + return modify_flow_strict(p, ofconn, ofm, n_actions); case OFPFC_DELETE: - return modify_flows_loose(p, ofm, n_actions, OFPFC_DELETE); + delete_flows_loose(p, ofm); + return 0; case OFPFC_DELETE_STRICT: - return modify_flows_strict(p, ofm, n_actions, OFPFC_DELETE); + delete_flow_strict(p, ofm); + return 0; default: return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_BAD_COMMAND); } } -static void -send_capability_reply(struct ofproto *p, struct ofconn *ofconn, uint32_t xid) +static int +handle_tun_id_from_cookie(struct ofproto *p, struct nxt_tun_id_cookie *msg) { - struct ofmp_capability_reply *ocr; - struct ofpbuf *b; - char capabilities[] = "com.nicira.mgmt.manager=false\n"; - - ocr = make_openflow_xid(sizeof(*ocr), OFPT_VENDOR, xid, &b); - ocr->header.header.vendor = htonl(NX_VENDOR_ID); - ocr->header.header.subtype = htonl(NXT_MGMT); - ocr->header.type = htons(OFMPT_CAPABILITY_REPLY); - - ocr->format = htonl(OFMPCOF_SIMPLE); - ocr->mgmt_id = htonll(p->mgmt_id); + int error; - ofpbuf_put(b, capabilities, strlen(capabilities)); + error = check_ofp_message(&msg->header, OFPT_VENDOR, sizeof *msg); + if (error) { + return error; + } - queue_tx(b, ofconn, ofconn->reply_counter); + p->tun_id_from_cookie = !!msg->set; + return 0; } static int -handle_ofmp(struct ofproto *p, struct ofconn *ofconn, - struct ofmp_header *ofmph) +handle_role_request(struct ofproto *ofproto, + struct ofconn *ofconn, struct nicira_header *msg) { - size_t msg_len = ntohs(ofmph->header.header.length); - if (msg_len < sizeof(*ofmph)) { - VLOG_WARN_RL(&rl, "dropping short managment message: %d\n", msg_len); - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); + struct nx_role_request *nrr; + struct nx_role_request *reply; + struct ofpbuf *buf; + uint32_t role; + + if (ntohs(msg->header.length) != sizeof *nrr) { + VLOG_WARN_RL(&rl, "received role request of length %zu (expected %zu)", + ntohs(msg->header.length), sizeof *nrr); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); } + nrr = (struct nx_role_request *) 
msg; - if (ofmph->type == htons(OFMPT_CAPABILITY_REQUEST)) { - struct ofmp_capability_request *ofmpcr; + if (ofconn->type != OFCONN_CONTROLLER) { + VLOG_WARN_RL(&rl, "ignoring role request on non-controller " + "connection"); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM); + } - if (msg_len < sizeof(struct ofmp_capability_request)) { - VLOG_WARN_RL(&rl, "dropping short capability request: %d\n", - msg_len); - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); - } + role = ntohl(nrr->role); + if (role != NX_ROLE_OTHER && role != NX_ROLE_MASTER + && role != NX_ROLE_SLAVE) { + VLOG_WARN_RL(&rl, "received request for unknown role %"PRIu32, role); - ofmpcr = (struct ofmp_capability_request *)ofmph; - if (ofmpcr->format != htonl(OFMPCAF_SIMPLE)) { - /* xxx Find a better type than bad subtype */ - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); - } + /* There's no good error code for this. */ + return ofp_mkerr(OFPET_BAD_REQUEST, -1); + } - send_capability_reply(p, ofconn, ofmph->header.header.xid); - return 0; - } else { - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); + if (role == NX_ROLE_MASTER) { + struct ofconn *other; + + HMAP_FOR_EACH (other, struct ofconn, hmap_node, + &ofproto->controllers) { + if (other->role == NX_ROLE_MASTER) { + other->role = NX_ROLE_SLAVE; + } + } } + ofconn->role = role; + + reply = make_openflow_xid(sizeof *reply, OFPT_VENDOR, msg->header.xid, + &buf); + reply->nxh.vendor = htonl(NX_VENDOR_ID); + reply->nxh.subtype = htonl(NXT_ROLE_REPLY); + reply->role = htonl(role); + queue_tx(buf, ofconn, ofconn->reply_counter); + + return 0; } static int @@ -2938,13 +3590,19 @@ handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg) struct nicira_header *nh; if (ntohs(ovh->header.length) < sizeof(struct ofp_vendor_header)) { - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); + VLOG_WARN_RL(&rl, "received vendor message of length %zu " + "(expected at least %zu)", + ntohs(ovh->header.length), sizeof(struct ofp_vendor_header)); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); } if (ovh->vendor != htonl(NX_VENDOR_ID)) { return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_VENDOR); } if (ntohs(ovh->header.length) < sizeof(struct nicira_header)) { - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LENGTH); + VLOG_WARN_RL(&rl, "received Nicira vendor message of length %zu " + "(expected at least %zu)", + ntohs(ovh->header.length), sizeof(struct nicira_header)); + return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN); } nh = msg; @@ -2953,25 +3611,29 @@ handle_vendor(struct ofproto *p, struct ofconn *ofconn, void *msg) return switch_status_handle_request(p->switch_status, ofconn->rconn, msg); - case NXT_ACT_SET_CONFIG: - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); /* XXX */ - - case NXT_ACT_GET_CONFIG: - return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); /* XXX */ - - case NXT_COMMAND_REQUEST: - if (p->executer) { - return executer_handle_request(p->executer, ofconn->rconn, msg); - } - break; + case NXT_TUN_ID_FROM_COOKIE: + return handle_tun_id_from_cookie(p, msg); - case NXT_MGMT: - return handle_ofmp(p, ofconn, msg); + case NXT_ROLE_REQUEST: + return handle_role_request(p, ofconn, msg); } return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_SUBTYPE); } +static int +handle_barrier_request(struct ofconn *ofconn, struct ofp_header *oh) +{ + struct ofp_header *ob; + struct ofpbuf *buf; + + /* Currently, everything executes synchronously, so we can just + * immediately send the barrier reply. 
*/ + ob = make_openflow_xid(sizeof *ob, OFPT_BARRIER_REPLY, oh->xid, &buf); + queue_tx(buf, ofconn, ofconn->reply_counter); + return 0; +} + static void handle_openflow(struct ofconn *ofconn, struct ofproto *p, struct ofpbuf *ofp_msg) @@ -3006,7 +3668,7 @@ handle_openflow(struct ofconn *ofconn, struct ofproto *p, break; case OFPT_PORT_MOD: - error = handle_port_mod(p, oh); + error = handle_port_mod(p, ofconn, oh); break; case OFPT_FLOW_MOD: @@ -3021,6 +3683,10 @@ handle_openflow(struct ofconn *ofconn, struct ofproto *p, error = handle_vendor(p, ofconn, ofp_msg->data); break; + case OFPT_BARRIER_REQUEST: + error = handle_barrier_request(ofconn, oh); + break; + default: if (VLOG_IS_WARN_ENABLED()) { char *s = ofp_to_string(oh, ntohs(oh->length), 2); @@ -3037,25 +3703,16 @@ handle_openflow(struct ofconn *ofconn, struct ofproto *p, } static void -handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) +handle_odp_miss_msg(struct ofproto *p, struct ofpbuf *packet) { struct odp_msg *msg = packet->data; - uint16_t in_port = odp_port_to_ofp_port(msg->port); struct rule *rule; struct ofpbuf payload; flow_t flow; - /* Handle controller actions. */ - if (msg->type == _ODPL_ACTION_NR) { - COVERAGE_INC(ofproto_ctlr_action); - pinsched_send(p->action_sched, in_port, packet, - send_packet_in_action, p); - return; - } - payload.data = msg + 1; payload.size = msg->length - sizeof *msg; - flow_extract(&payload, msg->port, &flow); + flow_extract(&payload, msg->arg, msg->port, &flow); /* Check with in-band control to see if this packet should be sent * to the local port regardless of the flow table. */ @@ -3084,7 +3741,7 @@ handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) } COVERAGE_INC(ofproto_packet_in); - pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p); + send_packet_in(p, packet); return; } @@ -3105,8 +3762,7 @@ handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) rule_execute(p, rule, &payload, &flow); rule_reinstall(p, rule); - if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY - && rconn_is_connected(p->controller->rconn)) { + if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY) { /* * Extra-special case for fail-open mode. * @@ -3117,11 +3773,40 @@ handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) * * See the top-level comment in fail-open.c for more information. 
*/ - pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p); + send_packet_in(p, packet); } else { ofpbuf_delete(packet); } } + +static void +handle_odp_msg(struct ofproto *p, struct ofpbuf *packet) +{ + struct odp_msg *msg = packet->data; + + switch (msg->type) { + case _ODPL_ACTION_NR: + COVERAGE_INC(ofproto_ctlr_action); + send_packet_in(p, packet); + break; + + case _ODPL_SFLOW_NR: + if (p->sflow) { + ofproto_sflow_received(p->sflow, msg); + } + ofpbuf_delete(packet); + break; + + case _ODPL_MISS_NR: + handle_odp_miss_msg(p, packet); + break; + + default: + VLOG_WARN_RL(&rl, "received ODP message of unexpected type %"PRIu32, + msg->type); + break; + } +} static void revalidate_cb(struct cls_rule *sub_, void *cbdata_) @@ -3165,25 +3850,46 @@ revalidate_rule(struct ofproto *p, struct rule *rule) } static struct ofpbuf * -compose_flow_exp(const struct rule *rule, long long int now, uint8_t reason) +compose_flow_removed(struct ofproto *p, const struct rule *rule, + long long int now, uint8_t reason) { - struct ofp_flow_expired *ofe; + struct ofp_flow_removed *ofr; struct ofpbuf *buf; - - ofe = make_openflow(sizeof *ofe, OFPT_FLOW_EXPIRED, &buf); - flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofe->match); - ofe->priority = htons(rule->cr.priority); - ofe->reason = reason; - ofe->duration = (now - rule->created) / 1000; - ofe->packet_count = rule->packet_count; - ofe->byte_count = rule->byte_count; + long long int tdiff = now - rule->created; + uint32_t sec = tdiff / 1000; + uint32_t msec = tdiff - (sec * 1000); + + ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf); + flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, p->tun_id_from_cookie, + &ofr->match); + ofr->cookie = rule->flow_cookie; + ofr->priority = htons(rule->cr.priority); + ofr->reason = reason; + ofr->duration_sec = htonl(sec); + ofr->duration_nsec = htonl(msec * 1000000); + ofr->idle_timeout = htons(rule->idle_timeout); + ofr->packet_count = htonll(rule->packet_count); + ofr->byte_count = htonll(rule->byte_count); return buf; } static void -send_flow_exp(struct ofproto *p, struct rule *rule, - long long int now, uint8_t reason) +uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule) +{ + assert(rule->installed); + assert(!rule->cr.wc.wildcards); + + if (rule->super) { + rule_remove(ofproto, rule); + } else { + rule_uninstall(ofproto, rule); + } +} + +static void +send_flow_removed(struct ofproto *p, struct rule *rule, + long long int now, uint8_t reason) { struct ofconn *ofconn; struct ofconn *prev; @@ -3197,11 +3903,12 @@ send_flow_exp(struct ofproto *p, struct rule *rule, prev = NULL; LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) { - if (ofconn->send_flow_exp && rconn_is_connected(ofconn->rconn)) { + if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn) + && ofconn_receives_async_msgs(ofconn)) { if (prev) { queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter); } else { - buf = compose_flow_exp(rule, now, reason); + buf = compose_flow_removed(p, rule, now, reason); } prev = ofconn; } @@ -3211,18 +3918,6 @@ send_flow_exp(struct ofproto *p, struct rule *rule, } } -static void -uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule) -{ - assert(rule->installed); - assert(!rule->cr.wc.wildcards); - - if (rule->super) { - rule_remove(ofproto, rule); - } else { - rule_uninstall(ofproto, rule); - } -} static void expire_rule(struct cls_rule *cls_rule, void *p_) @@ -3239,38 +3934,75 @@ expire_rule(struct cls_rule *cls_rule, void *p_) ? 
rule->used + rule->idle_timeout * 1000 : LLONG_MAX); expire = MIN(hard_expire, idle_expire); - if (expire == LLONG_MAX) { - if (rule->installed && time_msec() >= rule->used + 5000) { - uninstall_idle_flow(p, rule); - } - return; - } now = time_msec(); if (now < expire) { if (rule->installed && now >= rule->used + 5000) { uninstall_idle_flow(p, rule); + } else if (!rule->cr.wc.wildcards) { + active_timeout(p, rule); } + return; } COVERAGE_INC(ofproto_expired); + + /* Update stats. This code will be a no-op if the rule expired + * due to an idle timeout. */ if (rule->cr.wc.wildcards) { - /* Update stats. (This code will be a no-op if the rule expired - * due to an idle timeout, because in that case the rule has no - * subrules left.) */ struct rule *subrule, *next; LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) { rule_remove(p, subrule); } + } else { + rule_uninstall(p, rule); } - send_flow_exp(p, rule, now, - (now >= hard_expire - ? OFPER_HARD_TIMEOUT : OFPER_IDLE_TIMEOUT)); + if (!rule_is_hidden(rule)) { + send_flow_removed(p, rule, now, + (now >= hard_expire + ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT)); + } rule_remove(p, rule); } +static void +active_timeout(struct ofproto *ofproto, struct rule *rule) +{ + if (ofproto->netflow && !is_controller_rule(rule) && + netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) { + struct ofexpired expired; + struct odp_flow odp_flow; + + /* Get updated flow stats. */ + memset(&odp_flow, 0, sizeof odp_flow); + if (rule->installed) { + odp_flow.key = rule->cr.flow; + odp_flow.flags = ODPFF_ZERO_TCP_FLAGS; + dpif_flow_get(ofproto->dpif, &odp_flow); + + if (odp_flow.stats.n_packets) { + update_time(ofproto, rule, &odp_flow.stats); + netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos, + odp_flow.stats.tcp_flags); + } + } + + expired.flow = rule->cr.flow; + expired.packet_count = rule->packet_count + + odp_flow.stats.n_packets; + expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes; + expired.used = rule->used; + + netflow_expire(ofproto->netflow, &rule->nf_flow, &expired); + + /* Schedule us to send the accumulated records once we have + * collected all of them. */ + poll_immediate_wake(); + } +} + static void update_used(struct ofproto *p) { @@ -3296,74 +4028,156 @@ update_used(struct ofproto *p) continue; } - update_time(rule, &f->stats); + update_time(p, rule, &f->stats); rule_account(p, rule, f->stats.n_bytes); } free(flows); } +/* pinsched callback for sending 'packet' on 'ofconn'. */ static void -do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id, - const struct ofpbuf *packet, int send_len) +do_send_packet_in(struct ofpbuf *packet, void *ofconn_) { - struct odp_msg *msg = packet->data; - struct ofpbuf payload; - struct ofpbuf *opi; - uint8_t reason; + struct ofconn *ofconn = ofconn_; - /* Extract packet payload from 'msg'. */ - payload.data = msg + 1; - payload.size = msg->length - sizeof *msg; + rconn_send_with_limit(ofconn->rconn, packet, + ofconn->packet_in_counter, 100); +} + +/* Takes 'packet', which has been converted with do_convert_to_packet_in(), and + * finalizes its content for sending on 'ofconn', and passes it to 'ofconn''s + * packet scheduler for sending. + * + * 'max_len' specifies the maximum number of bytes of the packet to send on + * 'ofconn' (INT_MAX specifies no limit). + * + * If 'clone' is true, the caller retains ownership of 'packet'. Otherwise, + * ownership is transferred to this function. 
*/ +static void +schedule_packet_in(struct ofconn *ofconn, struct ofpbuf *packet, int max_len, + bool clone) +{ + struct ofproto *ofproto = ofconn->ofproto; + struct ofp_packet_in *opi = packet->data; + uint16_t in_port = ofp_port_to_odp_port(ntohs(opi->in_port)); + int send_len, trim_size; + uint32_t buffer_id; + + /* Get buffer. */ + if (opi->reason == OFPR_ACTION) { + buffer_id = UINT32_MAX; + } else if (ofproto->fail_open && fail_open_is_active(ofproto->fail_open)) { + buffer_id = pktbuf_get_null(); + } else if (!ofconn->pktbuf) { + buffer_id = UINT32_MAX; + } else { + struct ofpbuf payload; + payload.data = opi->data; + payload.size = packet->size - offsetof(struct ofp_packet_in, data); + buffer_id = pktbuf_save(ofconn->pktbuf, &payload, in_port); + } + + /* Figure out how much of the packet to send. */ + send_len = ntohs(opi->total_len); + if (buffer_id != UINT32_MAX) { + send_len = MIN(send_len, ofconn->miss_send_len); + } + send_len = MIN(send_len, max_len); + + /* Adjust packet length and clone if necessary. */ + trim_size = offsetof(struct ofp_packet_in, data) + send_len; + if (clone) { + packet = ofpbuf_clone_data(packet->data, trim_size); + opi = packet->data; + } else { + packet->size = trim_size; + } - /* Construct ofp_packet_in message. */ - reason = msg->type == _ODPL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH; - opi = make_packet_in(buffer_id, odp_port_to_ofp_port(msg->port), reason, - &payload, send_len); + /* Update packet headers. */ + opi->buffer_id = htonl(buffer_id); + update_openflow_length(packet); - /* Send. */ - rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100); + /* Hand over to packet scheduler. It might immediately call into + * do_send_packet_in() or it might buffer it for a while (until a later + * call to pinsched_run()). */ + pinsched_send(ofconn->schedulers[opi->reason], in_port, + packet, do_send_packet_in, ofconn); } -static void -send_packet_in_action(struct ofpbuf *packet, void *p_) +/* Replace struct odp_msg header in 'packet' by equivalent struct + * ofp_packet_in. The odp_msg must have sufficient headroom to do so (e.g. as + * returned by dpif_recv()). + * + * The conversion is not complete: the caller still needs to trim any unneeded + * payload off the end of the buffer, set the length in the OpenFlow header, + * and set buffer_id. Those require us to know the controller settings and so + * must be done on a per-controller basis. + * + * Returns the maximum number of bytes of the packet that should be sent to + * the controller (INT_MAX if no limit). */ +static int +do_convert_to_packet_in(struct ofpbuf *packet) { - struct ofproto *p = p_; - struct ofconn *ofconn; - struct odp_msg *msg; + struct odp_msg *msg = packet->data; + struct ofp_packet_in *opi; + uint8_t reason; + uint16_t total_len; + uint16_t in_port; + int max_len; - msg = packet->data; - LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) { - if (ofconn == p->controller || ofconn->miss_send_len) { - do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg); - } + /* Extract relevant header fields */ + if (msg->type == _ODPL_ACTION_NR) { + reason = OFPR_ACTION; + max_len = msg->arg; + } else { + reason = OFPR_NO_MATCH; + max_len = INT_MAX; } - ofpbuf_delete(packet); + total_len = msg->length - sizeof *msg; + in_port = odp_port_to_ofp_port(msg->port); + + /* Repurpose packet buffer by overwriting header. 
*/ + ofpbuf_pull(packet, sizeof(struct odp_msg)); + opi = ofpbuf_push_zeros(packet, offsetof(struct ofp_packet_in, data)); + opi->header.version = OFP_VERSION; + opi->header.type = OFPT_PACKET_IN; + opi->total_len = htons(total_len); + opi->in_port = htons(in_port); + opi->reason = reason; + + return max_len; } +/* Given 'packet' containing an odp_msg of type _ODPL_ACTION_NR or + * _ODPL_MISS_NR, sends an OFPT_PACKET_IN message to each OpenFlow controller + * as necessary according to their individual configurations. + * + * 'packet' must have sufficient headroom to convert it into a struct + * ofp_packet_in (e.g. as returned by dpif_recv()). + * + * Takes ownership of 'packet'. */ static void -send_packet_in_miss(struct ofpbuf *packet, void *p_) +send_packet_in(struct ofproto *ofproto, struct ofpbuf *packet) { - struct ofproto *p = p_; - bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open); - struct ofconn *ofconn; - struct ofpbuf payload; - struct odp_msg *msg; + struct ofconn *ofconn, *prev; + int max_len; - msg = packet->data; - payload.data = msg + 1; - payload.size = msg->length - sizeof *msg; - LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) { - if (ofconn->miss_send_len) { - struct pktbuf *pb = ofconn->pktbuf; - uint32_t buffer_id = (in_fail_open - ? pktbuf_get_null() - : pktbuf_save(pb, &payload, msg->port)); - int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len - : UINT32_MAX); - do_send_packet_in(ofconn, buffer_id, packet, send_len); + max_len = do_convert_to_packet_in(packet); + + prev = NULL; + LIST_FOR_EACH (ofconn, struct ofconn, node, &ofproto->all_conns) { + if (ofconn_receives_async_msgs(ofconn)) { + if (prev) { + schedule_packet_in(prev, packet, max_len, true); + } + prev = ofconn; } } - ofpbuf_delete(packet); + if (prev) { + schedule_packet_in(prev, packet, max_len, false); + } else { + ofpbuf_delete(packet); + } } static uint64_t @@ -3390,17 +4204,14 @@ static uint64_t pick_fallback_dpid(void) { uint8_t ea[ETH_ADDR_LEN]; - eth_addr_random(ea); - ea[0] = 0x00; /* Set Nicira OUI. */ - ea[1] = 0x23; - ea[2] = 0x20; + eth_addr_nicira_random(ea); return eth_addr_to_uint64(ea); } static bool default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet, struct odp_actions *actions, tag_type *tags, - void *ofproto_) + uint16_t *nf_output_iface, void *ofproto_) { struct ofproto *ofproto = ofproto_; int out_port; @@ -3427,9 +4238,10 @@ default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet, /* Determine output port. */ out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags); if (out_port < 0) { - add_output_group_action(actions, DP_GROUP_FLOOD); + add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface); } else if (out_port != flow->in_port) { odp_actions_add(actions, ODPAT_OUTPUT)->output.port = out_port; + *nf_output_iface = out_port; } else { /* Drop. */ }
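The role machinery added above gates only the state-changing requests: handle_packet_out(), handle_flow_mod(), and handle_port_mod() all call reject_slave_controller(), while read-only requests are still answered for slaves. A hypothetical summary helper, using only OpenFlow 1.0 message type constants:

/* Hypothetical summary, not part of the patch: the message types whose
 * handlers above refuse slave controllers with OFPBRC_EPERM. */
static bool
message_requires_master(uint8_t ofp_type)
{
    switch (ofp_type) {
    case OFPT_PACKET_OUT:
    case OFPT_FLOW_MOD:
    case OFPT_PORT_MOD:
        return true;    /* These handlers call reject_slave_controller(). */
    default:
        return false;   /* Stats, echo, barrier, etc. remain available. */
    }
}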
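expire_rule() now distinguishes merely uninstalling an idle datapath flow from actually removing a rule, but the deadline itself is still the earlier of the hard and idle timeouts. A compact restatement, assuming the MIN macro already used elsewhere in this file:

/* Restatement of the deadline computation in expire_rule(); the helper name
 * is illustrative and not part of the patch. */
static long long int
rule_expiration(const struct rule *rule)
{
    long long int hard = (rule->hard_timeout
                          ? rule->created + rule->hard_timeout * 1000
                          : LLONG_MAX);
    long long int idle = (rule->idle_timeout && rule->used
                          ? rule->used + rule->idle_timeout * 1000
                          : LLONG_MAX);
    return MIN(hard, idle);
}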
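schedule_packet_in() decides per connection how much of the packet to send: buffered table misses are limited by the connection's miss_send_len, and OFPR_ACTION packets by the length the controller's action asked for. A sketch of that computation, with an assumed helper name and host-byte-order lengths:

/* Assumed helper, not in the patch: the length logic inside
 * schedule_packet_in().  'total_len' is the full frame length; 'max_len' is
 * INT_MAX for table misses or the controller-requested length for
 * OFPR_ACTION packets. */
static int
packet_in_send_len(const struct ofconn *ofconn, uint32_t buffer_id,
                   int total_len, int max_len)
{
    int send_len = total_len;

    if (buffer_id != UINT32_MAX) {
        send_len = MIN(send_len, ofconn->miss_send_len);
    }
    return MIN(send_len, max_len);
}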
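Both send_flow_removed() and the new send_packet_in() use the same clone-all-but-last fan-out so that the final eligible connection takes ownership of the original buffer. A generic sketch of that pattern; the callback signatures are illustrative and do not match queue_tx() or schedule_packet_in() exactly:

/* Illustrative sketch of the fan-out shared by send_flow_removed() and
 * send_packet_in(): clone for every eligible connection except the last,
 * hand the original to the last, and free it if nobody wants it. */
static void
fan_out(struct ofproto *p, struct ofpbuf *buf,
        bool (*eligible)(const struct ofconn *),
        void (*deliver)(struct ofconn *, struct ofpbuf *))
{
    struct ofconn *ofconn, *prev = NULL;

    LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
        if (eligible(ofconn)) {
            if (prev) {
                deliver(prev, ofpbuf_clone(buf));
            }
            prev = ofconn;
        }
    }
    if (prev) {
        deliver(prev, buf);
    } else {
        ofpbuf_delete(buf);
    }
}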