X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=8fc9916d5be5a295637c62a4f005c99709c13747;hb=a6b7506dab305d91fc5f2ac6416a714e5fa09dd4;hp=9a2a4e88a68fabae18d79f65dd0cfe81591bd903;hpb=f0f8c6c221a890d0dc9476f9c76081afbc6c9392;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index 9a2a4e88a..8fc9916d5 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -25,6 +25,7 @@
 #include "bitmap.h"
 #include "byte-order.h"
 #include "classifier.h"
+#include "connectivity.h"
 #include "connmgr.h"
 #include "coverage.h"
 #include "dynamic-string.h"
@@ -47,8 +48,10 @@
 #include "pktbuf.h"
 #include "poll-loop.h"
 #include "random.h"
+#include "seq.h"
 #include "shash.h"
 #include "simap.h"
+#include "smap.h"
 #include "sset.h"
 #include "timeval.h"
 #include "unaligned.h"
@@ -57,14 +60,11 @@
 
 VLOG_DEFINE_THIS_MODULE(ofproto);
 
-COVERAGE_DEFINE(ofproto_error);
 COVERAGE_DEFINE(ofproto_flush);
-COVERAGE_DEFINE(ofproto_no_packet_in);
 COVERAGE_DEFINE(ofproto_packet_out);
 COVERAGE_DEFINE(ofproto_queue_req);
 COVERAGE_DEFINE(ofproto_recv_openflow);
 COVERAGE_DEFINE(ofproto_reinit_ports);
-COVERAGE_DEFINE(ofproto_uninstallable);
 COVERAGE_DEFINE(ofproto_update_port);
 
 enum ofproto_state {
@@ -240,6 +240,23 @@ static void update_port(struct ofproto *, const char *devname);
 static int init_ports(struct ofproto *);
 static void reinit_ports(struct ofproto *);
 
+static long long int ofport_get_usage(const struct ofproto *,
+                                      ofp_port_t ofp_port);
+static void ofport_set_usage(struct ofproto *, ofp_port_t ofp_port,
+                             long long int last_used);
+static void ofport_remove_usage(struct ofproto *, ofp_port_t ofp_port);
+
+/* Ofport usage.
+ *
+ * Keeps track of the currently used and recently used ofport values and is
+ * used to prevent immediate recycling of ofport values. */
+struct ofport_usage {
+    struct hmap_node hmap_node; /* In struct ofproto's "ofport_usage" hmap. */
+    ofp_port_t ofp_port;        /* OpenFlow port number. */
+    long long int last_used;    /* Last time the 'ofp_port' was used. LLONG_MAX
+                                   represents in-use ofports. */
+};
+
 /* rule. */
 static void ofproto_rule_destroy__(struct rule *);
 static void ofproto_rule_send_removed(struct rule *, uint8_t reason);
@@ -256,6 +273,12 @@ static enum ofperr modify_flows__(struct ofproto *, struct ofconn *,
 static void delete_flow__(struct rule *rule, struct ofopgroup *,
                           enum ofp_flow_removed_reason)
     OVS_REQUIRES(ofproto_mutex);
+static bool ofproto_group_exists__(const struct ofproto *ofproto,
+                                   uint32_t group_id)
+    OVS_REQ_RDLOCK(ofproto->groups_rwlock);
+static bool ofproto_group_exists(const struct ofproto *ofproto,
+                                 uint32_t group_id)
+    OVS_EXCLUDED(ofproto->groups_rwlock);
 static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
 static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
 static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
@@ -411,6 +434,7 @@ ofproto_enumerate_types(struct sset *types)
 {
     size_t i;
 
+    sset_clear(types);
     for (i = 0; i < n_ofproto_classes; i++) {
         ofproto_classes[i]->enumerate_types(types);
     }
@@ -437,7 +461,7 @@ ofproto_enumerate_names(const char *type, struct sset *names)
 {
     const struct ofproto_class *class = ofproto_class_find__(type);
     return class ? class->enumerate_names(type, names) : EAFNOSUPPORT;
- }
+}
 
 int
 ofproto_create(const char *datapath_name, const char *datapath_type,
@@ -485,6 +509,7 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ofproto->dp_desc = NULL;
     ofproto->frag_handling = OFPC_FRAG_NORMAL;
     hmap_init(&ofproto->ports);
+    hmap_init(&ofproto->ofport_usage);
     shash_init(&ofproto->port_by_name);
     simap_init(&ofproto->ofp_requests);
     ofproto->max_ports = ofp_to_u16(OFPP_MAX);
@@ -509,6 +534,30 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ovs_rwlock_init(&ofproto->groups_rwlock);
     hmap_init(&ofproto->groups);
     ovs_mutex_unlock(&ofproto_mutex);
+    ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS |
+                                OFPGFC_SELECT_WEIGHT;
+    ofproto->ogf.max_groups[OFPGT11_ALL] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_SELECT] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_INDIRECT] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_FF] = OFPG_MAX;
+    ofproto->ogf.actions[0] =
+        (1 << OFPAT11_OUTPUT) |
+        (1 << OFPAT11_COPY_TTL_OUT) |
+        (1 << OFPAT11_COPY_TTL_IN) |
+        (1 << OFPAT11_SET_MPLS_TTL) |
+        (1 << OFPAT11_DEC_MPLS_TTL) |
+        (1 << OFPAT11_PUSH_VLAN) |
+        (1 << OFPAT11_POP_VLAN) |
+        (1 << OFPAT11_PUSH_MPLS) |
+        (1 << OFPAT11_POP_MPLS) |
+        (1 << OFPAT11_SET_QUEUE) |
+        (1 << OFPAT11_GROUP) |
+        (1 << OFPAT11_SET_NW_TTL) |
+        (1 << OFPAT11_DEC_NW_TTL) |
+        (1 << OFPAT12_SET_FIELD);
+/* not supported:
+ * (1 << OFPAT13_PUSH_PBB) |
+ * (1 << OFPAT13_POP_PBB) */
 
     error = ofproto->ofproto_class->construct(ofproto);
     if (error) {
@@ -518,11 +567,6 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
         return error;
     }
 
-    /* The "max_ports" member should have been set by ->construct(ofproto).
-     * Port 0 is not a valid OpenFlow port, so mark that as unavailable. */
-    ofproto->ofp_port_ids = bitmap_allocate(ofproto->max_ports);
-    bitmap_set1(ofproto->ofp_port_ids, 0);
-
     /* Check that hidden tables, if any, are at the end. */
     ovs_assert(ofproto->n_tables);
     for (i = 0; i + 1 < ofproto->n_tables; i++) {
@@ -697,7 +741,7 @@ ofproto_set_n_handler_threads(unsigned limit)
     if (limit) {
         n_handler_threads = limit;
     } else {
-        int n_proc = sysconf(_SC_NPROCESSORS_ONLN);
+        int n_proc = count_cpu_cores();
         n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
     }
 }
@@ -843,6 +887,27 @@ ofproto_port_get_stp_status(struct ofproto *ofproto, ofp_port_t ofp_port,
             ? ofproto->ofproto_class->get_stp_port_status(ofport, s)
             : EOPNOTSUPP);
 }
+
+/* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in
+ * 's'.  If the 'enabled' member in 's' is false, then the other members
+ * are not meaningful.
+ *
+ * Returns 0 if successful, otherwise a positive errno value.*/
+int
+ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port,
+                           struct ofproto_port_stp_stats *s)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    if (!ofport) {
+        VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent "
+                     "port %"PRIu16, ofproto->name, ofp_port);
+        return ENODEV;
+    }
+
+    return (ofproto->ofproto_class->get_stp_port_stats
+            ? ofproto->ofproto_class->get_stp_port_stats(ofport, s)
+            : EOPNOTSUPP);
+}
 
 /* Queue DSCP configuration. */
 
@@ -1097,7 +1162,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    ovs_rwlock_wrlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
        /* 'table' contains more flows than allowed.
We might not be able to @@ -1113,6 +1178,10 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id, break; } } + + classifier_set_prefix_fields(&table->cls, + s->prefix_fields, s->n_prefix_fields); + ovs_rwlock_unlock(&table->cls.rwlock); } @@ -1129,7 +1198,8 @@ ofproto_get_snoops(const struct ofproto *ofproto, struct sset *snoops) } static void -ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule) +ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule, + uint8_t reason) OVS_REQUIRES(ofproto_mutex) { struct ofopgroup *group; @@ -1137,7 +1207,7 @@ ofproto_rule_delete__(struct ofproto *ofproto, struct rule *rule) ovs_assert(!rule->pending); group = ofopgroup_create_unattached(ofproto); - delete_flow__(rule, group, OFPRR_DELETE); + delete_flow__(rule, group, reason); ofopgroup_submit(group); } @@ -1192,7 +1262,7 @@ ofproto_flush__(struct ofproto *ofproto) ovs_rwlock_unlock(&table->cls.rwlock); CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, cr, &cursor) { if (!rule->pending) { - ofproto_rule_delete__(ofproto, rule); + ofproto_rule_delete__(ofproto, rule, OFPRR_DELETE); } } } @@ -1227,8 +1297,8 @@ ofproto_destroy__(struct ofproto *ofproto) free(ofproto->serial_desc); free(ofproto->dp_desc); hmap_destroy(&ofproto->ports); + hmap_destroy(&ofproto->ofport_usage); shash_destroy(&ofproto->port_by_name); - bitmap_free(ofproto->ofp_port_ids); simap_destroy(&ofproto->ofp_requests); OFPROTO_FOR_EACH_TABLE (table, ofproto) { @@ -1248,6 +1318,7 @@ ofproto_destroy(struct ofproto *p) OVS_EXCLUDED(ofproto_mutex) { struct ofport *ofport, *next_ofport; + struct ofport_usage *usage, *next_usage; if (!p) { return; @@ -1265,6 +1336,11 @@ ofproto_destroy(struct ofproto *p) ofport_destroy(ofport); } + HMAP_FOR_EACH_SAFE (usage, next_usage, hmap_node, &p->ofport_usage) { + hmap_remove(&p->ofport_usage, &usage->hmap_node); + free(usage); + } + p->ofproto_class->destruct(p); ofproto_destroy__(p); } @@ -1312,23 +1388,6 @@ ofproto_type_run(const char *datapath_type) return error; } -int -ofproto_type_run_fast(const char *datapath_type) -{ - const struct ofproto_class *class; - int error; - - datapath_type = ofproto_normalize_type(datapath_type); - class = ofproto_class_find__(datapath_type); - - error = class->type_run_fast ? class->type_run_fast(datapath_type) : 0; - if (error && error != EAGAIN) { - VLOG_ERR_RL(&rl, "%s: type_run_fast failed (%s)", - datapath_type, ovs_strerror(error)); - } - return error; -} - void ofproto_type_wait(const char *datapath_type) { @@ -1358,10 +1417,8 @@ any_pending_ops(const struct ofproto *p) int ofproto_run(struct ofproto *p) { - struct sset changed_netdevs; - const char *changed_netdev; - struct ofport *ofport; int error; + uint64_t new_seq; error = p->ofproto_class->run(p); if (error && error != EAGAIN) { @@ -1412,24 +1469,29 @@ ofproto_run(struct ofproto *p) } } - /* Update OpenFlow port status for any port whose netdev has changed. - * - * Refreshing a given 'ofport' can cause an arbitrary ofport to be - * destroyed, so it's not safe to update ports directly from the - * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we - * need this two-phase approach. 
*/ - sset_init(&changed_netdevs); - HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { - unsigned int change_seq = netdev_change_seq(ofport->netdev); - if (ofport->change_seq != change_seq) { - ofport->change_seq = change_seq; - sset_add(&changed_netdevs, netdev_get_name(ofport->netdev)); + new_seq = seq_read(connectivity_seq_get()); + if (new_seq != p->change_seq) { + struct sset devnames; + const char *devname; + struct ofport *ofport; + + /* Update OpenFlow port status for any port whose netdev has changed. + * + * Refreshing a given 'ofport' can cause an arbitrary ofport to be + * destroyed, so it's not safe to update ports directly from the + * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we + * need this two-phase approach. */ + sset_init(&devnames); + HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { + sset_add(&devnames, netdev_get_name(ofport->netdev)); } + SSET_FOR_EACH (devname, &devnames) { + update_port(p, devname); + } + sset_destroy(&devnames); + + p->change_seq = new_seq; } - SSET_FOR_EACH (changed_netdev, &changed_netdevs) { - update_port(p, changed_netdev); - } - sset_destroy(&changed_netdevs); switch (p->state) { case S_OPENFLOW: @@ -1497,40 +1559,14 @@ ofproto_run(struct ofproto *p) return error; } -/* Performs periodic activity required by 'ofproto' that needs to be done - * with the least possible latency. - * - * It makes sense to call this function a couple of times per poll loop, to - * provide a significant performance boost on some benchmarks with the - * ofproto-dpif implementation. */ -int -ofproto_run_fast(struct ofproto *p) -{ - int error; - - error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0; - if (error && error != EAGAIN) { - VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)", - p->name, ovs_strerror(error)); - } - return error; -} - void ofproto_wait(struct ofproto *p) { - struct ofport *ofport; - p->ofproto_class->wait(p); if (p->ofproto_class->port_poll_wait) { p->ofproto_class->port_poll_wait(p); } - - HMAP_FOR_EACH (ofport, hmap_node, &p->ports) { - if (ofport->change_seq != netdev_change_seq(ofport->netdev)) { - poll_immediate_wake(); - } - } + seq_wait(connectivity_seq_get(), p->change_seq); switch (p->state) { case S_OPENFLOW: @@ -1724,12 +1760,18 @@ ofproto_port_add(struct ofproto *ofproto, struct netdev *netdev, update_port(ofproto, netdev_name); } if (ofp_portp) { - struct ofproto_port ofproto_port; - - ofproto_port_query_by_name(ofproto, netdev_get_name(netdev), - &ofproto_port); - *ofp_portp = error ? 
OFPP_NONE : ofproto_port.ofp_port; - ofproto_port_destroy(&ofproto_port); + *ofp_portp = OFPP_NONE; + if (!error) { + struct ofproto_port ofproto_port; + + error = ofproto_port_query_by_name(ofproto, + netdev_get_name(netdev), + &ofproto_port); + if (!error) { + *ofp_portp = ofproto_port.ofp_port; + ofproto_port_destroy(&ofproto_port); + } + } } return error; } @@ -1781,6 +1823,30 @@ ofproto_port_del(struct ofproto *ofproto, ofp_port_t ofp_port) return error; } +static void +flow_mod_init(struct ofputil_flow_mod *fm, + const struct match *match, unsigned int priority, + const struct ofpact *ofpacts, size_t ofpacts_len, + enum ofp_flow_mod_command command) +{ + memset(fm, 0, sizeof *fm); + fm->match = *match; + fm->priority = priority; + fm->cookie = 0; + fm->new_cookie = 0; + fm->modify_cookie = false; + fm->table_id = 0; + fm->command = command; + fm->idle_timeout = 0; + fm->hard_timeout = 0; + fm->buffer_id = UINT32_MAX; + fm->out_port = OFPP_ANY; + fm->out_group = OFPG_ANY; + fm->flags = 0; + fm->ofpacts = CONST_CAST(struct ofpact *, ofpacts); + fm->ofpacts_len = ofpacts_len; +} + static int simple_flow_mod(struct ofproto *ofproto, const struct match *match, unsigned int priority, @@ -1789,22 +1855,7 @@ simple_flow_mod(struct ofproto *ofproto, { struct ofputil_flow_mod fm; - memset(&fm, 0, sizeof fm); - fm.match = *match; - fm.priority = priority; - fm.cookie = 0; - fm.new_cookie = 0; - fm.modify_cookie = false; - fm.table_id = 0; - fm.command = command; - fm.idle_timeout = 0; - fm.hard_timeout = 0; - fm.buffer_id = UINT32_MAX; - fm.out_port = OFPP_ANY; - fm.out_group = OFPG_ANY; - fm.flags = 0; - fm.ofpacts = CONST_CAST(struct ofpact *, ofpacts); - fm.ofpacts_len = ofpacts_len; + flow_mod_init(&fm, match, priority, ofpacts, ofpacts_len, command); return handle_flow_mod__(ofproto, NULL, &fm, NULL); } @@ -1941,35 +1992,56 @@ alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name) port_idx = port_idx ? port_idx : UINT16_MAX; if (port_idx >= ofproto->max_ports - || bitmap_is_set(ofproto->ofp_port_ids, port_idx)) { - uint16_t end_port_no = ofproto->alloc_port_no; + || ofport_get_usage(ofproto, u16_to_ofp(port_idx)) == LLONG_MAX) { + uint16_t lru_ofport = 0, end_port_no = ofproto->alloc_port_no; + long long int last_used_at, lru = LLONG_MAX; /* Search for a free OpenFlow port number. We try not to * immediately reuse them to prevent problems due to old - * flows. */ + * flows. + * + * We limit the automatically assigned port numbers to the lower half + * of the port range, to reserve the upper half for assignment by + * controllers. */ for (;;) { - if (++ofproto->alloc_port_no >= ofproto->max_ports) { - ofproto->alloc_port_no = 0; + if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) { + ofproto->alloc_port_no = 1; } - if (!bitmap_is_set(ofproto->ofp_port_ids, - ofproto->alloc_port_no)) { + last_used_at = ofport_get_usage(ofproto, + u16_to_ofp(ofproto->alloc_port_no)); + if (!last_used_at) { + port_idx = ofproto->alloc_port_no; + break; + } else if ( last_used_at < time_msec() - 60*60*1000) { + /* If the port with ofport 'ofproto->alloc_port_no' was deleted + * more than an hour ago, consider it usable. 
*/ + ofport_remove_usage(ofproto, + u16_to_ofp(ofproto->alloc_port_no)); port_idx = ofproto->alloc_port_no; break; + } else if (last_used_at < lru) { + lru = last_used_at; + lru_ofport = ofproto->alloc_port_no; } + if (ofproto->alloc_port_no == end_port_no) { + if (lru_ofport) { + port_idx = lru_ofport; + break; + } return OFPP_NONE; } } } - bitmap_set1(ofproto->ofp_port_ids, port_idx); + ofport_set_usage(ofproto, u16_to_ofp(port_idx), LLONG_MAX); return u16_to_ofp(port_idx); } static void -dealloc_ofp_port(const struct ofproto *ofproto, ofp_port_t ofp_port) +dealloc_ofp_port(struct ofproto *ofproto, ofp_port_t ofp_port) { if (ofp_to_u16(ofp_port) < ofproto->max_ports) { - bitmap_set0(ofproto->ofp_port_ids, ofp_to_u16(ofp_port)); + ofport_set_usage(ofproto, ofp_port, time_msec()); } } @@ -2054,7 +2126,6 @@ ofport_install(struct ofproto *p, } ofport->ofproto = p; ofport->netdev = netdev; - ofport->change_seq = netdev_change_seq(netdev); ofport->pp = *pp; ofport->ofp_port = pp->port_no; ofport->created = time_msec(); @@ -2194,6 +2265,55 @@ ofproto_get_port(const struct ofproto *ofproto, ofp_port_t ofp_port) return NULL; } +static long long int +ofport_get_usage(const struct ofproto *ofproto, ofp_port_t ofp_port) +{ + struct ofport_usage *usage; + + HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), + &ofproto->ofport_usage) { + if (usage->ofp_port == ofp_port) { + return usage->last_used; + } + } + return 0; +} + +static void +ofport_set_usage(struct ofproto *ofproto, ofp_port_t ofp_port, + long long int last_used) +{ + struct ofport_usage *usage; + HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), + &ofproto->ofport_usage) { + if (usage->ofp_port == ofp_port) { + usage->last_used = last_used; + return; + } + } + ovs_assert(last_used == LLONG_MAX); + + usage = xmalloc(sizeof *usage); + usage->ofp_port = ofp_port; + usage->last_used = last_used; + hmap_insert(&ofproto->ofport_usage, &usage->hmap_node, + hash_ofp_port(ofp_port)); +} + +static void +ofport_remove_usage(struct ofproto *ofproto, ofp_port_t ofp_port) +{ + struct ofport_usage *usage; + HMAP_FOR_EACH_IN_BUCKET (usage, hmap_node, hash_ofp_port(ofp_port), + &ofproto->ofport_usage) { + if (usage->ofp_port == ofp_port) { + hmap_remove(&ofproto->ofport_usage, &usage->hmap_node); + free(usage); + break; + } + } +} + int ofproto_port_get_stats(const struct ofport *port, struct netdev_stats *stats) { @@ -2240,7 +2360,6 @@ update_port(struct ofproto *ofproto, const char *name) * Don't close the old netdev yet in case port_modified has to * remove a retained reference to it.*/ port->netdev = netdev; - port->change_seq = netdev_change_seq(netdev); if (port->ofproto->ofproto_class->port_modified) { port->ofproto->ofproto_class->port_modified(port); @@ -2443,10 +2562,14 @@ ofproto_rule_destroy__(struct rule *rule) rule->ofproto->ofproto_class->rule_dealloc(rule); } +static uint32_t get_provider_meter_id(const struct ofproto *, + uint32_t of_meter_id); + /* Creates and returns a new 'struct rule_actions', with a ref_count of 1, * whose actions are a copy of from the 'ofpacts_len' bytes of 'ofpacts'. 
*/ struct rule_actions * -rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len) +rule_actions_create(const struct ofproto *ofproto, + const struct ofpact *ofpacts, size_t ofpacts_len) { struct rule_actions *actions; @@ -2454,7 +2577,10 @@ rule_actions_create(const struct ofpact *ofpacts, size_t ofpacts_len) atomic_init(&actions->ref_count, 1); actions->ofpacts = xmemdup(ofpacts, ofpacts_len); actions->ofpacts_len = ofpacts_len; - actions->meter_id = ofpacts_get_meter(ofpacts, ofpacts_len); + actions->provider_meter_id + = get_provider_meter_id(ofproto, + ofpacts_get_meter(ofpacts, ofpacts_len)); + return actions; } @@ -2480,6 +2606,7 @@ rule_actions_unref(struct rule_actions *actions) atomic_sub(&actions->ref_count, 1, &orig); if (orig == 1) { + free(actions->ofpacts); free(actions); } else { ovs_assert(orig != 0); @@ -2725,28 +2852,33 @@ reject_slave_controller(struct ofconn *ofconn) } } -/* Checks that the 'ofpacts_len' bytes of actions in 'ofpacts' are appropriate - * for a packet with the prerequisites satisfied by 'flow' in table 'table_id'. - * 'flow' may be temporarily modified, but is restored at return. - */ +/* Checks that the 'ofpacts_len' bytes of action in 'ofpacts' are appropriate + * for 'ofproto': + * + * - If they use a meter, then 'ofproto' has that meter configured. + * + * - If they use any groups, then 'ofproto' has that group configured. + * + * Returns 0 if successful, otherwise an OpenFlow error. */ static enum ofperr ofproto_check_ofpacts(struct ofproto *ofproto, - const struct ofpact ofpacts[], size_t ofpacts_len, - struct flow *flow, uint8_t table_id) + const struct ofpact ofpacts[], size_t ofpacts_len) { - enum ofperr error; + const struct ofpact *a; uint32_t mid; - error = ofpacts_check(ofpacts, ofpacts_len, flow, - u16_to_ofp(ofproto->max_ports), table_id); - if (error) { - return error; - } - mid = ofpacts_get_meter(ofpacts, ofpacts_len); - if (mid && ofproto_get_provider_meter_id(ofproto, mid) == UINT32_MAX) { + if (mid && get_provider_meter_id(ofproto, mid) == UINT32_MAX) { return OFPERR_OFPMMFC_INVALID_METER; } + + OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) { + if (a->type == OFPACT_GROUP + && !ofproto_group_exists(ofproto, ofpact_get_GROUP(a)->group_id)) { + return OFPERR_OFPBAC_BAD_OUT_GROUP; + } + } + return 0; } @@ -2781,7 +2913,6 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh) goto exit_free_ofpacts; } - /* Get payload. */ if (po.buffer_id != UINT32_MAX) { error = ofconn_pktbuf_retrieve(ofconn, po.buffer_id, &payload, NULL); @@ -2796,7 +2927,7 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh) /* Verify actions against packet, then send packet if successful. 
*/ in_port_.ofp_port = po.in_port; flow_extract(payload, 0, 0, NULL, &in_port_, &flow); - error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len, &flow, 0); + error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len); if (!error) { error = p->ofproto_class->packet_out(p, payload, &flow, po.ofpacts, po.ofpacts_len); @@ -2916,15 +3047,15 @@ handle_table_stats_request(struct ofconn *ofconn, ots = xcalloc(p->n_tables, sizeof *ots); for (i = 0; i < p->n_tables; i++) { ots[i].table_id = i; - sprintf(ots[i].name, "table%zu", i); + sprintf(ots[i].name, "table%"PRIuSIZE, i); ots[i].match = htonll(OFPXMT13_MASK); ots[i].wildcards = htonll(OFPXMT13_MASK); ots[i].write_actions = htonl(OFPAT11_OUTPUT); ots[i].apply_actions = htonl(OFPAT11_OUTPUT); ots[i].write_setfields = htonll(OFPXMT13_MASK); ots[i].apply_setfields = htonll(OFPXMT13_MASK); - ots[i].metadata_match = htonll(UINT64_MAX); - ots[i].metadata_write = htonll(UINT64_MAX); + ots[i].metadata_match = OVS_BE64_MAX; + ots[i].metadata_write = OVS_BE64_MAX; ots[i].instructions = htonl(OFPIT11_ALL); ots[i].config = htonl(OFPTC11_TABLE_MISS_MASK); ots[i].max_entries = htonl(1000000); /* An arbitrary big number. */ @@ -3232,7 +3363,13 @@ collect_rule(struct rule *rule, const struct rule_criteria *c, struct rule_collection *rules) OVS_REQUIRES(ofproto_mutex) { - if (ofproto_rule_is_hidden(rule)) { + /* We ordinarily want to skip hidden rules, but there has to be a way for + * code internal to OVS to modify and delete them, so if the criteria + * specify a priority that can only be for a hidden flow, then allow hidden + * rules to be selected. (This doesn't allow OpenFlow clients to meddle + * with hidden flows because OpenFlow uses only a 16-bit field to specify + * priority.) */ + if (ofproto_rule_is_hidden(rule) && c->cr.priority <= UINT16_MAX) { return 0; } else if (rule->pending) { return OFPROTO_POSTPONE; @@ -3271,7 +3408,7 @@ collect_rules_loose(struct ofproto *ofproto, goto exit; } - if (criteria->cookie_mask == htonll(UINT64_MAX)) { + if (criteria->cookie_mask == OVS_BE64_MAX) { struct rule *rule; HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node, @@ -3332,7 +3469,7 @@ collect_rules_strict(struct ofproto *ofproto, goto exit; } - if (criteria->cookie_mask == htonll(UINT64_MAX)) { + if (criteria->cookie_mask == OVS_BE64_MAX) { struct rule *rule; HINDEX_FOR_EACH_WITH_HASH (rule, cookie_node, @@ -3476,12 +3613,12 @@ flow_stats_ds(struct rule *rule, struct ds *results) ds_put_format(results, "table_id=%"PRIu8", ", rule->table_id); } ds_put_format(results, "duration=%llds, ", (time_msec() - created) / 1000); - ds_put_format(results, "priority=%u, ", rule->cr.priority); ds_put_format(results, "n_packets=%"PRIu64", ", packet_count); ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count); cls_rule_format(&rule->cr, results); ds_put_char(results, ','); + ds_put_cstr(results, "actions="); ofpacts_format(actions->ofpacts, actions->ofpacts_len, results); ds_put_cstr(results, "\n"); @@ -3838,14 +3975,6 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn, } } - /* Verify actions. */ - error = ofproto_check_ofpacts(ofproto, fm->ofpacts, fm->ofpacts_len, - &fm->match.flow, table_id); - if (error) { - cls_rule_destroy(&cr); - return error; - } - /* Serialize against pending deletion. 
*/ if (is_flow_deletion_pending(ofproto, &cr, table_id)) { cls_rule_destroy(&cr); @@ -3898,7 +4027,7 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn, *CONST_CAST(uint8_t *, &rule->table_id) = table - ofproto->tables; rule->flags = fm->flags & OFPUTIL_FF_STATE; - rule->actions = rule_actions_create(fm->ofpacts, fm->ofpacts_len); + rule->actions = rule_actions_create(ofproto, fm->ofpacts, fm->ofpacts_len); list_init(&rule->meter_list_node); rule->eviction_group = NULL; list_init(&rule->expirable); @@ -3962,20 +4091,13 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn, continue; } - /* Verify actions. */ - error = ofpacts_check(fm->ofpacts, fm->ofpacts_len, &fm->match.flow, - u16_to_ofp(ofproto->max_ports), rule->table_id); - if (error) { - return error; - } - actions_changed = !ofpacts_equal(fm->ofpacts, fm->ofpacts_len, rule->actions->ofpacts, rule->actions->ofpacts_len); op = ofoperation_create(group, rule, type, 0); - if (fm->modify_cookie && fm->new_cookie != htonll(UINT64_MAX)) { + if (fm->modify_cookie && fm->new_cookie != OVS_BE64_MAX) { ofproto_rule_change_cookie(ofproto, rule, fm->new_cookie); } if (type == OFOPERATION_REPLACE) { @@ -3999,7 +4121,8 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn, struct rule_actions *new_actions; op->actions = rule->actions; - new_actions = rule_actions_create(fm->ofpacts, fm->ofpacts_len); + new_actions = rule_actions_create(ofproto, + fm->ofpacts, fm->ofpacts_len); ovs_mutex_lock(&rule->mutex); rule->actions = new_actions; @@ -4021,7 +4144,7 @@ modify_flows_add(struct ofproto *ofproto, struct ofconn *ofconn, struct ofputil_flow_mod *fm, const struct ofp_header *request) OVS_REQUIRES(ofproto_mutex) { - if (fm->cookie_mask != htonll(0) || fm->new_cookie == htonll(UINT64_MAX)) { + if (fm->cookie_mask != htonll(0) || fm->new_cookie == OVS_BE64_MAX) { return 0; } return add_flow(ofproto, ofconn, fm, request); @@ -4225,8 +4348,7 @@ ofproto_rule_expire(struct rule *rule, uint8_t reason) ovs_assert(reason == OFPRR_HARD_TIMEOUT || reason == OFPRR_IDLE_TIMEOUT || reason == OFPRR_DELETE || reason == OFPRR_GROUP_DELETE); - ofproto_rule_send_removed(rule, reason); - ofproto_rule_delete__(ofproto, rule); + ofproto_rule_delete__(ofproto, rule, reason); } /* Reduces '*timeout' to no more than 'max'. A value of zero in either case @@ -4283,7 +4405,12 @@ handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh) ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub); error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_protocol(ofconn), - &ofpacts); + &ofpacts, + u16_to_ofp(ofproto->max_ports), + ofproto->n_tables); + if (!error) { + error = ofproto_check_ofpacts(ofproto, fm.ofpacts, fm.ofpacts_len); + } if (!error) { error = handle_flow_mod__(ofproto, ofconn, &fm, oh); } @@ -4788,12 +4915,12 @@ handle_flow_monitor_request(struct ofconn *ofconn, const struct ofp_header *oh) return 0; error: - ovs_mutex_unlock(&ofproto_mutex); - for (i = 0; i < n_monitors; i++) { ofmonitor_destroy(monitors[i]); } free(monitors); + ovs_mutex_unlock(&ofproto_mutex); + return error; } @@ -4842,13 +4969,10 @@ struct meter { /* * This is used in instruction validation at flow set-up time, * as flows may not use non-existing meters. - * This is also used by ofproto-providers to translate OpenFlow meter_ids - * in METER instructions to the corresponding provider meter IDs. * Return value of UINT32_MAX signifies an invalid meter. 
*/ -uint32_t -ofproto_get_provider_meter_id(const struct ofproto * ofproto, - uint32_t of_meter_id) +static uint32_t +get_provider_meter_id(const struct ofproto *ofproto, uint32_t of_meter_id) { if (of_meter_id && of_meter_id <= ofproto->meter_features.max_meters) { const struct meter *meter = ofproto->meters[of_meter_id]; @@ -4928,15 +5052,17 @@ handle_modify_meter(struct ofproto *ofproto, struct ofputil_meter_mod *mm) { struct meter *meter = ofproto->meters[mm->meter.meter_id]; enum ofperr error; + uint32_t provider_meter_id; if (!meter) { return OFPERR_OFPMMFC_UNKNOWN_METER; } + provider_meter_id = meter->provider_meter_id.uint32; error = ofproto->ofproto_class->meter_set(ofproto, &meter->provider_meter_id, &mm->meter); - ovs_assert(meter->provider_meter_id.uint32 != UINT32_MAX); + ovs_assert(meter->provider_meter_id.uint32 == provider_meter_id); if (!error) { meter_update(meter, &mm->meter); } @@ -5188,7 +5314,7 @@ ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id, } static bool -ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id) +ofproto_group_exists__(const struct ofproto *ofproto, uint32_t group_id) OVS_REQ_RDLOCK(ofproto->groups_rwlock) { struct ofgroup *grp; @@ -5202,6 +5328,44 @@ ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id) return false; } +static bool +ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id) + OVS_EXCLUDED(ofproto->groups_rwlock) +{ + bool exists; + + ovs_rwlock_rdlock(&ofproto->groups_rwlock); + exists = ofproto_group_exists__(ofproto, group_id); + ovs_rwlock_unlock(&ofproto->groups_rwlock); + + return exists; +} + +static uint32_t +group_get_ref_count(struct ofgroup *group) + OVS_EXCLUDED(ofproto_mutex) +{ + struct ofproto *ofproto = group->ofproto; + struct rule_criteria criteria; + struct rule_collection rules; + struct match match; + enum ofperr error; + uint32_t count; + + match_init_catchall(&match); + rule_criteria_init(&criteria, 0xff, &match, 0, htonll(0), htonll(0), + OFPP_ANY, group->group_id); + ovs_mutex_lock(&ofproto_mutex); + error = collect_rules_loose(ofproto, &criteria, &rules); + ovs_mutex_unlock(&ofproto_mutex); + rule_criteria_destroy(&criteria); + + count = !error && rules.n < UINT32_MAX ? rules.n : UINT32_MAX; + + rule_collection_destroy(&rules); + return count; +} + static void append_group_stats(struct ofgroup *group, struct list *replies) OVS_REQ_RDLOCK(group->rwlock) @@ -5213,14 +5377,16 @@ append_group_stats(struct ofgroup *group, struct list *replies) ogs.bucket_stats = xmalloc(group->n_buckets * sizeof *ogs.bucket_stats); + /* Provider sets the packet and byte counts, we do the rest. */ + ogs.ref_count = group_get_ref_count(group); + ogs.n_buckets = group->n_buckets; + error = (ofproto->ofproto_class->group_get_stats ? 
ofproto->ofproto_class->group_get_stats(group, &ogs) : EOPNOTSUPP); if (error) { - ogs.ref_count = UINT32_MAX; ogs.packet_count = UINT64_MAX; ogs.byte_count = UINT64_MAX; - ogs.n_buckets = group->n_buckets; memset(ogs.bucket_stats, 0xff, ogs.n_buckets * sizeof *ogs.bucket_stats); } @@ -5309,6 +5475,49 @@ handle_group_features_stats_request(struct ofconn *ofconn, return 0; } +static enum ofperr +handle_queue_get_config_request(struct ofconn *ofconn, + const struct ofp_header *oh) +{ + struct ofproto *p = ofconn_get_ofproto(ofconn); + struct netdev_queue_dump queue_dump; + struct ofport *ofport; + unsigned int queue_id; + struct ofpbuf *reply; + struct smap details; + ofp_port_t request; + enum ofperr error; + + error = ofputil_decode_queue_get_config_request(oh, &request); + if (error) { + return error; + } + + ofport = ofproto_get_port(p, request); + if (!ofport) { + return OFPERR_OFPQOFC_BAD_PORT; + } + + reply = ofputil_encode_queue_get_config_reply(oh); + + smap_init(&details); + NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &queue_dump, ofport->netdev) { + struct ofputil_queue_config queue; + + /* None of the existing queues have compatible properties, so we + * hard-code omitting min_rate and max_rate. */ + queue.queue_id = queue_id; + queue.min_rate = UINT16_MAX; + queue.max_rate = UINT16_MAX; + ofputil_append_queue_get_config_reply(reply, &queue); + } + smap_destroy(&details); + + ofconn_send_reply(ofconn, reply); + + return 0; +} + /* Implements OFPGC11_ADD * in which no matching flow already exists in the flow table. * @@ -5366,7 +5575,7 @@ add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm) goto unlock_out; } - if (ofproto_group_exists(ofproto, gm->group_id)) { + if (ofproto_group_exists__(ofproto, gm->group_id)) { error = OFPERR_OFPGMFC_GROUP_EXISTS; goto unlock_out; } @@ -5461,6 +5670,15 @@ static void delete_group__(struct ofproto *ofproto, struct ofgroup *ofgroup) OVS_RELEASES(ofproto->groups_rwlock) { + struct match match; + struct ofputil_flow_mod fm; + + /* Delete all flow entries containing this group in a group action */ + match_init_catchall(&match); + flow_mod_init(&fm, &match, 0, NULL, 0, OFPFC_DELETE); + fm.out_group = ofgroup->group_id; + handle_flow_mod__(ofproto, NULL, &fm, NULL); + /* Must wait until existing readers are done, * while holding the container's write lock at the same time. */ ovs_rwlock_wrlock(&ofgroup->rwlock); @@ -5576,6 +5794,13 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) if (error) { return error; } + if (oh->version >= OFP13_VERSION && ofpmsg_is_stat_request(oh) + && ofpmp_more(oh)) { + /* We have no buffer implementation for multipart requests. + * Report overflow for requests which consists of multiple + * messages. */ + return OFPERR_OFPBRC_MULTIPART_BUFFER_OVERFLOW; + } switch (type) { /* OpenFlow requests. 
*/ @@ -5686,10 +5911,8 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPTYPE_GROUP_FEATURES_STATS_REQUEST: return handle_group_features_stats_request(ofconn, oh); - /* FIXME: Change the following once they are implemented: */ case OFPTYPE_QUEUE_GET_CONFIG_REQUEST: - case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: - return OFPERR_OFPBRC_BAD_TYPE; + return handle_queue_get_config_request(ofconn, oh); case OFPTYPE_HELLO: case OFPTYPE_ERROR: @@ -5718,9 +5941,15 @@ handle_openflow__(struct ofconn *ofconn, const struct ofpbuf *msg) case OFPTYPE_METER_STATS_REPLY: case OFPTYPE_METER_CONFIG_STATS_REPLY: case OFPTYPE_METER_FEATURES_STATS_REPLY: + case OFPTYPE_TABLE_FEATURES_STATS_REQUEST: case OFPTYPE_TABLE_FEATURES_STATS_REPLY: + case OFPTYPE_ROLE_STATUS: default: - return OFPERR_OFPBRC_BAD_TYPE; + if (ofpmsg_is_stat_request(oh)) { + return OFPERR_OFPBRC_BAD_STAT; + } else { + return OFPERR_OFPBRC_BAD_TYPE; + } } } @@ -6381,7 +6610,7 @@ static void oftable_init(struct oftable *table) { memset(table, 0, sizeof *table); - classifier_init(&table->cls); + classifier_init(&table->cls, flow_segment_u32s); table->max_flows = UINT_MAX; } @@ -6533,8 +6762,10 @@ oftable_insert_rule(struct rule *rule) cookies_insert(ofproto, rule); - if (rule->actions->meter_id) { - struct meter *meter = ofproto->meters[rule->actions->meter_id]; + if (rule->actions->provider_meter_id != UINT32_MAX) { + uint32_t meter_id = ofpacts_get_meter(rule->actions->ofpacts, + rule->actions->ofpacts_len); + struct meter *meter = ofproto->meters[meter_id]; list_insert(&meter->rules, &rule->meter_list_node); } ovs_rwlock_wrlock(&table->cls.rwlock); @@ -6606,10 +6837,10 @@ ofproto_get_vlan_usage(struct ofproto *ofproto, unsigned long int *vlan_bitmap) ofproto->vlans_changed = false; OFPROTO_FOR_EACH_TABLE (oftable, ofproto) { - const struct cls_table *table; + const struct cls_subtable *table; ovs_rwlock_rdlock(&oftable->cls.rwlock); - HMAP_FOR_EACH (table, hmap_node, &oftable->cls.tables) { + HMAP_FOR_EACH (table, hmap_node, &oftable->cls.subtables) { if (minimask_get_vid_mask(&table->mask) == VLAN_VID_MASK) { const struct cls_rule *rule;