X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto.c;h=676a6cbe2247a908f911b7af7f58b756dd0ffe7d;hb=4b0424809b823101c969a0691fc1db0c880ae64a;hp=6ea751028ac2ad6cc6521cc0d22239fbfc7c3977;hpb=732feb9311b72d96ee7ec341f82586804b64fcb8;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index 6ea751028..676a6cbe2 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -25,6 +25,7 @@
 #include "bitmap.h"
 #include "byte-order.h"
 #include "classifier.h"
+#include "connectivity.h"
 #include "connmgr.h"
 #include "coverage.h"
 #include "dynamic-string.h"
@@ -47,6 +48,7 @@
 #include "pktbuf.h"
 #include "poll-loop.h"
 #include "random.h"
+#include "seq.h"
 #include "shash.h"
 #include "simap.h"
 #include "smap.h"
@@ -271,9 +273,12 @@ static enum ofperr modify_flows__(struct ofproto *, struct ofconn *,
 static void delete_flow__(struct rule *rule, struct ofopgroup *,
                           enum ofp_flow_removed_reason)
     OVS_REQUIRES(ofproto_mutex);
+static bool ofproto_group_exists__(const struct ofproto *ofproto,
+                                   uint32_t group_id)
+    OVS_REQ_RDLOCK(ofproto->groups_rwlock);
 static bool ofproto_group_exists(const struct ofproto *ofproto,
                                  uint32_t group_id)
-    OVS_REQ_RDLOCK(ofproto->groups_rwlock);
+    OVS_EXCLUDED(ofproto->groups_rwlock);
 static enum ofperr add_group(struct ofproto *, struct ofputil_group_mod *);
 static bool handle_openflow(struct ofconn *, const struct ofpbuf *);
 static enum ofperr handle_flow_mod__(struct ofproto *, struct ofconn *,
@@ -301,10 +306,11 @@ static size_t allocated_ofproto_classes;
 /* Global lock that protects all flow table operations. */
 struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
 
-unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
-unsigned n_handler_threads;
+unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
 enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
 
+size_t n_handlers, n_revalidators;
+
 /* Map from datapath name to struct ofproto, for use by unixctl commands. */
 static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
 
@@ -429,6 +435,7 @@ ofproto_enumerate_types(struct sset *types)
 {
     size_t i;
 
+    sset_clear(types);
     for (i = 0; i < n_ofproto_classes; i++) {
         ofproto_classes[i]->enumerate_types(types);
     }
@@ -528,6 +535,30 @@ ofproto_create(const char *datapath_name, const char *datapath_type,
     ovs_rwlock_init(&ofproto->groups_rwlock);
     hmap_init(&ofproto->groups);
     ovs_mutex_unlock(&ofproto_mutex);
+    ofproto->ogf.capabilities = OFPGFC_CHAINING | OFPGFC_SELECT_LIVENESS |
+                                OFPGFC_SELECT_WEIGHT;
+    ofproto->ogf.max_groups[OFPGT11_ALL] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_SELECT] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_INDIRECT] = OFPG_MAX;
+    ofproto->ogf.max_groups[OFPGT11_FF] = OFPG_MAX;
+    ofproto->ogf.actions[0] =
+        (1 << OFPAT11_OUTPUT) |
+        (1 << OFPAT11_COPY_TTL_OUT) |
+        (1 << OFPAT11_COPY_TTL_IN) |
+        (1 << OFPAT11_SET_MPLS_TTL) |
+        (1 << OFPAT11_DEC_MPLS_TTL) |
+        (1 << OFPAT11_PUSH_VLAN) |
+        (1 << OFPAT11_POP_VLAN) |
+        (1 << OFPAT11_PUSH_MPLS) |
+        (1 << OFPAT11_POP_MPLS) |
+        (1 << OFPAT11_SET_QUEUE) |
+        (1 << OFPAT11_GROUP) |
+        (1 << OFPAT11_SET_NW_TTL) |
+        (1 << OFPAT11_DEC_NW_TTL) |
+        (1 << OFPAT12_SET_FIELD);
+/* not supported:
+ * (1 << OFPAT13_PUSH_PBB) |
+ * (1 << OFPAT13_POP_PBB) */
 
     error = ofproto->ofproto_class->construct(ofproto);
     if (error) {
@@ -662,10 +693,9 @@ ofproto_set_in_band_queue(struct ofproto *ofproto, int queue_id)
 /* Sets the number of flows at which eviction from the kernel flow table
  * will occur. */
 void
-ofproto_set_flow_eviction_threshold(unsigned threshold)
+ofproto_set_flow_limit(unsigned limit)
 {
-    flow_eviction_threshold = MAX(OFPROTO_FLOW_EVICTION_THRESHOLD_MIN,
-                                  threshold);
+    ofproto_flow_limit = limit;
 }
 
 /* Sets the path for handling flow misses. */
@@ -703,16 +733,22 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
     }
 }
 
-/* Sets number of upcall handler threads. The default is
- * (number of online cores - 2). */
 void
-ofproto_set_n_handler_threads(unsigned limit)
+ofproto_set_threads(size_t n_handlers_, size_t n_revalidators_)
 {
-    if (limit) {
-        n_handler_threads = limit;
-    } else {
-        int n_proc = sysconf(_SC_NPROCESSORS_ONLN);
-        n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
+    int threads = MAX(count_cpu_cores(), 2);
+
+    n_revalidators = n_revalidators_;
+    n_handlers = n_handlers_;
+
+    if (!n_revalidators) {
+        n_revalidators = n_handlers
+            ? MAX(threads - (int) n_handlers, 1)
+            : threads / 4 + 1;
+    }
+
+    if (!n_handlers) {
+        n_handlers = MAX(threads - (int) n_revalidators, 1);
     }
 }
 
@@ -857,6 +893,27 @@ ofproto_port_get_stp_status(struct ofproto *ofproto, ofp_port_t ofp_port,
             ? ofproto->ofproto_class->get_stp_port_status(ofport, s)
             : EOPNOTSUPP);
 }
+
+/* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in
+ * 's'. If the 'enabled' member in 's' is false, then the other members
+ * are not meaningful.
+ *
+ * Returns 0 if successful, otherwise a positive errno value.*/
+int
+ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port,
+                           struct ofproto_port_stp_stats *s)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    if (!ofport) {
+        VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent "
+                     "port %"PRIu16, ofproto->name, ofp_port);
+        return ENODEV;
+    }
+
+    return (ofproto->ofproto_class->get_stp_port_stats
+            ? ofproto->ofproto_class->get_stp_port_stats(ofport, s)
+            : EOPNOTSUPP);
+}
 
 /* Queue DSCP configuration. */
 
@@ -1111,7 +1168,7 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
     }
 
     table->max_flows = s->max_flows;
-    ovs_rwlock_rdlock(&table->cls.rwlock);
+    ovs_rwlock_wrlock(&table->cls.rwlock);
     if (classifier_count(&table->cls) > table->max_flows
         && table->eviction_fields) {
         /* 'table' contains more flows than allowed. We might not be able to
@@ -1127,6 +1184,10 @@ ofproto_configure_table(struct ofproto *ofproto, int table_id,
             break;
         }
     }
+
+    classifier_set_prefix_fields(&table->cls,
+                                 s->prefix_fields, s->n_prefix_fields);
+
     ovs_rwlock_unlock(&table->cls.rwlock);
 }
 
@@ -1333,23 +1394,6 @@ ofproto_type_run(const char *datapath_type)
     return error;
 }
 
-int
-ofproto_type_run_fast(const char *datapath_type)
-{
-    const struct ofproto_class *class;
-    int error;
-
-    datapath_type = ofproto_normalize_type(datapath_type);
-    class = ofproto_class_find__(datapath_type);
-
-    error = class->type_run_fast ? class->type_run_fast(datapath_type) : 0;
-    if (error && error != EAGAIN) {
-        VLOG_ERR_RL(&rl, "%s: type_run_fast failed (%s)",
-                    datapath_type, ovs_strerror(error));
-    }
-    return error;
-}
-
 void
 ofproto_type_wait(const char *datapath_type)
 {
@@ -1379,10 +1423,8 @@ any_pending_ops(const struct ofproto *p)
 int
 ofproto_run(struct ofproto *p)
 {
-    struct sset changed_netdevs;
-    const char *changed_netdev;
-    struct ofport *ofport;
     int error;
+    uint64_t new_seq;
 
     error = p->ofproto_class->run(p);
     if (error && error != EAGAIN) {
@@ -1433,24 +1475,29 @@ ofproto_run(struct ofproto *p)
         }
     }
 
-    /* Update OpenFlow port status for any port whose netdev has changed.
-     *
-     * Refreshing a given 'ofport' can cause an arbitrary ofport to be
-     * destroyed, so it's not safe to update ports directly from the
-     * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
-     * need this two-phase approach. */
-    sset_init(&changed_netdevs);
-    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
-        unsigned int change_seq = netdev_change_seq(ofport->netdev);
-        if (ofport->change_seq != change_seq) {
-            ofport->change_seq = change_seq;
-            sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
+    new_seq = seq_read(connectivity_seq_get());
+    if (new_seq != p->change_seq) {
+        struct sset devnames;
+        const char *devname;
+        struct ofport *ofport;
+
+        /* Update OpenFlow port status for any port whose netdev has changed.
+         *
+         * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+         * destroyed, so it's not safe to update ports directly from the
+         * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+         * need this two-phase approach. */
+        sset_init(&devnames);
+        HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
+            sset_add(&devnames, netdev_get_name(ofport->netdev));
         }
+        SSET_FOR_EACH (devname, &devnames) {
+            update_port(p, devname);
+        }
+        sset_destroy(&devnames);
+
+        p->change_seq = new_seq;
     }
-    SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
-        update_port(p, changed_netdev);
-    }
-    sset_destroy(&changed_netdevs);
 
     switch (p->state) {
     case S_OPENFLOW:
@@ -1475,7 +1522,7 @@ ofproto_run(struct ofproto *p)
         break;
 
     default:
-        NOT_REACHED();
+        OVS_NOT_REACHED();
     }
 
     if (time_msec() >= p->next_op_report) {
@@ -1518,40 +1565,14 @@ ofproto_run(struct ofproto *p)
     return error;
 }
 
-/* Performs periodic activity required by 'ofproto' that needs to be done
- * with the least possible latency.
- *
- * It makes sense to call this function a couple of times per poll loop, to
- * provide a significant performance boost on some benchmarks with the
- * ofproto-dpif implementation. */
-int
-ofproto_run_fast(struct ofproto *p)
-{
-    int error;
-
-    error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0;
-    if (error && error != EAGAIN) {
-        VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)",
-                    p->name, ovs_strerror(error));
-    }
-    return error;
-}
-
 void
 ofproto_wait(struct ofproto *p)
 {
-    struct ofport *ofport;
-
     p->ofproto_class->wait(p);
     if (p->ofproto_class->port_poll_wait) {
         p->ofproto_class->port_poll_wait(p);
    }
-
-    HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
-        if (ofport->change_seq != netdev_change_seq(ofport->netdev)) {
-            poll_immediate_wake();
-        }
-    }
+    seq_wait(connectivity_seq_get(), p->change_seq);
 
     switch (p->state) {
     case S_OPENFLOW:
@@ -1604,6 +1625,19 @@ ofproto_get_memory_usage(const struct ofproto *ofproto, struct simap *usage)
     connmgr_get_memory_usage(ofproto->connmgr, usage);
 }
 
+void
+ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage)
+{
+    const struct ofproto_class *class;
+
+    datapath_type = ofproto_normalize_type(datapath_type);
+    class = ofproto_class_find__(datapath_type);
+
+    if (class && class->type_get_memory_usage) {
+        class->type_get_memory_usage(datapath_type, usage);
+    }
+}
+
 void
 ofproto_get_ofproto_controller_info(const struct ofproto *ofproto,
                                     struct shash *info)
@@ -1745,12 +1779,18 @@ ofproto_port_add(struct ofproto *ofproto, struct netdev *netdev,
         update_port(ofproto, netdev_name);
     }
     if (ofp_portp) {
-        struct ofproto_port ofproto_port;
-
-        ofproto_port_query_by_name(ofproto, netdev_get_name(netdev),
-                                   &ofproto_port);
-        *ofp_portp = error ? OFPP_NONE : ofproto_port.ofp_port;
-        ofproto_port_destroy(&ofproto_port);
+        *ofp_portp = OFPP_NONE;
+        if (!error) {
+            struct ofproto_port ofproto_port;
+
+            error = ofproto_port_query_by_name(ofproto,
+                                               netdev_get_name(netdev),
+                                               &ofproto_port);
+            if (!error) {
+                *ofp_portp = ofproto_port.ofp_port;
+                ofproto_port_destroy(&ofproto_port);
+            }
+        }
     }
     return error;
 }
@@ -1977,9 +2017,13 @@ alloc_ofp_port(struct ofproto *ofproto, const char *netdev_name)
 
     /* Search for a free OpenFlow port number. We try not to
      * immediately reuse them to prevent problems due to old
-     * flows. */
+     * flows.
+     *
+     * We limit the automatically assigned port numbers to the lower half
+     * of the port range, to reserve the upper half for assignment by
+     * controllers. */
     for (;;) {
-        if (++ofproto->alloc_port_no >= ofproto->max_ports) {
+        if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) {
             ofproto->alloc_port_no = 1;
         }
         last_used_at = ofport_get_usage(ofproto,
@@ -2101,7 +2145,6 @@ ofport_install(struct ofproto *p,
     }
     ofport->ofproto = p;
     ofport->netdev = netdev;
-    ofport->change_seq = netdev_change_seq(netdev);
     ofport->pp = *pp;
     ofport->ofp_port = pp->port_no;
     ofport->created = time_msec();
@@ -2336,7 +2379,6 @@ update_port(struct ofproto *ofproto, const char *name)
          * Don't close the old netdev yet in case port_modified has to
          * remove a retained reference to it.*/
         port->netdev = netdev;
-        port->change_seq = netdev_change_seq(netdev);
 
         if (port->ofproto->ofproto_class->port_modified) {
             port->ofproto->ofproto_class->port_modified(port);
@@ -2633,7 +2675,7 @@ ofoperation_has_out_port(const struct ofoperation *op, ofp_port_t out_port)
                                  op->actions->ofpacts_len, out_port);
     }
 
-    NOT_REACHED();
+    OVS_NOT_REACHED();
 }
 
 static void
@@ -2829,46 +2871,33 @@ reject_slave_controller(struct ofconn *ofconn)
     }
 }
 
-/* Checks that the 'ofpacts_len' bytes of actions in 'ofpacts' are appropriate
- * for a packet with the prerequisites satisfied by 'flow' in table 'table_id'.
- * 'flow' may be temporarily modified, but is restored at return.
- */
+/* Checks that the 'ofpacts_len' bytes of action in 'ofpacts' are appropriate
+ * for 'ofproto':
+ *
+ *    - If they use a meter, then 'ofproto' has that meter configured.
+ *
+ *    - If they use any groups, then 'ofproto' has that group configured.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error. */
 static enum ofperr
 ofproto_check_ofpacts(struct ofproto *ofproto,
-                      const struct ofpact ofpacts[], size_t ofpacts_len,
-                      struct flow *flow, uint8_t table_id,
-                      const struct ofp_header *oh)
+                      const struct ofpact ofpacts[], size_t ofpacts_len)
 {
-    enum ofperr error;
     const struct ofpact *a;
     uint32_t mid;
 
-    error = ofpacts_check(ofpacts, ofpacts_len, flow,
-                          u16_to_ofp(ofproto->max_ports), table_id,
-                          oh && oh->version > OFP10_VERSION);
-    if (error) {
-        return error;
+    mid = ofpacts_get_meter(ofpacts, ofpacts_len);
+    if (mid && get_provider_meter_id(ofproto, mid) == UINT32_MAX) {
+        return OFPERR_OFPMMFC_INVALID_METER;
     }
 
     OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
-        if (a->type == OFPACT_GROUP) {
-            bool exists;
-
-            ovs_rwlock_rdlock(&ofproto->groups_rwlock);
-            exists = ofproto_group_exists(ofproto,
-                                          ofpact_get_GROUP(a)->group_id);
-            ovs_rwlock_unlock(&ofproto->groups_rwlock);
-
-            if (!exists) {
-                return OFPERR_OFPBAC_BAD_OUT_GROUP;
-            }
+        if (a->type == OFPACT_GROUP
+            && !ofproto_group_exists(ofproto, ofpact_get_GROUP(a)->group_id)) {
+            return OFPERR_OFPBAC_BAD_OUT_GROUP;
         }
     }
 
-    mid = ofpacts_get_meter(ofpacts, ofpacts_len);
-    if (mid && get_provider_meter_id(ofproto, mid) == UINT32_MAX) {
-        return OFPERR_OFPMMFC_INVALID_METER;
-    }
     return 0;
 }
 
@@ -2903,7 +2932,6 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
         goto exit_free_ofpacts;
     }
 
-
     /* Get payload. */
     if (po.buffer_id != UINT32_MAX) {
         error = ofconn_pktbuf_retrieve(ofconn, po.buffer_id, &payload, NULL);
@@ -2918,7 +2946,7 @@ handle_packet_out(struct ofconn *ofconn, const struct ofp_header *oh)
     /* Verify actions against packet, then send packet if successful. */
     in_port_.ofp_port = po.in_port;
     flow_extract(payload, 0, 0, NULL, &in_port_, &flow);
-    error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len, &flow, 0, oh);
+    error = ofproto_check_ofpacts(p, po.ofpacts, po.ofpacts_len);
     if (!error) {
         error = p->ofproto_class->packet_out(p, payload, &flow,
                                              po.ofpacts, po.ofpacts_len);
@@ -3038,7 +3066,7 @@ handle_table_stats_request(struct ofconn *ofconn,
     ots = xcalloc(p->n_tables, sizeof *ots);
     for (i = 0; i < p->n_tables; i++) {
         ots[i].table_id = i;
-        sprintf(ots[i].name, "table%zu", i);
+        sprintf(ots[i].name, "table%"PRIuSIZE, i);
         ots[i].match = htonll(OFPXMT13_MASK);
         ots[i].wildcards = htonll(OFPXMT13_MASK);
         ots[i].write_actions = htonl(OFPAT11_OUTPUT);
@@ -3199,14 +3227,11 @@ calc_duration(long long int start, long long int now,
 }
 
 /* Checks whether 'table_id' is 0xff or a valid table ID in 'ofproto'. Returns
- * 0 if 'table_id' is OK, otherwise an OpenFlow error code. */
-static enum ofperr
+ * true if 'table_id' is OK, false otherwise. */
+static bool
 check_table_id(const struct ofproto *ofproto, uint8_t table_id)
 {
-    return (table_id == 0xff || table_id < ofproto->n_tables
-            ? 0
-            : OFPERR_OFPBRC_BAD_TABLE_ID);
-
+    return table_id == OFPTT_ALL || table_id < ofproto->n_tables;
 }
 
 static struct oftable *
@@ -3390,12 +3415,12 @@ collect_rules_loose(struct ofproto *ofproto,
     OVS_REQUIRES(ofproto_mutex)
 {
     struct oftable *table;
-    enum ofperr error;
+    enum ofperr error = 0;
 
     rule_collection_init(rules);
 
-    error = check_table_id(ofproto, criteria->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, criteria->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         goto exit;
     }
 
@@ -3451,12 +3476,12 @@ collect_rules_strict(struct ofproto *ofproto,
     OVS_REQUIRES(ofproto_mutex)
 {
     struct oftable *table;
-    int error;
+    int error = 0;
 
     rule_collection_init(rules);
 
-    error = check_table_id(ofproto, criteria->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, criteria->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         goto exit;
     }
 
@@ -3909,10 +3934,10 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
     struct cls_rule cr;
     struct rule *rule;
     uint8_t table_id;
-    int error;
+    int error = 0;
 
-    error = check_table_id(ofproto, fm->table_id);
-    if (error) {
+    if (!check_table_id(ofproto, fm->table_id)) {
+        error = OFPERR_OFPBRC_BAD_TABLE_ID;
         return error;
     }
 
@@ -3966,14 +3991,6 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
         }
     }
 
-    /* Verify actions. */
-    error = ofproto_check_ofpacts(ofproto, fm->ofpacts, fm->ofpacts_len,
-                                  &fm->match.flow, table_id, request);
-    if (error) {
-        cls_rule_destroy(&cr);
-        return error;
-    }
-
     /* Serialize against pending deletion. */
     if (is_flow_deletion_pending(ofproto, &cr, table_id)) {
         cls_rule_destroy(&cr);
@@ -4072,19 +4089,6 @@ modify_flows__(struct ofproto *ofproto, struct ofconn *ofconn,
     enum ofperr error;
     size_t i;
 
-    /* Verify actions before we start to modify any rules, to avoid partial
-     * flow table modifications. */
-    for (i = 0; i < rules->n; i++) {
-        struct rule *rule = rules->rules[i];
-
-        error = ofproto_check_ofpacts(ofproto, fm->ofpacts, fm->ofpacts_len,
-                                      &fm->match.flow, rule->table_id,
-                                      request);
-        if (error) {
-            return error;
-        }
-    }
-
     type = fm->command == OFPFC_ADD ? OFOPERATION_REPLACE : OFOPERATION_MODIFY;
     group = ofopgroup_create(ofproto, ofconn, request, fm->buffer_id);
     error = OFPERR_OFPBRC_EPERM;
@@ -4417,7 +4421,12 @@ handle_flow_mod(struct ofconn *ofconn, const struct ofp_header *oh)
 
     ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
     error = ofputil_decode_flow_mod(&fm, oh, ofconn_get_protocol(ofconn),
-                                    &ofpacts);
+                                    &ofpacts,
+                                    u16_to_ofp(ofproto->max_ports),
+                                    ofproto->n_tables);
+    if (!error) {
+        error = ofproto_check_ofpacts(ofproto, fm.ofpacts, fm.ofpacts_len);
+    }
     if (!error) {
         error = handle_flow_mod__(ofproto, ofconn, &fm, oh);
     }
@@ -4719,7 +4728,7 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
         * actions, so that when the operation commits we report the change. */
        switch (op->type) {
        case OFOPERATION_ADD:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
 
        case OFOPERATION_MODIFY:
        case OFOPERATION_REPLACE:
@@ -4731,7 +4740,7 @@ ofproto_compose_flow_refresh_update(const struct rule *rule,
            break;
 
        default:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
        }
    }
    fu.ofpacts = actions ? actions->ofpacts : NULL;
@@ -5321,7 +5330,7 @@ ofproto_group_write_lookup(const struct ofproto *ofproto, uint32_t group_id,
 }
 
 static bool
-ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
+ofproto_group_exists__(const struct ofproto *ofproto, uint32_t group_id)
     OVS_REQ_RDLOCK(ofproto->groups_rwlock)
 {
     struct ofgroup *grp;
@@ -5335,6 +5344,19 @@ ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
     return false;
 }
 
+static bool
+ofproto_group_exists(const struct ofproto *ofproto, uint32_t group_id)
+    OVS_EXCLUDED(ofproto->groups_rwlock)
+{
+    bool exists;
+
+    ovs_rwlock_rdlock(&ofproto->groups_rwlock);
+    exists = ofproto_group_exists__(ofproto, group_id);
+    ovs_rwlock_unlock(&ofproto->groups_rwlock);
+
+    return exists;
+}
+
 static uint32_t
 group_get_ref_count(struct ofgroup *group)
     OVS_EXCLUDED(ofproto_mutex)
@@ -5569,7 +5591,7 @@ add_group(struct ofproto *ofproto, struct ofputil_group_mod *gm)
         goto unlock_out;
     }
 
-    if (ofproto_group_exists(ofproto, gm->group_id)) {
+    if (ofproto_group_exists__(ofproto, gm->group_id)) {
         error = OFPERR_OFPGMFC_GROUP_EXISTS;
         goto unlock_out;
     }
@@ -5756,9 +5778,32 @@ handle_group_mod(struct ofconn *ofconn, const struct ofp_header *oh)
     }
 }
 
+static enum ofperr
+table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
+{
+    /* XXX Reject all configurations because none are currently supported */
+    return OFPERR_OFPTMFC_BAD_CONFIG;
+
+    if (tm->table_id == OFPTT_ALL) {
+        int i;
+        for (i = 0; i < ofproto->n_tables; i++) {
+            atomic_store(&ofproto->tables[i].config,
+                         (unsigned int)tm->config);
+        }
+    } else if (!check_table_id(ofproto, tm->table_id)) {
+        return OFPERR_OFPTMFC_BAD_TABLE;
+    } else {
+        atomic_store(&ofproto->tables[tm->table_id].config,
+                     (unsigned int)tm->config);
+    }
+
+    return 0;
+}
+
 static enum ofperr
 handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
 {
+    struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
     struct ofputil_table_mod tm;
     enum ofperr error;
 
@@ -5772,8 +5817,7 @@ handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
         return error;
     }
 
-    /* XXX Actual table mod support is not implemented yet. */
-    return 0;
+    return table_mod(ofproto, &tm);
 }
 
 static enum ofperr
@@ -6123,7 +6167,7 @@ ofopgroup_complete(struct ofopgroup *group)
                break;
 
            default:
-                NOT_REACHED();
+                OVS_NOT_REACHED();
            }
 
            ofmonitor_report(ofproto->connmgr, rule, event_type,
@@ -6192,7 +6236,7 @@ ofopgroup_complete(struct ofopgroup *group)
            break;
 
        default:
-            NOT_REACHED();
+            OVS_NOT_REACHED();
        }
 
        ofoperation_destroy(op);
@@ -6604,8 +6648,9 @@ static void
 oftable_init(struct oftable *table)
 {
     memset(table, 0, sizeof *table);
-    classifier_init(&table->cls);
+    classifier_init(&table->cls, flow_segment_u32s);
     table->max_flows = UINT_MAX;
+    atomic_init(&table->config, (unsigned int)OFPTC11_TABLE_MISS_CONTROLLER);
 }
 
 /* Destroys 'table', including its classifier and eviction groups.