X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=ofproto%2Fofproto-dpif.c;h=64e27473a3860a8443c164839fdfe91a10e72706;hb=db19b72b9f6d0d40b743214241d9666116e3b448;hp=cc1e9d5adab578db41a4e4ba461670c3fe8f35cf;hpb=06f81620436881449cb9a2db4f875aa00803f28d;p=sliver-openvswitch.git

diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index cc1e9d5ad..64e27473a 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -89,9 +89,11 @@ struct rule_dpif {
     struct ovs_mutex stats_mutex;
     uint64_t packet_count OVS_GUARDED;   /* Number of packets received. */
     uint64_t byte_count OVS_GUARDED;     /* Number of bytes received. */
+    long long int used;                  /* Last used time (msec). */
 };

-static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
+static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
+                           long long int *used);
 static struct rule_dpif *rule_dpif_cast(const struct rule *);
 static void rule_expire(struct rule_dpif *);

@@ -256,6 +258,10 @@ struct dpif_backer {
      * OVS_USERSPACE_ATTR_USERDATA in OVS_ACTION_ATTR_USERSPACE actions.
      * False if the datapath supports only 8-byte (or shorter) userdata. */
     bool variable_length_userdata;
+
+    /* Maximum number of MPLS label stack entries that the datapath supports
+     * in a match */
+    size_t max_mpls_depth;
 };

 /* All existing ofproto_backer instances, indexed by ofproto->up.type. */
@@ -319,6 +325,12 @@ ofproto_dpif_cast(const struct ofproto *ofproto)
     return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
 }

+size_t
+ofproto_dpif_get_max_mpls_depth(const struct ofproto_dpif *ofproto)
+{
+    return ofproto->backer->max_mpls_depth;
+}
+
 static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
                                         ofp_port_t ofp_port);
 static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
@@ -558,7 +570,8 @@ type_run(const char *type)
                           ofproto->netflow, ofproto->up.frag_handling,
                           ofproto->up.forward_bpdu,
                           connmgr_has_in_band(ofproto->up.connmgr),
-                          ofproto->backer->variable_length_userdata);
+                          ofproto->backer->variable_length_userdata,
+                          ofproto->backer->max_mpls_depth);

         HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
             xlate_bundle_set(ofproto, bundle, bundle->name,
@@ -781,6 +794,7 @@ struct odp_garbage {
 };

 static bool check_variable_length_userdata(struct dpif_backer *backer);
+static size_t check_max_mpls_depth(struct dpif_backer *backer);

 static int
 open_dpif_backer(const char *type, struct dpif_backer **backerp)
@@ -881,6 +895,7 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
         return error;
     }
     backer->variable_length_userdata = check_variable_length_userdata(backer);
+    backer->max_mpls_depth = check_max_mpls_depth(backer);

     if (backer->recv_set_enable) {
         udpif_set_threads(backer->udpif, n_handlers, n_revalidators);
@@ -965,6 +980,53 @@ check_variable_length_userdata(struct dpif_backer *backer)
     }
 }

+/* Tests the MPLS label stack depth supported by 'backer''s datapath.
+ *
+ * Returns the number of elements in a struct flow's mpls_lse field
+ * if the datapath supports at least that many entries in an
+ * MPLS label stack.
+ * Otherwise returns the number of MPLS push actions supported by
+ * the datapath. */
+static size_t
+check_max_mpls_depth(struct dpif_backer *backer)
+{
+    struct flow flow;
+    int n;
+
+    for (n = 0; n < FLOW_MAX_MPLS_LABELS; n++) {
+        struct odputil_keybuf keybuf;
+        struct ofpbuf key;
+        int error;
+
+        memset(&flow, 0, sizeof flow);
+        flow.dl_type = htons(ETH_TYPE_MPLS);
+        flow_set_mpls_bos(&flow, n, 1);
+
+        ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+        odp_flow_key_from_flow(&key, &flow, 0);
+
+        error = dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+                              key.data, key.size, NULL, 0, NULL, 0, NULL);
+        if (error && error != EEXIST) {
+            if (error != EINVAL) {
+                VLOG_WARN("%s: MPLS stack length feature probe failed (%s)",
+                          dpif_name(backer->dpif), ovs_strerror(error));
+            }
+            break;
+        }
+
+        error = dpif_flow_del(backer->dpif, key.data, key.size, NULL);
+        if (error) {
+            VLOG_WARN("%s: failed to delete MPLS feature probe flow",
+                      dpif_name(backer->dpif));
+        }
+    }
+
+    VLOG_INFO("%s: MPLS label stack length probed as %d",
+              dpif_name(backer->dpif), n);
+    return n;
+}
+
 static int
 construct(struct ofproto *ofproto_)
 {
@@ -987,7 +1049,7 @@ construct(struct ofproto *ofproto_)
     ofproto->mbridge = mbridge_create();
     ofproto->has_bonded_bundles = false;
     ofproto->lacp_enabled = false;
-    ovs_mutex_init(&ofproto->stats_mutex);
+    ovs_mutex_init_adaptive(&ofproto->stats_mutex);
     ovs_mutex_init(&ofproto->vsp_mutex);
     guarded_list_init(&ofproto->pins);

@@ -1342,14 +1404,16 @@ get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
     struct dpif_dp_stats s;
     uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
     uint64_t n_lookup;
+    long long int used;

     strcpy(ots->name, "classifier");

     dpif_get_dp_stats(ofproto->backer->dpif, &s);
-    rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
-    rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
-    rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);
-
+    rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes, &used);
+    rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes,
+                   &used);
+    rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes,
+                   &used);
     n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
     ots->lookup_count = htonll(n_lookup);
     ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
@@ -1486,6 +1550,9 @@ port_destruct(struct ofport *port_)
     bundle_remove(port_);
     set_cfm(port_, NULL);
     set_bfd(port_, NULL);
+    if (port->stp_port) {
+        stp_port_disable(port->stp_port);
+    }
     if (ofproto->sflow) {
         dpif_sflow_del_port(ofproto->sflow, port->odp_port);
     }
@@ -2851,24 +2918,39 @@ static void
 rule_expire(struct rule_dpif *rule)
     OVS_REQUIRES(ofproto_mutex)
 {
-    uint16_t idle_timeout, hard_timeout;
+    uint16_t hard_timeout, idle_timeout;
     long long int now = time_msec();
-    int reason;
+    int reason = -1;

     ovs_assert(!rule->up.pending);

-    /* Has 'rule' expired? */
-    ovs_mutex_lock(&rule->up.mutex);
     hard_timeout = rule->up.hard_timeout;
     idle_timeout = rule->up.idle_timeout;
-    if (hard_timeout && now > rule->up.modified + hard_timeout * 1000) {
-        reason = OFPRR_HARD_TIMEOUT;
-    } else if (idle_timeout && now > rule->up.used + idle_timeout * 1000) {
-        reason = OFPRR_IDLE_TIMEOUT;
-    } else {
-        reason = -1;
+
+    /* Has 'rule' expired? */
+    if (hard_timeout) {
+        long long int modified;
+
+        ovs_mutex_lock(&rule->up.mutex);
+        modified = rule->up.modified;
+        ovs_mutex_unlock(&rule->up.mutex);
+
+        if (now > modified + hard_timeout * 1000) {
+            reason = OFPRR_HARD_TIMEOUT;
+        }
+    }
+
+    if (reason < 0 && idle_timeout) {
+        long long int used;
+
+        ovs_mutex_lock(&rule->stats_mutex);
+        used = rule->used;
+        ovs_mutex_unlock(&rule->stats_mutex);
+
+        if (now > used + idle_timeout * 1000) {
+            reason = OFPRR_IDLE_TIMEOUT;
+        }
     }
-    ovs_mutex_unlock(&rule->up.mutex);

     if (reason >= 0) {
         COVERAGE_INC(ofproto_dpif_expired);
@@ -2932,7 +3014,7 @@ rule_dpif_credit_stats(struct rule_dpif *rule,
     ovs_mutex_lock(&rule->stats_mutex);
     rule->packet_count += stats->n_packets;
     rule->byte_count += stats->n_bytes;
-    rule->up.used = MAX(rule->up.used, stats->used);
+    rule->used = MAX(rule->used, stats->used);
     ovs_mutex_unlock(&rule->stats_mutex);
 }

@@ -3094,13 +3176,13 @@ rule_dealloc(struct rule *rule_)

 static enum ofperr
 rule_construct(struct rule *rule_)
+    OVS_NO_THREAD_SAFETY_ANALYSIS
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);
-    ovs_mutex_init(&rule->stats_mutex);
-    ovs_mutex_lock(&rule->stats_mutex);
+    ovs_mutex_init_adaptive(&rule->stats_mutex);
     rule->packet_count = 0;
     rule->byte_count = 0;
-    ovs_mutex_unlock(&rule->stats_mutex);
+    rule->used = rule->up.modified;
     return 0;
 }

@@ -3128,13 +3210,15 @@ rule_destruct(struct rule *rule_)
 }

 static void
-rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
+rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
+               long long int *used)
 {
     struct rule_dpif *rule = rule_dpif_cast(rule_);

     ovs_mutex_lock(&rule->stats_mutex);
     *packets = rule->packet_count;
     *bytes = rule->byte_count;
+    *used = rule->used;
     ovs_mutex_unlock(&rule->stats_mutex);
 }

@@ -3210,7 +3294,21 @@ static enum ofperr
 group_construct(struct ofgroup *group_)
 {
     struct group_dpif *group = group_dpif_cast(group_);
-    ovs_mutex_init(&group->stats_mutex);
+    const struct ofputil_bucket *bucket;
+
+    /* Prevent group chaining because our locking structure makes it hard to
+     * implement deadlock-free.  (See xlate_group_resource_check().) */
+    LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+        const struct ofpact *a;
+
+        OFPACT_FOR_EACH (a, bucket->ofpacts, bucket->ofpacts_len) {
+            if (a->type == OFPACT_GROUP) {
+                return OFPERR_OFPGMFC_CHAINING_UNSUPPORTED;
+            }
+        }
+    }
+
+    ovs_mutex_init_adaptive(&group->stats_mutex);
     ovs_mutex_lock(&group->stats_mutex);
     group_construct_stats(group);
     ovs_mutex_unlock(&group->stats_mutex);
@@ -3655,7 +3753,7 @@ parse_flow_and_packet(int argc, const char *argv[],
         }

         if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
-                          NULL, ofprotop, NULL, NULL, NULL, NULL)) {
+                          ofprotop, NULL, NULL, NULL, NULL)) {
             error = "Invalid datapath flow";
             goto exit;
         }
@@ -4040,11 +4138,10 @@ static bool
 ofproto_dpif_contains_flow(const struct ofproto_dpif *ofproto,
                            const struct nlattr *key, size_t key_len)
 {
-    enum odp_key_fitness fitness;
     struct ofproto_dpif *ofp;
     struct flow flow;

-    xlate_receive(ofproto->backer, NULL, key, key_len, &flow, &fitness, &ofp,
+    xlate_receive(ofproto->backer, NULL, key, key_len, &flow, &ofp,
                   NULL, NULL, NULL, NULL);
     return ofp == ofproto;
 }
@@ -4191,12 +4288,8 @@ bool
 ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
     OVS_EXCLUDED(ofproto->vsp_mutex)
 {
-    bool ret;
-
-    ovs_mutex_lock(&ofproto->vsp_mutex);
-    ret = !hmap_is_empty(&ofproto->realdev_vid_map);
-    ovs_mutex_unlock(&ofproto->vsp_mutex);
-    return ret;
+    /* hmap_is_empty is thread safe. */
+    return !hmap_is_empty(&ofproto->realdev_vid_map);
 }

 static ofp_port_t
@@ -4234,6 +4327,10 @@ vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
 {
     ofp_port_t ret;

+    /* hmap_is_empty is thread safe, see if we can return immediately. */
+    if (hmap_is_empty(&ofproto->realdev_vid_map)) {
+        return realdev_ofp_port;
+    }
     ovs_mutex_lock(&ofproto->vsp_mutex);
     ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
     ovs_mutex_unlock(&ofproto->vsp_mutex);
@@ -4297,6 +4394,11 @@ vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
     ofp_port_t realdev;
     int vid;

+    /* hmap_is_empty is thread safe. */
+    if (hmap_is_empty(&ofproto->vlandev_map)) {
+        return false;
+    }
+
     ovs_mutex_lock(&ofproto->vsp_mutex);
     realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
     ovs_mutex_unlock(&ofproto->vsp_mutex);
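
The max_mpls_depth value probed by check_max_mpls_depth() above is exposed through ofproto_dpif_get_max_mpls_depth(), so translation code can avoid building MPLS label stacks deeper than the datapath can match. A minimal sketch of how a caller might consult it follows; the helper mpls_push_depth_ok() is illustrative only and not part of this patch:

    /* Illustrative sketch (not part of the patch): reject a request that
     * would push more MPLS labels than the datapath was probed to support. */
    static bool
    mpls_push_depth_ok(const struct ofproto_dpif *ofproto, size_t n_labels)
    {
        /* max_mpls_depth is probed once in open_dpif_backer() and cached in
         * the dpif_backer, so this is just a cheap comparison. */
        return n_labels <= ofproto_dpif_get_max_mpls_depth(ofproto);
    }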