struct ovs_mutex stats_mutex;
uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
+ long long int used; /* Last used time (msec). */
};
-static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
+static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes,
+ long long int *used);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
static void rule_expire(struct rule_dpif *);
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
ofproto->lacp_enabled = false;
- ovs_mutex_init(&ofproto->stats_mutex);
+ ovs_mutex_init_adaptive(&ofproto->stats_mutex);
ovs_mutex_init(&ofproto->vsp_mutex);
guarded_list_init(&ofproto->pins);
xlate_remove_ofproto(ofproto);
ovs_rwlock_unlock(&xlate_rwlock);
- /* Discard any flow_miss_batches queued up for 'ofproto', avoiding a
- * use-after-free error. */
- udpif_revalidate(ofproto->backer->udpif);
+ /* Ensure that the upcall processing threads have no remaining references
+ * to the ofproto or anything in it. */
+ udpif_synchronize(ofproto->backer->udpif);
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
struct dpif_dp_stats s;
uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
uint64_t n_lookup;
+ long long int used;
strcpy(ots->name, "classifier");
dpif_get_dp_stats(ofproto->backer->dpif, &s);
- rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
- rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
- rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);
-
+ rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes, &used);
+ rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes,
+ &used);
+ rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes,
+ &used);
n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
ots->lookup_count = htonll(n_lookup);
ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
bundle_remove(port_);
set_cfm(port_, NULL);
set_bfd(port_, NULL);
+ if (port->stp_port) {
+ stp_port_disable(port->stp_port);
+ }
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
rule_expire(struct rule_dpif *rule)
    OVS_REQUIRES(ofproto_mutex)
{
    uint16_t hard_timeout, idle_timeout;
    long long int now = time_msec();
    int reason = -1;            /* -1 means "rule has not expired". */

    ovs_assert(!rule->up.pending);

    /* The timeouts are 16-bit fields read without 'up.mutex'.
     * NOTE(review): assumes a torn read of a uint16_t is impossible on
     * supported platforms — confirm against the ofproto locking rules. */
    hard_timeout = rule->up.hard_timeout;
    idle_timeout = rule->up.idle_timeout;

    /* Has 'rule' expired? */
    if (hard_timeout) {
        long long int modified;

        /* 'modified' is guarded by 'up.mutex'; copy it out under the lock
         * so the comparison below runs unlocked. */
        ovs_mutex_lock(&rule->up.mutex);
        modified = rule->up.modified;
        ovs_mutex_unlock(&rule->up.mutex);

        if (now > modified + hard_timeout * 1000) {
            reason = OFPRR_HARD_TIMEOUT;
        }
    }

    if (reason < 0 && idle_timeout) {
        long long int used;

        /* 'used' is guarded by 'stats_mutex' (not 'up.mutex'), since it is
         * updated on the datapath statistics path. */
        ovs_mutex_lock(&rule->stats_mutex);
        used = rule->used;
        ovs_mutex_unlock(&rule->stats_mutex);

        if (now > used + idle_timeout * 1000) {
            reason = OFPRR_IDLE_TIMEOUT;
        }
    }
if (reason >= 0) {
COVERAGE_INC(ofproto_dpif_expired);
execute.md.tunnel = flow->tunnel;
execute.md.skb_priority = flow->skb_priority;
execute.md.pkt_mark = flow->pkt_mark;
- execute.md.in_port = ofp_port_to_odp_port(ofproto, in_port);
+ execute.md.in_port.odp_port = ofp_port_to_odp_port(ofproto, in_port);
execute.needs_help = (xout.slow & SLOW_ACTION) != 0;
error = dpif_execute(ofproto->backer->dpif, &execute);
ovs_mutex_lock(&rule->stats_mutex);
rule->packet_count += stats->n_packets;
rule->byte_count += stats->n_bytes;
- rule->up.used = MAX(rule->up.used, stats->used);
+ rule->used = MAX(rule->used, stats->used);
ovs_mutex_unlock(&rule->stats_mutex);
}
static enum ofperr
rule_construct(struct rule *rule_)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- ovs_mutex_init(&rule->stats_mutex);
- ovs_mutex_lock(&rule->stats_mutex);
+ ovs_mutex_init_adaptive(&rule->stats_mutex);
rule->packet_count = 0;
rule->byte_count = 0;
- ovs_mutex_unlock(&rule->stats_mutex);
+ rule->used = rule->up.modified;
return 0;
}
}
static void
-rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
+rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes,
+ long long int *used)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_lock(&rule->stats_mutex);
*packets = rule->packet_count;
*bytes = rule->byte_count;
+ *used = rule->used;
ovs_mutex_unlock(&rule->stats_mutex);
}
group_construct(struct ofgroup *group_)
{
struct group_dpif *group = group_dpif_cast(group_);
- ovs_mutex_init(&group->stats_mutex);
+ const struct ofputil_bucket *bucket;
+
+ /* Prevent group chaining because our locking structure makes it hard to
+ * implement deadlock-free. (See xlate_group_resource_check().) */
+ LIST_FOR_EACH (bucket, list_node, &group->up.buckets) {
+ const struct ofpact *a;
+
+ OFPACT_FOR_EACH (a, bucket->ofpacts, bucket->ofpacts_len) {
+ if (a->type == OFPACT_GROUP) {
+ return OFPERR_OFPGMFC_CHAINING_UNSUPPORTED;
+ }
+ }
+ }
+
+ ovs_mutex_init_adaptive(&group->stats_mutex);
ovs_mutex_lock(&group->stats_mutex);
group_construct_stats(group);
ovs_mutex_unlock(&group->stats_mutex);
flow_compose(packet, flow);
} else {
union flow_in_port in_port = flow->in_port;
+ struct pkt_metadata md;
/* Use the metadata from the flow and the packet argument
* to reconstruct the flow. */
- flow_extract(packet, flow->skb_priority, flow->pkt_mark, NULL,
- &in_port, flow);
+ pkt_metadata_init(&md, NULL, flow->skb_priority,
+ flow->pkt_mark, &in_port);
+
+ flow_extract(packet, &md, flow);
}
}
struct dpif_port dpif_port;
struct dpif_port_dump port_dump;
struct hmap portno_names;
+ void *state = NULL;
+ int error;
ofproto = ofproto_dpif_lookup(argv[argc - 1]);
if (!ofproto) {
}
ds_init(&ds);
- dpif_flow_dump_start(&flow_dump, ofproto->backer->dpif);
- while (dpif_flow_dump_next(&flow_dump, &key, &key_len, &mask, &mask_len,
- &actions, &actions_len, &stats)) {
+ error = dpif_flow_dump_start(&flow_dump, ofproto->backer->dpif);
+ if (error) {
+ goto exit;
+ }
+ dpif_flow_dump_state_init(ofproto->backer->dpif, &state);
+ while (dpif_flow_dump_next(&flow_dump, state, &key, &key_len,
+ &mask, &mask_len, &actions, &actions_len,
+ &stats)) {
if (!ofproto_dpif_contains_flow(ofproto, key, key_len)) {
continue;
}
format_odp_actions(&ds, actions, actions_len);
ds_put_char(&ds, '\n');
}
+ dpif_flow_dump_state_uninit(ofproto->backer->dpif, state);
+ error = dpif_flow_dump_done(&flow_dump);
- if (dpif_flow_dump_done(&flow_dump)) {
+exit:
+ if (error) {
ds_clear(&ds);
ds_put_format(&ds, "dpif/dump_flows failed: %s", ovs_strerror(errno));
unixctl_command_reply_error(conn, ds_cstr(&ds));
/* Returns true if 'ofproto' has any VLAN splinters configured.
 *
 * Annotated OVS_EXCLUDED(vsp_mutex): no lock is taken here because
 * hmap_is_empty() reads a single counter field, which is safe without
 * serialization against concurrent map updates. */
ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
    OVS_EXCLUDED(ofproto->vsp_mutex)
{
    /* hmap_is_empty is thread safe. */
    return !hmap_is_empty(&ofproto->realdev_vid_map);
}
static ofp_port_t
{
ofp_port_t ret;
+ /* hmap_is_empty is thread safe, see if we can return immediately. */
+ if (hmap_is_empty(&ofproto->realdev_vid_map)) {
+ return realdev_ofp_port;
+ }
ovs_mutex_lock(&ofproto->vsp_mutex);
ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
ovs_mutex_unlock(&ofproto->vsp_mutex);
ofp_port_t realdev;
int vid;
+ /* hmap_is_empty is thread safe. */
+ if (hmap_is_empty(&ofproto->vlandev_map)) {
+ return false;
+ }
+
ovs_mutex_lock(&ofproto->vsp_mutex);
realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
ovs_mutex_unlock(&ofproto->vsp_mutex);