static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static struct rule_dpif *rule_dpif_cast(const struct rule *);
+/* A dpif-layer OpenFlow group: wraps the generic ofgroup with
+ * statistics maintained by this provider. */
+struct group_dpif {
+ struct ofgroup up;
+
+ /* These statistics:
+ *
+ * - Do include packets and bytes from facets that have been deleted or
+ * whose own statistics have been folded into the rule.
+ *
+ * - Do include packets and bytes sent "by hand" that were accounted to
+ * the rule without any facet being involved (this is a rare corner
+ * case in rule_execute()).
+ *
+ * - Do not include packet or bytes that can be obtained from any facet's
+ * packet_count or byte_count member or that can be obtained from the
+ * datapath by, e.g., dpif_flow_get() for any subfacet.
+ *
+ * NOTE(review): the three bullets above appear copied from struct
+ * rule_dpif (they say "rule", "facet", rule_execute()); confirm they
+ * apply verbatim to group statistics.
+ */
+ struct ovs_mutex stats_mutex;
+ uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
+ uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
+ struct bucket_counter *bucket_stats OVS_GUARDED; /* Bucket statistics. */
+};
+
struct ofbundle {
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
struct ofproto_dpif *ofproto; /* Owning ofproto. */
/* Accounting. */
uint64_t accounted_bytes; /* Bytes processed by facet_account(). */
struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
- uint8_t tcp_flags; /* TCP flags seen for this 'rule'. */
+ uint16_t tcp_flags; /* TCP flags seen for this 'rule'. */
struct xlate_out xout;
* Takes ownership of 'pin' and pin->packet. */
void
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
- struct ofputil_packet_in *pin)
+ struct ofproto_packet_in *pin)
{
if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
COVERAGE_INC(packet_in_overflow);
- free(CONST_CAST(void *, pin->packet));
+ /* Queue full (capped at 1024): we still own 'pin' and its packet,
+ * so free both here rather than leaking them. */
+ free(CONST_CAST(void *, pin->up.packet));
free(pin);
}
}
ofport->up.netdev, ofport->cfm,
ofport->bfd, ofport->peer, stp_port,
ofport->qdscp, ofport->n_qdscp,
- ofport->up.pp.config, ofport->is_tunnel,
- ofport->may_enable);
+ ofport->up.pp.config, ofport->up.pp.state,
+ ofport->is_tunnel, ofport->may_enable);
}
ovs_rwlock_unlock(&xlate_rwlock);
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
- struct ofputil_packet_in *pin, *next_pin;
+ struct ofproto_packet_in *pin, *next_pin;
struct facet *facet, *next_facet;
struct cls_cursor cursor;
struct oftable *table;
guarded_list_pop_all(&ofproto->pins, &pins);
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
list_remove(&pin->list_node);
- free(CONST_CAST(void *, pin->packet));
+ free(CONST_CAST(void *, pin->up.packet));
free(pin);
}
guarded_list_destroy(&ofproto->pins);
run_fast(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct ofputil_packet_in *pin, *next_pin;
+ struct ofproto_packet_in *pin, *next_pin;
struct list pins;
/* Do not perform any periodic activity required by 'ofproto' while
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
connmgr_send_packet_in(ofproto->up.connmgr, pin);
list_remove(&pin->list_node);
- free(CONST_CAST(void *, pin->packet));
+ free(CONST_CAST(void *, pin->up.packet));
free(pin);
}
- ofproto_dpif_monitor_run_fast();
return 0;
}
dpif_ipfix_run(ofproto->ipfix);
}
- ofproto_dpif_monitor_run_fast();
- ofproto_dpif_monitor_run();
-
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run(ofport);
}
if (time_msec() >= ofproto->consistency_rl
&& !classifier_is_empty(&ofproto->facets)
&& !ofproto->backer->need_revalidate) {
- struct cls_table *table;
+ struct cls_subtable *table;
struct cls_rule *cr;
struct facet *facet;
ofproto->consistency_rl = time_msec() + 250;
- table = CONTAINER_OF(hmap_random_node(&ofproto->facets.tables),
- struct cls_table, hmap_node);
+ table = CONTAINER_OF(hmap_random_node(&ofproto->facets.subtables),
+ struct cls_subtable, hmap_node);
cr = CONTAINER_OF(hmap_random_node(&table->rules), struct cls_rule,
hmap_node);
facet = CONTAINER_OF(cr, struct facet, cr);
if (ofproto->ipfix) {
dpif_ipfix_wait(ofproto->ipfix);
}
- ofproto_dpif_monitor_wait();
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
bundle_wait(bundle);
}
}
}
-/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
- * 'packet', which arrived on 'in_port'. */
-static bool
-execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct nlattr *odp_actions, size_t actions_len,
- struct ofpbuf *packet)
+/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
+ * 'flow' must reflect the data in 'packet'.
+ *
+ * Exactly one of 'rule' and 'ofpacts' must be nonnull (asserted below).
+ * When a rule is given, the packet is also credited to its statistics.
+ *
+ * Returns 0 on success, otherwise an error code from dpif_execute(). */
+int
+ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ struct rule_dpif *rule,
+ const struct ofpact *ofpacts, size_t ofpacts_len,
+ struct ofpbuf *packet)
{
struct odputil_keybuf keybuf;
+ struct dpif_flow_stats stats;
+ struct xlate_out xout;
+ struct xlate_in xin;
+ ofp_port_t in_port;
struct ofpbuf key;
int error;
+ /* Callers supply either a rule or explicit actions, never both. */
+ ovs_assert((rule != NULL) != (ofpacts != NULL));
+
+ dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
+ if (rule) {
+ /* Account this packet to the rule's stats before translation. */
+ rule_dpif_credit_stats(rule, &stats);
+ }
+
+ xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
+ xin.ofpacts = ofpacts;
+ xin.ofpacts_len = ofpacts_len;
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
+
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto, flow->in_port.ofp_port));
+ in_port = flow->in_port.ofp_port;
+ /* NOTE(review): OFPP_NONE (no ingress port) presumably has no datapath
+ * port mapping, so substitute OFPP_LOCAL before converting -- confirm
+ * against ofp_port_to_odp_port(). */
+ if (in_port == OFPP_NONE) {
+ in_port = OFPP_LOCAL;
+ }
+ odp_flow_key_from_flow(&key, flow, ofp_port_to_odp_port(ofproto, in_port));
error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
- odp_actions, actions_len, packet);
- return !error;
+ xout.odp_actions.data, xout.odp_actions.size, packet,
+ (xout.slow & SLOW_ACTION) != 0);
+ xlate_out_uninit(&xout);
+
+ return error;
}
/* Remove 'facet' from its ofproto and free up the associated memory:
}
+/* Returns true if 'rule' is a fail-open rule, as decided by
+ * is_fail_open_rule(). */
bool
-rule_dpif_fail_open(const struct rule_dpif *rule)
+rule_dpif_is_fail_open(const struct rule_dpif *rule)
+{
+ return is_fail_open_rule(&rule->up);
+}
+
+/* Returns true if 'rule' is a table-miss rule, as decided by
+ * rule_is_table_miss(). */
+bool
+rule_dpif_is_table_miss(const struct rule_dpif *rule)
{
- return rule->up.cr.priority == FAIL_OPEN_PRIORITY;
+ return rule_is_table_miss(&rule->up);
}
ovs_be64
cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
} else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
cls_rule = &ofproto->drop_frags_rule->up.cr;
- if (wc) {
- flow_wildcards_init_exact(wc);
- }
+ /* Frag mask in wc already set above. */
} else {
cls_rule = classifier_lookup(cls, flow, wc);
}
struct ofpbuf *packet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct dpif_flow_stats stats;
- struct xlate_out xout;
- struct xlate_in xin;
-
- dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- rule_dpif_credit_stats(rule, &stats);
-
- xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
- xin.resubmit_stats = &stats;
- xlate_actions(&xin, &xout);
-
- execute_odp_actions(ofproto, flow, xout.odp_actions.data,
- xout.odp_actions.size, packet);
- xlate_out_uninit(&xout);
+ ofproto_dpif_execute_actions(ofproto, flow, rule, NULL, 0, packet);
}
static enum ofperr
complete_operation(rule);
}
+
+/* Returns the group_dpif embedding 'group', or NULL if 'group' is NULL. */
+static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
+{
+ return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
+}
+
+/* ofproto 'group_alloc' hook: allocates a zero-filled group_dpif and
+ * returns its embedded ofgroup for the ofproto layer to initialize. */
+static struct ofgroup *
+group_alloc(void)
+{
+ struct group_dpif *group = xzalloc(sizeof *group);
+ return &group->up;
+}
+
+/* ofproto 'group_dealloc' hook: frees the group_dpif allocated by
+ * group_alloc(). */
+static void
+group_dealloc(struct ofgroup *group_)
+{
+ struct group_dpif *group = group_dpif_cast(group_);
+ free(group);
+}
+
+/* Zeroes 'group''s aggregate packet/byte counters and its per-bucket
+ * statistics, allocating the bucket array (sized to up.n_buckets) on
+ * first use.  Caller must hold stats_mutex (see OVS_REQUIRES). */
+static void
+group_construct_stats(struct group_dpif *group)
+ OVS_REQUIRES(group->stats_mutex)
+{
+ group->packet_count = 0;
+ group->byte_count = 0;
+ if (!group->bucket_stats) {
+ group->bucket_stats = xcalloc(group->up.n_buckets,
+ sizeof *group->bucket_stats);
+ } else {
+ memset(group->bucket_stats, 0, group->up.n_buckets *
+ sizeof *group->bucket_stats);
+ }
+}
+
+/* ofproto 'group_construct' hook: initializes the stats mutex and zeroed
+ * statistics for a newly allocated group.  Always returns 0 (success). */
+static enum ofperr
+group_construct(struct ofgroup *group_)
+{
+ struct group_dpif *group = group_dpif_cast(group_);
+ ovs_mutex_init(&group->stats_mutex);
+ ovs_mutex_lock(&group->stats_mutex);
+ group_construct_stats(group);
+ ovs_mutex_unlock(&group->stats_mutex);
+ return 0;
+}
+
+/* Frees 'group''s per-bucket statistics array.  Clearing the pointer lets
+ * a later group_construct_stats() reallocate it (possibly at a new size). */
+static void
+group_destruct__(struct group_dpif *group)
+ OVS_REQUIRES(group->stats_mutex)
+{
+ free(group->bucket_stats);
+ group->bucket_stats = NULL;
+}
+
+/* ofproto 'group_destruct' hook: releases stats storage and destroys the
+ * stats mutex.  The group itself is freed later by group_dealloc(). */
+static void
+group_destruct(struct ofgroup *group_)
+{
+ struct group_dpif *group = group_dpif_cast(group_);
+ ovs_mutex_lock(&group->stats_mutex);
+ group_destruct__(group);
+ ovs_mutex_unlock(&group->stats_mutex);
+ ovs_mutex_destroy(&group->stats_mutex);
+}
+
+/* ofproto 'group_modify' hook: resets 'group''s statistics on modification.
+ *
+ * NOTE(review): this assumes 'group''s bucket_stats array was carried over
+ * from 'victim' (the pre-modification group) and is therefore sized for
+ * victim->up.n_buckets; when the new group has more buckets the array is
+ * freed so group_construct_stats() reallocates it at the larger size --
+ * confirm against the ofproto group-mod path. */
+static enum ofperr
+group_modify(struct ofgroup *group_, struct ofgroup *victim_)
+{
+ struct group_dpif *group = group_dpif_cast(group_);
+ struct group_dpif *victim = group_dpif_cast(victim_);
+
+ ovs_mutex_lock(&group->stats_mutex);
+ if (victim->up.n_buckets < group->up.n_buckets) {
+ group_destruct__(group);
+ }
+ group_construct_stats(group);
+ ovs_mutex_unlock(&group->stats_mutex);
+
+ return 0;
+}
+
+/* ofproto 'group_get_stats' hook: copies 'group''s accumulated packet,
+ * byte, and per-bucket counters into 'ogs'.
+ *
+ * NOTE(review): assumes the caller allocated ogs->bucket_stats with room
+ * for group->up.n_buckets entries -- confirm against the caller. */
+static enum ofperr
+group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
+{
+ struct group_dpif *group = group_dpif_cast(group_);
+
+ /* Start from historical data for 'group' itself that are no longer tracked
+ * in facets. This counts, for example, facets that have expired. */
+ ovs_mutex_lock(&group->stats_mutex);
+ ogs->packet_count = group->packet_count;
+ ogs->byte_count = group->byte_count;
+ memcpy(ogs->bucket_stats, group->bucket_stats,
+ group->up.n_buckets * sizeof *group->bucket_stats);
+ ovs_mutex_unlock(&group->stats_mutex);
+
+ return 0;
+}
+
+/* Looks up the group with 'group_id' in 'ofproto'.  On success, stores the
+ * dpif wrapper in '*group' and returns true; per the OVS_TRY_RDLOCK
+ * annotation the group's read lock is then held (presumably taken inside
+ * ofproto_group_lookup()) and must be released with group_dpif_release().
+ * On failure, stores NULL in '*group' and returns false. */
+bool
+group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
+ struct group_dpif **group)
+ OVS_TRY_RDLOCK(true, (*group)->up.rwlock)
+{
+ struct ofgroup *ofgroup;
+ bool found;
+
+ found = ofproto_group_lookup(&ofproto->up, group_id, &ofgroup);
+ /* This single assignment covers both outcomes; the previous separate
+ * "*group = NULL;" pre-initialization was a dead store. */
+ *group = found ? group_dpif_cast(ofgroup) : NULL;
+
+ return found;
+}
+
+/* Releases the read lock held on 'group' after a successful
+ * group_dpif_lookup() (see the OVS_RELEASES annotation). */
+void
+group_dpif_release(struct group_dpif *group)
+ OVS_RELEASES(group->up.rwlock)
+{
+ ofproto_group_release(&group->up);
+}
+
+/* Stores in '*buckets' a pointer to 'group''s bucket list (not a copy;
+ * the list remains owned by 'group'). */
+void
+group_dpif_get_buckets(const struct group_dpif *group,
+ const struct list **buckets)
+{
+ *buckets = &group->up.buckets;
+}
+
+/* Returns 'group''s OpenFlow group type. */
+enum ofp11_group_type
+group_dpif_get_type(const struct group_dpif *group)
+{
+ return group->up.type;
+}
\f
/* Sends 'packet' out 'ofport'.
* May modify 'packet'.
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct odputil_keybuf keybuf;
- struct dpif_flow_stats stats;
- struct xlate_out xout;
- struct xlate_in xin;
- struct ofpbuf key;
-
-
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto,
- flow->in_port.ofp_port));
-
- dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
-
- xlate_in_init(&xin, ofproto, flow, NULL, stats.tcp_flags, packet);
- xin.resubmit_stats = &stats;
- xin.ofpacts_len = ofpacts_len;
- xin.ofpacts = ofpacts;
-
- xlate_actions(&xin, &xout);
- dpif_execute(ofproto->backer->dpif, key.data, key.size,
- xout.odp_actions.data, xout.odp_actions.size, packet);
- xlate_out_uninit(&xout);
+ ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
+ ofpacts_len, packet);
return 0;
}
\f
actions = rule_dpif_get_actions(rule);
ds_put_char_multiple(result, '\t', level);
- ds_put_cstr(result, "OpenFlow ");
+ ds_put_cstr(result, "OpenFlow actions=");
ofpacts_format(actions->ofpacts, actions->ofpacts_len, result);
ds_put_char(result, '\n');
goto exit;
}
ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
- } else if (!parse_ofp_exact_flow(&flow, argv[argc - 1])) {
+ } else if (!parse_ofp_exact_flow(&flow, NULL, argv[argc - 1], NULL)) {
if (argc != 3) {
unixctl_command_reply_error(conn, "Must specify bridge name");
goto exit;
struct ofpbuf odp_actions;
struct trace_ctx trace;
struct match match;
- uint8_t tcp_flags;
+ uint16_t tcp_flags;
tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
trace.result = ds;
trace.xout.odp_actions.size);
if (trace.xout.slow) {
+ enum slow_path_reason slow;
+
ds_put_cstr(ds, "\nThis flow is handled by the userspace "
"slow path because it:");
- switch (trace.xout.slow) {
- case SLOW_CFM:
- ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
- break;
- case SLOW_LACP:
- ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
- break;
- case SLOW_STP:
- ds_put_cstr(ds, "\n\t- Consists of STP packets.");
- break;
- case SLOW_BFD:
- ds_put_cstr(ds, "\n\t- Consists of BFD packets.");
- break;
- case SLOW_CONTROLLER:
- ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
- "to the OpenFlow controller.");
- break;
- case __SLOW_MAX:
- NOT_REACHED();
+
+ slow = trace.xout.slow;
+ while (slow) {
+ enum slow_path_reason bit = rightmost_1bit(slow);
+
+ ds_put_format(ds, "\n\t- %s.",
+ slow_path_reason_to_explanation(bit));
+
+ slow &= ~bit;
}
}
NULL, /* meter_set */
NULL, /* meter_get */
NULL, /* meter_del */
- NULL, /* group_alloc */
- NULL, /* group_construct */
- NULL, /* group_destruct */
- NULL, /* group_dealloc */
- NULL, /* group_modify */
- NULL, /* group_get_stats */
+ group_alloc, /* group_alloc */
+ group_construct, /* group_construct */
+ group_destruct, /* group_destruct */
+ group_dealloc, /* group_dealloc */
+ group_modify, /* group_modify */
+ group_get_stats, /* group_get_stats */
};