struct rule {
struct cls_rule cr;
- uint64_t flow_cookie; /* Controller-issued identifier.
+ uint64_t flow_cookie; /* Controller-issued identifier.
(Kept in network-byte order.) */
uint16_t idle_timeout; /* In seconds from time of last use. */
uint16_t hard_timeout; /* In seconds from time of creation. */
static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
-static void update_used(struct ofproto *);
+static int ofproto_expire(struct ofproto *);
+
static void update_stats(struct ofproto *, struct rule *,
const struct odp_flow_stats *);
-static void expire_rule(struct cls_rule *, void *ofproto);
-static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);
}
if (time_msec() >= p->next_expiration) {
+ int delay = ofproto_expire(p);
+ p->next_expiration = time_msec() + delay;
COVERAGE_INC(ofproto_expiration);
- p->next_expiration = time_msec() + 1000;
- update_used(p);
-
- classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);
-
- /* Let the hook know that we're at a stable point: all outstanding data
- * in existing flows has been accounted to the account_cb. Thus, the
- * hook can now reasonably do operations that depend on having accurate
- * flow volume accounting (currently, that's just bond rebalancing). */
- if (p->ofhooks->account_checkpoint_cb) {
- p->ofhooks->account_checkpoint_cb(p->aux);
- }
}
if (p->netflow) {
{
struct rule *rule;
rule = rule_create(p, NULL, actions, n_actions,
- idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
+ idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
0, 0, false);
cls_rule_from_flow(flow, wildcards, priority, &rule->cr);
rule_insert(p, rule, NULL, 0);
enum netdev_flags flags;
struct ofport *ofport;
struct netdev *netdev;
- bool carrier;
int error;
memset(&netdev_options, 0, sizeof netdev_options);
netdev_get_flags(netdev, &flags);
ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
- netdev_get_carrier(netdev, &carrier);
- ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
+ ofport->opp.state = netdev_get_carrier(netdev) ? 0 : OFPPS_LINK_DOWN;
netdev_get_features(netdev,
&ofport->opp.curr, &ofport->opp.advertised,
return false;
}
+/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
+ * 'packet', which arrived on 'in_port'.
+ *
+ * Takes ownership of 'packet'. */
+static bool
+execute_odp_actions(struct ofproto *ofproto, uint16_t in_port,
+                    const union odp_action *actions, size_t n_actions,
+                    struct ofpbuf *packet)
+{
+    if (n_actions == 1 && actions[0].type == ODPAT_CONTROLLER) {
+        /* As an optimization, avoid a round-trip from userspace to kernel to
+         * userspace.  This also avoids possibly filling up kernel packet
+         * buffers along the way. */
+        struct odp_msg *msg;
+
+        /* Prepend a fake kernel-style odp_msg header; send_packet_in() below
+         * expects the packet in that form. */
+        msg = ofpbuf_push_uninit(packet, sizeof *msg);
+        msg->type = _ODPL_ACTION_NR;
+        /* NOTE(review): 'packet->size' already includes the header pushed
+         * just above, so adding sizeof(struct odp_msg) again looks like it
+         * double-counts the header -- verify against the consumers of
+         * 'msg->length' in the send_packet_in() path. */
+        msg->length = sizeof(struct odp_msg) + packet->size;
+        msg->port = in_port;
+        msg->reserved = 0;
+        msg->arg = actions[0].controller.arg;
+
+        send_packet_in(ofproto, packet);
+
+        return true;
+    } else {
+        int error;
+
+        /* Hand the packet to the datapath for full action processing;
+         * dpif_execute() does not take ownership, so free it afterward. */
+        error = dpif_execute(ofproto->dpif, in_port,
+                             actions, n_actions, packet);
+        ofpbuf_delete(packet);
+        return !error;
+    }
+}
+
/* Executes the actions indicated by 'rule' on 'packet', which is in flow
- * 'flow' and is considered to have arrived on ODP port 'in_port'.
+ * 'flow' and is considered to have arrived on ODP port 'in_port'. 'packet'
+ * must have at least sizeof(struct ofp_packet_in) bytes of headroom.
*
* The flow that 'packet' actually contains does not need to actually match
* 'rule'; the actions in 'rule' will be applied to it either way. Likewise,
* 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
* 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
* function will compose a set of ODP actions based on 'rule''s OpenFlow
- * actions and apply them to 'packet'. */
+ * actions and apply them to 'packet'.
+ *
+ * Takes ownership of 'packet'. */
static void
rule_execute(struct ofproto *ofproto, struct rule *rule,
struct ofpbuf *packet, const flow_t *flow)
{
const union odp_action *actions;
+ struct odp_flow_stats stats;
size_t n_actions;
struct odp_actions a;
+ assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
+
/* Grab or compose the ODP actions.
*
* The special case for an exact-match 'rule' where 'flow' is not the
struct rule *super = rule->super ? rule->super : rule;
if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
packet, &a, NULL, 0, NULL)) {
+ ofpbuf_delete(packet);
return;
}
actions = a.actions;
}
/* Execute the ODP actions. */
- if (!dpif_execute(ofproto->dpif, flow->in_port,
- actions, n_actions, packet)) {
- struct odp_flow_stats stats;
- flow_extract_stats(flow, packet, &stats);
+ flow_extract_stats(flow, packet, &stats);
+ if (execute_odp_actions(ofproto, flow->in_port,
+ actions, n_actions, packet)) {
update_stats(ofproto, rule, &stats);
rule->used = time_msec();
netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
}
}
+/* Inserts 'rule' into 'p''s flow table.
+ *
+ * If 'packet' is nonnull, takes ownership of 'packet', executes 'rule''s
+ * actions on it and credits the statistics for sending the packet to 'rule'.
+ * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of
+ * headroom. */
static void
rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
uint16_t in_port)
return subrule;
}
+/* Remove 'rule' from 'ofproto' and free up the associated memory:
+ *
+ * - If 'rule' was installed in the datapath, uninstalls it and updates
+ * 'rule''s statistics (or its super-rule's statistics, if it is a
+ * subrule), via rule_uninstall().
+ *
+ * - Removes 'rule' from the classifier.
+ *
+ * - If 'rule' is a super-rule that has subrules, revalidates (and possibly
+ * uninstalls and destroys) its subrules, via rule_destroy().
+ */
static void
rule_remove(struct ofproto *ofproto, struct rule *rule)
{
}
}
+/* 'rule' must be an exact-match rule in 'p'.
+ *
+ * If 'rule' is installed in the datapath, uninstalls it and updates its
+ * statistics. If 'rule' is a subrule, the statistics that are updated are
+ * actually its super-rule's statistics; otherwise 'rule''s own statistics are
+ * updated.
+ *
+ * If 'rule' is not installed, this function has no effect. */
static void
rule_uninstall(struct ofproto *p, struct rule *rule)
{
ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
break;
+ case NXAST_DROP_SPOOFED_ARP:
+ if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
+ odp_actions_add(ctx->out, ODPAT_DROP_SPOOFED_ARP);
+ }
+ break;
+
/* If you add a new action here that modifies flow data, don't forget to
* update the flow key in ctx->flow at the same time. */
return 0;
}
-static void
-count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
-{
- struct rule *rule = rule_from_cls_rule(cls_rule);
- int *n_subrules = n_subrules_;
-
- if (rule->super) {
- (*n_subrules)++;
- }
-}
-
static int
handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
struct ofp_stats_request *request)
struct ofpbuf *msg;
struct odp_stats dpstats;
int n_exact, n_subrules, n_wild;
+ struct rule *rule;
msg = start_stats_reply(request, sizeof *ots * 2);
/* Count rules of various kinds. */
n_subrules = 0;
- classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
+ CLASSIFIER_FOR_EACH_EXACT_RULE (rule, struct rule, cr, &p->cls) {
+ if (rule->super) {
+ n_subrules++;
+ }
+ }
n_exact = classifier_count_exact(&p->cls) - n_subrules;
n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
}
static void
-append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
+append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
struct ofpbuf **msgp)
{
struct netdev_stats stats;
struct ofp_port_stats *ops;
- /* Intentionally ignore return value, since errors will set
- * 'stats' to all-1s, which is correct for OpenFlow, and
+ /* Intentionally ignore return value, since errors will set
+ * 'stats' to all-1s, which is correct for OpenFlow, and
* netdev_get_stats() will log errors. */
netdev_get_stats(port->netdev, &stats);
msg = start_stats_reply(osr, sizeof *ops * 16);
if (psr->port_no != htons(OFPP_NONE)) {
- port = port_array_get(&p->ports,
+ port = port_array_get(&p->ports,
ofp_port_to_odp_port(ntohs(psr->port_no)));
if (port) {
append_port_stat(port, ntohs(psr->port_no), ofconn, &msg);
ds_put_cstr(results, "\n");
}
-/* Adds a pretty-printed description of all flows to 'results', including
+/* Adds a pretty-printed description of all flows to 'results', including
* those marked hidden by secchan (e.g., by in-band control). */
void
ofproto_get_all_flows(struct ofproto *p, struct ds *results)
}
rule_insert(p, rule, packet, in_port);
- ofpbuf_delete(packet);
return error;
}
flow_extract(packet, 0, in_port, &flow);
rule_execute(ofproto, rule, packet, &flow);
- ofpbuf_delete(packet);
return 0;
}
}
}
- rule_execute(p, rule, &payload, &flow);
- rule_reinstall(p, rule);
-
if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY) {
/*
* Extra-special case for fail-open mode.
*
* See the top-level comment in fail-open.c for more information.
*/
- send_packet_in(p, packet);
- } else {
- ofpbuf_delete(packet);
+ send_packet_in(p, ofpbuf_clone_with_headroom(packet,
+ DPIF_RECV_MSG_PADDING));
}
+
+ ofpbuf_pull(packet, sizeof *msg);
+ rule_execute(p, rule, packet, &flow);
+ rule_reinstall(p, rule);
}
static void
}
}
\f
+/* Flow expiration. */
+
+/* Context passed through classifier_for_each() to rule_expire(). */
+struct expire_cbdata {
+    struct ofproto *ofproto;    /* Switch that owns the flow table. */
+    int dp_max_idle;            /* Idle cutoff (msec) for datapath flows,
+                                 * computed by ofproto_dp_max_idle(). */
+};
+
+static int ofproto_dp_max_idle(const struct ofproto *);
+static void ofproto_update_used(struct ofproto *);
+static void rule_expire(struct cls_rule *, void *cbdata);
+
+/* This function is called periodically by ofproto_run().  Its job is to
+ * collect updates for the flows that have been installed into the datapath,
+ * most importantly when they last were used, and then use that information to
+ * expire flows that have not been used recently.
+ *
+ * Returns the number of milliseconds after which it should be called again. */
+static int
+ofproto_expire(struct ofproto *ofproto)
+{
+    struct expire_cbdata cbdata;
+
+    /* Update 'used' for each flow in the datapath. */
+    ofproto_update_used(ofproto);
+
+    /* Expire idle flows.
+     *
+     * A wildcarded flow is idle only when all of its subrules have expired due
+     * to becoming idle, so iterate through the exact-match flows first. */
+    cbdata.ofproto = ofproto;
+    cbdata.dp_max_idle = ofproto_dp_max_idle(ofproto);
+    classifier_for_each(&ofproto->cls, CLS_INC_EXACT, rule_expire, &cbdata);
+    classifier_for_each(&ofproto->cls, CLS_INC_WILD, rule_expire, &cbdata);
+
+    /* Let the hook know that we're at a stable point: all outstanding data
+     * in existing flows has been accounted to the account_cb.  Thus, the
+     * hook can now reasonably do operations that depend on having accurate
+     * flow volume accounting (currently, that's just bond rebalancing). */
+    if (ofproto->ofhooks->account_checkpoint_cb) {
+        ofproto->ofhooks->account_checkpoint_cb(ofproto->aux);
+    }
+
+    /* Never sleep longer than a second; 'dp_max_idle' may be much larger. */
+    return MIN(cbdata.dp_max_idle, 1000);
+}
+
+/* Update 'used' member of each flow currently installed into the datapath. */
+static void
+ofproto_update_used(struct ofproto *p)
+{
+    struct odp_flow *flows;
+    size_t n_flows;
+    size_t i;
+    int error;
+
+    /* Snapshot every flow (with statistics) currently in the datapath.
+     * On failure there is nothing to fold in, so just bail out. */
+    error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
+    if (error) {
+        return;
+    }
+
+    for (i = 0; i < n_flows; i++) {
+        struct odp_flow *f = &flows[i];
+        struct rule *rule;
+
+        /* Datapath flows are exact-match, so look for an exact-match rule
+         * across the full priority range. */
+        rule = rule_from_cls_rule(
+            classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
+
+        if (rule && rule->installed) {
+            /* Fold the datapath statistics into the rule's 'used' time and
+             * accounting hooks. */
+            update_time(p, rule, &f->stats);
+            rule_account(p, rule, f->stats.n_bytes);
+        } else {
+            /* There's a flow in the datapath that we know nothing about.
+             * Delete it. */
+            COVERAGE_INC(ofproto_unexpected_rule);
+            dpif_flow_del(p->dpif, f);
+        }
+
+    }
+    free(flows);
+}
+
+/* Calculates and returns the number of milliseconds of idle time after which
+ * flows should expire from the datapath and we should fold their statistics
+ * into their parent rules in userspace. */
+static int
+ofproto_dp_max_idle(const struct ofproto *ofproto)
+{
+    /*
+     * Idle time histogram.
+     *
+     * Most of the time a switch has a relatively small number of flows.  When
+     * this is the case we might as well keep statistics for all of them in
+     * userspace and to cache them in the kernel datapath for performance as
+     * well.
+     *
+     * As the number of flows increases, the memory required to maintain
+     * statistics about them in userspace and in the kernel becomes
+     * significant.  However, with a large number of flows it is likely that
+     * only a few of them are "heavy hitters" that consume a large amount of
+     * bandwidth.  At this point, only heavy hitters are worth caching in the
+     * kernel and maintaining in userspace; other flows we can discard.
+     *
+     * The technique used to compute the idle time is to build a histogram with
+     * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each flow
+     * that is installed in the kernel gets dropped in the appropriate bucket.
+     * After the histogram has been built, we compute the cutoff so that only
+     * the most-recently-used 1% of flows (but at least 1000 flows) are kept
+     * cached.  At least the most-recently-used bucket of flows is kept, so
+     * actually an arbitrary number of flows can be kept in any given
+     * expiration run (though the next run will delete most of those unless
+     * they receive additional data).
+     *
+     * This requires a second pass through the exact-match flows, in addition
+     * to the pass made by ofproto_update_used(), because the former function
+     * never looks at uninstallable flows.
+     */
+    enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
+    enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
+    int buckets[N_BUCKETS] = { 0 };
+    int total, bucket;
+    struct rule *rule;
+    long long int now;
+    int i;
+
+    /* With at most 1000 exact-match flows, keep statistics for all of them
+     * by returning the maximum possible idle time. */
+    total = classifier_count_exact(&ofproto->cls);
+    if (total <= 1000) {
+        return N_BUCKETS * BUCKET_WIDTH;
+    }
+
+    /* Build histogram. */
+    now = time_msec();
+    CLASSIFIER_FOR_EACH_EXACT_RULE (rule, struct rule, cr, &ofproto->cls) {
+        long long int idle = now - rule->used;
+        /* Clamp the index into [0, N_BUCKETS - 1].  (Named 'idle_bucket' so
+         * it does not shadow 'bucket' declared above.) */
+        int idle_bucket = (idle <= 0 ? 0
+                           : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
+                           : (unsigned int) idle / BUCKET_WIDTH);
+        buckets[idle_bucket]++;
+    }
+
+    /* Find the first bucket whose flows should be expired. */
+    for (bucket = 0; bucket < N_BUCKETS; bucket++) {
+        if (buckets[bucket]) {
+            int subtotal = 0;
+            do {
+                subtotal += buckets[bucket++];
+            } while (bucket < N_BUCKETS && subtotal < MAX(1000, total / 100));
+            break;
+        }
+    }
+
+    if (VLOG_IS_DBG_ENABLED()) {
+        struct ds s;
+
+        ds_init(&s);
+        ds_put_cstr(&s, "keep");
+        for (i = 0; i < N_BUCKETS; i++) {
+            if (i == bucket) {
+                ds_put_cstr(&s, ", drop");
+            }
+            if (buckets[i]) {
+                ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
+            }
+        }
+        /* Log at DBG level to match the VLOG_IS_DBG_ENABLED() guard above;
+         * logging at INFO here would show up only when DBG was enabled. */
+        VLOG_DBG("%s: %s (msec:count)",
+                 dpif_name(ofproto->dpif), ds_cstr(&s));
+        ds_destroy(&s);
+    }
+
+    return bucket * BUCKET_WIDTH;
+}
+
+/* Sends a NetFlow active-timeout record for 'rule' if NetFlow is enabled,
+ * 'rule' is not a controller-originated flow, and the flow's active timeout
+ * has expired.  Refreshes the flow's statistics from the datapath first so
+ * that the record reflects the latest counts. */
+static void
+rule_active_timeout(struct ofproto *ofproto, struct rule *rule)
+{
+    if (ofproto->netflow && !is_controller_rule(rule) &&
+        netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
+        struct ofexpired expired;
+        struct odp_flow odp_flow;
+
+        /* Get updated flow stats.
+         *
+         * XXX We could avoid this call entirely if (1) ofproto_update_used()
+         * updated TCP flags and (2) the dpif_flow_list_all() in
+         * ofproto_update_used() zeroed TCP flags. */
+        memset(&odp_flow, 0, sizeof odp_flow);
+        if (rule->installed) {
+            odp_flow.key = rule->cr.flow;
+            odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
+            dpif_flow_get(ofproto->dpif, &odp_flow);
+
+            if (odp_flow.stats.n_packets) {
+                update_time(ofproto, rule, &odp_flow.stats);
+                netflow_flow_update_flags(&rule->nf_flow,
+                                          odp_flow.stats.tcp_flags);
+            }
+        }
+
+        /* 'odp_flow.stats' was zeroed above when the rule is not installed,
+         * so these sums are safe either way. */
+        expired.flow = rule->cr.flow;
+        expired.packet_count = rule->packet_count +
+                               odp_flow.stats.n_packets;
+        expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
+        expired.used = rule->used;
+
+        /* NOTE(review): the old active_timeout() called
+         * poll_immediate_wake() after netflow_expire() to flush accumulated
+         * records promptly; verify that this wakeup now happens elsewhere
+         * (e.g. in the NetFlow run/wait path) or records may be delayed. */
+        netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
+    }
+}
+
+/* If 'cls_rule' is an OpenFlow rule, that has expired according to OpenFlow
+ * rules, then delete it entirely.
+ *
+ * If 'cls_rule' is a subrule, that has not been used recently, remove it from
+ * the datapath and fold its statistics back into its super-rule.
+ *
+ * (This is a callback function for classifier_for_each().) */
+static void
+rule_expire(struct cls_rule *cls_rule, void *cbdata_)
+{
+    struct expire_cbdata *cbdata = cbdata_;
+    struct ofproto *ofproto = cbdata->ofproto;
+    struct rule *rule = rule_from_cls_rule(cls_rule);
+    long long int hard_expire, idle_expire, expire, now;
+
+    /* Calculate OpenFlow expiration times for 'rule'.  A timeout of 0 means
+     * "never", represented here as LLONG_MAX. */
+    hard_expire = (rule->hard_timeout
+                   ? rule->created + rule->hard_timeout * 1000
+                   : LLONG_MAX);
+    /* A super-rule with live subrules cannot idle-expire: only subrules, or
+     * rules whose subrule list is empty, are eligible. */
+    idle_expire = (rule->idle_timeout
+                   && (rule->super || list_is_empty(&rule->list))
+                   ? rule->used + rule->idle_timeout * 1000
+                   : LLONG_MAX);
+    expire = MIN(hard_expire, idle_expire);
+
+    now = time_msec();
+    if (now < expire) {
+        /* 'rule' has not expired according to OpenFlow rules. */
+        if (!rule->cr.wc.wildcards) {
+            if (now >= rule->used + cbdata->dp_max_idle) {
+                /* This rule is idle, so drop it to free up resources. */
+                if (rule->super) {
+                    /* It's not part of the OpenFlow flow table, so we can
+                     * delete it entirely and fold its statistics into its
+                     * super-rule. */
+                    rule_remove(ofproto, rule);
+                } else {
+                    /* It is part of the OpenFlow flow table, so we have to
+                     * keep the rule but we can at least uninstall it from the
+                     * datapath. */
+                    rule_uninstall(ofproto, rule);
+                }
+            } else {
+                /* Send NetFlow active timeout if appropriate. */
+                rule_active_timeout(cbdata->ofproto, rule);
+            }
+        }
+    } else {
+        /* 'rule' has expired according to OpenFlow rules. */
+        COVERAGE_INC(ofproto_expired);
+
+        /* Update stats.  (This is a no-op if the rule expired due to an idle
+         * timeout, because that only happens when the rule has no subrules
+         * left.) */
+        if (rule->cr.wc.wildcards) {
+            struct rule *subrule, *next;
+            /* _SAFE because rule_remove() unlinks 'subrule' from the list. */
+            LIST_FOR_EACH_SAFE (subrule, next, struct rule, list,
+                                &rule->list) {
+                rule_remove(cbdata->ofproto, subrule);
+            }
+        } else {
+            rule_uninstall(cbdata->ofproto, rule);
+        }
+
+        /* Get rid of the rule.  Notify controllers first (unless the rule is
+         * hidden), while its final statistics are still available. */
+        if (!rule_is_hidden(rule)) {
+            send_flow_removed(cbdata->ofproto, rule, now,
+                              (now >= hard_expire
+                               ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
+        }
+        rule_remove(cbdata->ofproto, rule);
+    }
+}
+\f
static void
revalidate_cb(struct cls_rule *sub_, void *cbdata_)
{
return buf;
}
-static void
-uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
-{
- assert(rule->installed);
- assert(!rule->cr.wc.wildcards);
-
- if (rule->super) {
- rule_remove(ofproto, rule);
- } else {
- rule_uninstall(ofproto, rule);
- }
-}
-
static void
send_flow_removed(struct ofproto *p, struct rule *rule,
long long int now, uint8_t reason)
struct ofconn *prev;
struct ofpbuf *buf = NULL;
+ if (!rule->send_flow_removed) {
+ return;
+ }
+
/* We limit the maximum number of queued flow expirations it by accounting
* them under the counter for replies. That works because preventing
* OpenFlow requests from being processed also prevents new flows from
prev = NULL;
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
- if (rule->send_flow_removed && rconn_is_connected(ofconn->rconn)
+ if (rconn_is_connected(ofconn->rconn)
&& ofconn_receives_async_msgs(ofconn)) {
if (prev) {
queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
}
}
-
-static void
-expire_rule(struct cls_rule *cls_rule, void *p_)
-{
- struct ofproto *p = p_;
- struct rule *rule = rule_from_cls_rule(cls_rule);
- long long int hard_expire, idle_expire, expire, now;
-
- hard_expire = (rule->hard_timeout
- ? rule->created + rule->hard_timeout * 1000
- : LLONG_MAX);
- idle_expire = (rule->idle_timeout
- && (rule->super || list_is_empty(&rule->list))
- ? rule->used + rule->idle_timeout * 1000
- : LLONG_MAX);
- expire = MIN(hard_expire, idle_expire);
-
- now = time_msec();
- if (now < expire) {
- if (rule->installed && now >= rule->used + 5000) {
- uninstall_idle_flow(p, rule);
- } else if (!rule->cr.wc.wildcards) {
- active_timeout(p, rule);
- }
-
- return;
- }
-
- COVERAGE_INC(ofproto_expired);
-
- /* Update stats. This code will be a no-op if the rule expired
- * due to an idle timeout. */
- if (rule->cr.wc.wildcards) {
- struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
- rule_remove(p, subrule);
- }
- } else {
- rule_uninstall(p, rule);
- }
-
- if (!rule_is_hidden(rule)) {
- send_flow_removed(p, rule, now,
- (now >= hard_expire
- ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
- }
- rule_remove(p, rule);
-}
-
-static void
-active_timeout(struct ofproto *ofproto, struct rule *rule)
-{
- if (ofproto->netflow && !is_controller_rule(rule) &&
- netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
- struct ofexpired expired;
- struct odp_flow odp_flow;
-
- /* Get updated flow stats. */
- memset(&odp_flow, 0, sizeof odp_flow);
- if (rule->installed) {
- odp_flow.key = rule->cr.flow;
- odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
- dpif_flow_get(ofproto->dpif, &odp_flow);
-
- if (odp_flow.stats.n_packets) {
- update_time(ofproto, rule, &odp_flow.stats);
- netflow_flow_update_flags(&rule->nf_flow,
- odp_flow.stats.tcp_flags);
- }
- }
-
- expired.flow = rule->cr.flow;
- expired.packet_count = rule->packet_count +
- odp_flow.stats.n_packets;
- expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
- expired.used = rule->used;
-
- netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
-
- /* Schedule us to send the accumulated records once we have
- * collected all of them. */
- poll_immediate_wake();
- }
-}
-
-static void
-update_used(struct ofproto *p)
-{
- struct odp_flow *flows;
- size_t n_flows;
- size_t i;
- int error;
-
- error = dpif_flow_list_all(p->dpif, &flows, &n_flows);
- if (error) {
- return;
- }
-
- for (i = 0; i < n_flows; i++) {
- struct odp_flow *f = &flows[i];
- struct rule *rule;
-
- rule = rule_from_cls_rule(
- classifier_find_rule_exactly(&p->cls, &f->key, 0, UINT16_MAX));
- if (!rule || !rule->installed) {
- COVERAGE_INC(ofproto_unexpected_rule);
- dpif_flow_del(p->dpif, f);
- continue;
- }
-
- update_time(p, rule, &f->stats);
- rule_account(p, rule, f->stats.n_bytes);
- }
- free(flows);
-}
-
/* pinsched callback for sending 'packet' on 'ofconn'. */
static void
do_send_packet_in(struct ofpbuf *packet, void *ofconn_)