COVERAGE_DEFINE(packet_in_overflow);
COVERAGE_DEFINE(flow_mod_overflow);
-#define N_THREADS 16
-
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
struct flow_miss;
struct facet;
+struct rule_dpif {
+ struct rule up;
+
+ /* These statistics:
+ *
+ * - Do include packets and bytes from facets that have been deleted or
+ * whose own statistics have been folded into the rule.
+ *
+ * - Do include packets and bytes sent "by hand" that were accounted to
+ * the rule without any facet being involved (this is a rare corner
+ * case in rule_execute()).
+ *
+ * - Do not include packets or bytes that can be obtained from any facet's
+ * packet_count or byte_count member or that can be obtained from the
+ * datapath by, e.g., dpif_flow_get() for any subfacet.
+ */
+ struct ovs_mutex stats_mutex;
+ uint64_t packet_count OVS_GUARDED; /* Number of packets received. */
+ uint64_t byte_count OVS_GUARDED; /* Number of bytes received. */
+};
+
static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
+static struct rule_dpif *rule_dpif_cast(const struct rule *);
struct ofbundle {
struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
* Flow expiration works in terms of subfacets, so a facet must have at
* least one subfacet or it will never expire, leaking memory. */
struct facet {
- /* Owners. */
- struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
+ /* Owner. */
struct ofproto_dpif *ofproto;
/* Owned data. */
/* Number of subfacets added or deleted from 'created' to 'last_minute'. */
unsigned long long int total_subfacet_add_count;
unsigned long long int total_subfacet_del_count;
+
+ /* Number of upcall handling threads. */
+ unsigned int n_handler_threads;
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
}
+/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
+ * Takes ownership of 'pin' and pin->packet. */
void
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
struct ofputil_packet_in *pin)
VLOG_ERR("Failed to enable receiving packets in dpif.");
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
dpif_flow_flush(backer->dpif);
backer->need_revalidate = REV_RECONFIGURE;
}
+ /* If n_handler_threads has been reconfigured, call udpif_recv_set()
+ * to update the number of handler threads. */
+ if (backer->n_handler_threads != n_handler_threads) {
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
+ }
+
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
int stp_port = ofport->stp_port
? stp_port_no(ofport->stp_port)
- : 0;
+ : -1;
xlate_ofport_set(ofproto, ofport->bundle, ofport,
ofport->up.ofp_port, ofport->odp_port,
ofport->up.netdev, ofport->cfm,
close_dpif_backer(backer);
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
backer->max_n_subfacet = 0;
backer->created = time_msec();
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct shash_node *node, *next;
- odp_port_t max_ports;
+ uint32_t max_ports;
int error;
error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
}
max_ports = dpif_get_max_ports(ofproto->backer->dpif);
- ofproto_init_max_ports(ofproto_, u16_to_ofp(MIN(odp_to_u32(max_ports),
- ofp_to_u16(OFPP_MAX))));
+ ofproto_init_max_ports(ofproto_, MIN(max_ports, ofp_to_u16(OFPP_MAX)));
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
ofproto->mbridge = mbridge_create();
ofproto->has_bonded_bundles = false;
- ovs_mutex_init(&ofproto->vsp_mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_init(&ofproto->vsp_mutex);
classifier_init(&ofproto->facets);
ofproto->consistency_rl = LLONG_MIN;
list_init(&ofproto->completions);
- ovs_mutex_init(&ofproto->flow_mod_mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_init(&ofproto->flow_mod_mutex);
ovs_mutex_lock(&ofproto->flow_mod_mutex);
list_init(&ofproto->flow_mods);
ofproto->n_flow_mods = 0;
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
- ovs_mutex_init(&ofproto->pin_mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_init(&ofproto->pin_mutex);
ovs_mutex_lock(&ofproto->pin_mutex);
list_init(&ofproto->pins);
ofproto->n_pins = 0;
if (rule_dpif_lookup_in_table(ofproto, &fm.match.flow, NULL, TBL_INTERNAL,
rulep)) {
- ovs_rwlock_unlock(&(*rulep)->up.evict);
+ rule_dpif_release(*rulep);
} else {
NOT_REACHED();
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
- struct ofputil_flow_mod *pin, *next_pin;
+ struct ofputil_packet_in *pin, *next_pin;
struct ofputil_flow_mod *fm, *next_fm;
+ struct facet *facet, *next_facet;
+ struct cls_cursor cursor;
struct oftable *table;
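+ /* Take a cursor snapshot of 'facets' under the read lock, then release
+ * the lock before iterating: facet_remove() takes the write lock to
+ * delete each facet from the classifier. */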
+ ovs_rwlock_rdlock(&ofproto->facets.rwlock);
+ cls_cursor_init(&cursor, &ofproto->facets, NULL);
+ ovs_rwlock_unlock(&ofproto->facets.rwlock);
+ CLS_CURSOR_FOR_EACH_SAFE (facet, next_facet, cr, &cursor) {
+ facet_remove(facet);
+ }
+
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ovs_rwlock_wrlock(&xlate_rwlock);
xlate_remove_ofproto(ofproto);
ovs_rwlock_unlock(&xlate_rwlock);
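+ /* Discard any queued flow miss batches that still reference 'ofproto'. */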
+ flow_miss_batch_ofproto_destroyed(ofproto->backer->udpif, ofproto);
+
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
complete_operations(ofproto);
ovs_rwlock_wrlock(&table->cls.rwlock);
cls_cursor_init(&cursor, &table->cls, NULL);
CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
- ofproto_rule_destroy(&ofproto->up, &table->cls, &rule->up);
+ ofproto_rule_delete(&ofproto->up, &table->cls, &rule->up);
}
ovs_rwlock_unlock(&table->cls.rwlock);
}
+ complete_operations(ofproto);
ovs_mutex_lock(&ofproto->flow_mod_mutex);
LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &ofproto->flow_mods) {
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) {
list_remove(&pin->list_node);
ofproto->n_pins--;
- free(pin->ofpacts);
+ free(CONST_CAST(void *, pin->packet));
free(pin);
}
ovs_mutex_unlock(&ofproto->pin_mutex);
}
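+ /* Move the queued flow mods onto a local list under 'flow_mod_mutex' so
+ * they can be run without holding the lock. */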
ovs_mutex_lock(&ofproto->flow_mod_mutex);
- if (ofproto->n_flow_mods) {
- flow_mods = ofproto->flow_mods;
- list_moved(&flow_mods);
- list_init(&ofproto->flow_mods);
- ofproto->n_flow_mods = 0;
- } else {
- list_init(&flow_mods);
- }
+ list_move(&flow_mods, &ofproto->flow_mods);
+ list_init(&ofproto->flow_mods);
+ ofproto->n_flow_mods = 0;
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
LIST_FOR_EACH_SAFE (fm, next_fm, list_node, &flow_mods) {
}
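+ /* Likewise, move the queued packet-ins onto a local list under
+ * 'pin_mutex'. */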
ovs_mutex_lock(&ofproto->pin_mutex);
- if (ofproto->n_pins) {
- pins = ofproto->pins;
- list_moved(&pins);
- list_init(&ofproto->pins);
- ofproto->n_pins = 0;
- } else {
- list_init(&pins);
- }
+ list_move(&pins, &ofproto->pins);
+ list_init(&ofproto->pins);
+ ofproto->n_pins = 0;
ovs_mutex_unlock(&ofproto->pin_mutex);
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
if (ofproto->sflow) {
dpif_sflow_run(ofproto->sflow);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_run(ofproto->ipfix);
+ }
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run(ofport);
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
+ if (ofproto->ipfix) {
+ dpif_ipfix_wait(ofproto->ipfix);
+ }
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_wait(ofport);
}
cfm_set_netdev(port->cfm, port->up.netdev);
}
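+ /* As with CFM above, point BFD at the possibly-new netdev. */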
+ if (port->bfd) {
+ bfd_set_netdev(port->bfd, port->up.netdev);
+ }
+
if (port->is_tunnel && tnl_port_reconfigure(port, port->up.netdev,
port->odp_port)) {
ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate =
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_ipfix *di = ofproto->ipfix;
+ bool has_options = bridge_exporter_options || flow_exporters_options;
- if (bridge_exporter_options || flow_exporters_options) {
- if (!di) {
- di = ofproto->ipfix = dpif_ipfix_create();
- }
+ if (has_options && !di) {
+ di = ofproto->ipfix = dpif_ipfix_create();
+ }
+
+ if (di) {
+ /* Call set_options in any case; this cleanly flushes the flow
+ * caches of any exporters that are about to be destroyed. */
dpif_ipfix_set_options(
di, bridge_exporter_options, flow_exporters_options,
n_flow_exporters_options);
- } else {
- if (di) {
+
+ if (!has_options) {
dpif_ipfix_unref(di);
ofproto->ipfix = NULL;
}
}
+
return 0;
}
struct bfd *old;
old = ofport->bfd;
- ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg);
+ ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev),
+ cfg, ofport->up.netdev);
if (ofport->bfd != old) {
ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
bool carrier_changed = carrier_seq != ofport->carrier_seq;
bool enable = netdev_get_carrier(ofport->up.netdev);
+ bool cfm_enable = false;
+ bool bfd_enable = false;
ofport->carrier_seq = carrier_seq;
int cfm_opup = cfm_get_opup(ofport->cfm);
cfm_run(ofport->cfm);
- enable = enable && !cfm_get_fault(ofport->cfm);
+ cfm_enable = !cfm_get_fault(ofport->cfm);
if (cfm_opup >= 0) {
- enable = enable && cfm_opup;
+ cfm_enable = cfm_enable && cfm_opup;
}
}
if (ofport->bfd) {
bfd_run(ofport->bfd);
- enable = enable && bfd_forwarding(ofport->bfd);
+ bfd_enable = bfd_forwarding(ofport->bfd);
+ }
+
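+ /* The interface is enabled only if the carrier is up and, when CFM or
+ * BFD is configured, at least one of them reports the link as usable. */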
+ if (ofport->bfd || ofport->cfm) {
+ enable = enable && (cfm_enable || bfd_enable);
}
if (ofport->bundle) {
sset_find_and_delete(&ofproto->ghost_ports,
netdev_get_name(ofport->up.netdev));
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- if (!ofport->is_tunnel) {
+ if (!ofport->is_tunnel && !netdev_vport_is_patch(ofport->up.netdev)) {
error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
if (!error) {
/* The caller is going to close ofport->up.netdev. If this is a
facet->byte_count += miss->stats.n_bytes;
facet->prev_byte_count += miss->stats.n_bytes;
- subfacet = subfacet_create(facet, miss);
want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
/* Don't install the flow if it's the result of the "userspace"
* pass made by update_stats(), because update_stats() never looks at
* uninstallable subfacets. */
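+ /* Width of each histogram bucket, in milliseconds. */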
- enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
+ enum { BUCKET_WIDTH = 100 };
enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
int buckets[N_BUCKETS] = { 0 };
int total, subtotal, bucket;
return;
}
- if (!ovs_rwlock_trywrlock(&rule->up.evict)) {
- COVERAGE_INC(ofproto_dpif_expired);
-
- /* Get rid of the rule. */
- ofproto_rule_expire(&rule->up, reason);
- }
+ COVERAGE_INC(ofproto_dpif_expired);
+ ofproto_rule_expire(&rule->up, reason);
}
\f
/* Facets. */
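+ /* True when 'ofpacts' consists of exactly one "controller" action. */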
is_controller = ofpacts_len > 0
&& ofpacts->type == OFPACT_CONTROLLER
&& ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len);
- rule_release(rule);
+ rule_dpif_release(rule);
return is_controller;
}
return false;
rule_dpif_lookup(facet->ofproto, &facet->flow, NULL, &rule);
xlate_in_init(&xin, facet->ofproto, &facet->flow, rule, 0, NULL);
xlate_actions(&xin, &xout);
- rule_release(rule);
+ rule_dpif_release(rule);
ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
&& facet->xout.slow == xout.slow;
|| memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) {
facet_remove(facet);
xlate_out_uninit(&xout);
- rule_release(new_rule);
+ rule_dpif_release(new_rule);
return false;
}
facet->used = MAX(facet->used, new_rule->up.created);
xlate_out_uninit(&xout);
- rule_release(new_rule);
+ rule_dpif_release(new_rule);
return true;
}
}
rule_dpif_lookup(ofproto, flow, NULL, &rule);
- rule_credit_stats(rule, stats);
+ rule_dpif_credit_stats(rule, stats);
xlate_in_init(&xin, ofproto, flow, rule, stats->tcp_flags, NULL);
xin.resubmit_stats = stats;
xin.may_learn = may_learn;
xlate_actions_for_side_effects(&xin);
- rule_release(rule);
+ rule_dpif_release(rule);
}
static void
}
void
-rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
+rule_dpif_credit_stats(struct rule_dpif *rule,
+ const struct dpif_flow_stats *stats)
{
ovs_mutex_lock(&rule->stats_mutex);
rule->packet_count += stats->n_packets;
rule->byte_count += stats->n_bytes;
- ofproto_rule_update_used(&rule->up, stats->used);
+ rule->up.used = MAX(rule->up.used, stats->used);
ovs_mutex_unlock(&rule->stats_mutex);
}
+
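+/* Returns true if 'rule' is a fail-open rule, i.e. one installed at
+ * FAIL_OPEN_PRIORITY by fail-open mode. */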
+bool
+rule_dpif_fail_open(const struct rule_dpif *rule)
+{
+ return rule->up.cr.priority == FAIL_OPEN_PRIORITY;
+}
+
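+/* Returns 'rule''s flow cookie. */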
+ovs_be64
+rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
+{
+ return rule->up.flow_cookie;
+}
+
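+/* Reduces 'rule''s idle and hard timeouts to no more than 'idle_timeout'
+ * and 'hard_timeout', respectively; a timeout of zero means no change. */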
+void
+rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
+ uint16_t hard_timeout)
+{
+ ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
+}
+
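+/* Stores 'rule''s actions in '*ofpacts' and their length in bytes in
+ * '*ofpacts_len'. */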
+void
+rule_dpif_get_actions(const struct rule_dpif *rule,
+ const struct ofpact **ofpacts, size_t *ofpacts_len)
+{
+ *ofpacts = rule->up.ofpacts;
+ *ofpacts_len = rule->up.ofpacts_len;
+}
\f
/* Subfacets. */
flow->in_port.ofp_port);
}
- *rule = choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule,
- ofproto->no_packet_in_rule);
- ovs_rwlock_rdlock(&(*rule)->up.evict);
+ choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule,
+ ofproto->no_packet_in_rule, rule);
}
bool
rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
const struct flow *flow, struct flow_wildcards *wc,
uint8_t table_id, struct rule_dpif **rule)
- OVS_ACQ_RDLOCK((*rule)->up.evict)
+ OVS_TRY_RDLOCK(true, (*rule)->up.rwlock)
{
struct cls_rule *cls_rule;
struct classifier *cls;
}
*rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
- if (*rule && ovs_rwlock_tryrdlock(&(*rule)->up.evict)) {
+ if (*rule && ovs_rwlock_tryrdlock(&(*rule)->up.rwlock)) {
/* The rule is in the process of being removed. Best we can do is
* pretend it isn't there. */
*rule = NULL;
/* Given a port configuration (specified as zero if there's no port), chooses
* which of 'miss_rule' and 'no_packet_in_rule' should be used in case of a
- * flow table miss. */
+ * flow table miss, stores it in '*rule', and takes a read lock on it.  The
+ * caller must release the lock with rule_dpif_release(). */
-struct rule_dpif *
+void
choose_miss_rule(enum ofputil_port_config config, struct rule_dpif *miss_rule,
- struct rule_dpif *no_packet_in_rule)
+ struct rule_dpif *no_packet_in_rule, struct rule_dpif **rule)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
- return config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule;
+ *rule = config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule;
+ ovs_rwlock_rdlock(&(*rule)->up.rwlock);
}
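+/* Releases the read lock taken when 'rule' was looked up.  A null 'rule'
+ * is a no-op. */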
void
-rule_release(struct rule_dpif *rule)
+rule_dpif_release(struct rule_dpif *rule)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
{
if (rule) {
- ovs_rwlock_unlock(&rule->up.evict);
+ ovs_rwlock_unlock(&rule->up.rwlock);
}
}
}
}
+static struct rule_dpif *
+rule_dpif_cast(const struct rule *rule)
+{
+ return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
+}
+
static struct rule *
rule_alloc(void)
{
rule_construct(struct rule *rule_)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- ovs_mutex_init(&rule->stats_mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_init(&rule->stats_mutex);
ovs_mutex_lock(&rule->stats_mutex);
rule->packet_count = 0;
rule->byte_count = 0;
ovs_mutex_unlock(&rule->stats_mutex);
- complete_operation(rule);
return 0;
}
static void
-rule_destruct(struct rule *rule_)
+rule_insert(struct rule *rule_)
+{
+ struct rule_dpif *rule = rule_dpif_cast(rule_);
+ complete_operation(rule);
+}
+
+static void
+rule_delete(struct rule *rule_)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
complete_operation(rule);
+}
+
+static void
+rule_destruct(struct rule *rule_)
+{
+ struct rule_dpif *rule = rule_dpif_cast(rule_);
ovs_mutex_destroy(&rule->stats_mutex);
}
struct xlate_in xin;
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- rule_credit_stats(rule, &stats);
+ rule_dpif_credit_stats(rule, &stats);
xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
xin.resubmit_stats = &stats;
}
static void
-rule_modify_actions(struct rule *rule_)
+rule_modify_actions(struct rule *rule_, bool reset_counters)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
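+
+ /* If requested (e.g. via the OpenFlow "reset_counts" flag), zero the
+ * rule's statistics before the new actions take effect. */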
+ if (reset_counters) {
+ ovs_mutex_lock(&rule->stats_mutex);
+ rule->packet_count = 0;
+ rule->byte_count = 0;
+ ovs_mutex_unlock(&rule->stats_mutex);
+ }
+
complete_operation(rule);
}
\f
xlate_out_uninit(&trace.xout);
}
- rule_release(rule);
+ rule_dpif_release(rule);
}
static void
NULL, /* rule_choose_table */
rule_alloc,
rule_construct,
+ rule_insert,
+ rule_delete,
rule_destruct,
rule_dealloc,
rule_get_stats,
NULL, /* meter_set */
NULL, /* meter_get */
NULL, /* meter_del */
+ NULL, /* group_alloc */
+ NULL, /* group_construct */
+ NULL, /* group_destruct */
+ NULL, /* group_dealloc */
+ NULL, /* group_modify */
+ NULL, /* group_get_stats */
};