#include "bitmap.h"
#include "byte-order.h"
#include "classifier.h"
+#include "connectivity.h"
#include "connmgr.h"
#include "coverage.h"
#include "dynamic-string.h"
#include "pktbuf.h"
#include "poll-loop.h"
#include "random.h"
+#include "seq.h"
#include "shash.h"
#include "simap.h"
#include "smap.h"
/* Global lock that protects all flow table operations. */
struct ovs_mutex ofproto_mutex = OVS_MUTEX_INITIALIZER;
-unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
-unsigned n_handler_threads;
+/* Limit on the number of datapath flows; see ofproto_set_flow_limit(). */
+unsigned ofproto_flow_limit = OFPROTO_FLOW_LIMIT_DEFAULT;
enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
+/* Number of upcall handler and flow revalidator threads; see
+ * ofproto_set_threads(). */
+size_t n_handlers, n_revalidators;
+
/* Map from datapath name to struct ofproto, for use by unixctl commands. */
static struct hmap all_ofprotos = HMAP_INITIALIZER(&all_ofprotos);
{
size_t i;
+ sset_clear(types);
for (i = 0; i < n_ofproto_classes; i++) {
ofproto_classes[i]->enumerate_types(types);
}
ofproto->ogf.max_groups[OFPGT11_INDIRECT] = OFPG_MAX;
ofproto->ogf.max_groups[OFPGT11_FF] = OFPG_MAX;
ofproto->ogf.actions[0] =
-#define OFPAT11_ACTION(ENUM, STRUCT, EXTENSIBLE, NAME) (1 << ENUM) |
-#include "ofp-util.def"
- 0;
+ (1 << OFPAT11_OUTPUT) |
+ (1 << OFPAT11_COPY_TTL_OUT) |
+ (1 << OFPAT11_COPY_TTL_IN) |
+ (1 << OFPAT11_SET_MPLS_TTL) |
+ (1 << OFPAT11_DEC_MPLS_TTL) |
+ (1 << OFPAT11_PUSH_VLAN) |
+ (1 << OFPAT11_POP_VLAN) |
+ (1 << OFPAT11_PUSH_MPLS) |
+ (1 << OFPAT11_POP_MPLS) |
+ (1 << OFPAT11_SET_QUEUE) |
+ (1 << OFPAT11_GROUP) |
+ (1 << OFPAT11_SET_NW_TTL) |
+ (1 << OFPAT11_DEC_NW_TTL) |
+ (1 << OFPAT12_SET_FIELD);
+/* not supported:
+ * (1 << OFPAT13_PUSH_PBB) |
+ * (1 << OFPAT13_POP_PBB) */
error = ofproto->ofproto_class->construct(ofproto);
if (error) {
-/* Sets the number of flows at which eviction from the kernel flow table
- * will occur. */
+/* Sets the number of flows at which eviction from the kernel (datapath) flow
+ * table will occur.  The value is simply stored in the global
+ * 'ofproto_flow_limit' for the ofproto implementation to consult. */
void
-ofproto_set_flow_eviction_threshold(unsigned threshold)
+ofproto_set_flow_limit(unsigned limit)
{
-    flow_eviction_threshold = MAX(OFPROTO_FLOW_EVICTION_THRESHOLD_MIN,
-                                  threshold);
+    ofproto_flow_limit = limit;
}
/* Sets the path for handling flow misses. */
}
}
-/* Sets number of upcall handler threads. The default is
- * (number of online cores - 2). */
+/* Sets the number of upcall handler threads ('n_handlers_') and flow
+ * revalidator threads ('n_revalidators_').  Either count may be zero, in
+ * which case a default is computed from the number of CPU cores (treated as
+ * at least 2): revalidators default to cores/4 + 1 when handlers are also
+ * defaulted, otherwise to the cores left over after the requested handlers;
+ * handlers default to the cores left over after the revalidators.  Each
+ * resulting count is always at least 1. */
void
-ofproto_set_n_handler_threads(unsigned limit)
+ofproto_set_threads(size_t n_handlers_, size_t n_revalidators_)
{
-    if (limit) {
-        n_handler_threads = limit;
-    } else {
-        int n_proc = sysconf(_SC_NPROCESSORS_ONLN);
-        n_handler_threads = n_proc > 2 ? n_proc - 2 : 1;
+    int threads = MAX(count_cpu_cores(), 2);
+
+    n_revalidators = n_revalidators_;
+    n_handlers = n_handlers_;
+
+    if (!n_revalidators) {
+        n_revalidators = n_handlers
+            ? MAX(threads - (int) n_handlers, 1)
+            : threads / 4 + 1;
+    }
+
+    if (!n_handlers) {
+        n_handlers = MAX(threads - (int) n_revalidators, 1);
    }
}
? ofproto->ofproto_class->get_stp_port_status(ofport, s)
: EOPNOTSUPP);
}
+
+/* Retrieves STP port statistics of 'ofp_port' on 'ofproto' and stores it in
+ * 's'.  If the 'enabled' member in 's' is false, then the other members
+ * are not meaningful.
+ *
+ * Returns 0 if successful, otherwise a positive errno value.*/
+int
+ofproto_port_get_stp_stats(struct ofproto *ofproto, ofp_port_t ofp_port,
+                           struct ofproto_port_stp_stats *s)
+{
+    struct ofport *ofport = ofproto_get_port(ofproto, ofp_port);
+    if (!ofport) {
+        /* ENODEV: no such port on this ofproto; warning is rate-limited. */
+        VLOG_WARN_RL(&rl, "%s: cannot get STP stats on nonexistent "
+                     "port %"PRIu16, ofproto->name, ofp_port);
+        return ENODEV;
+    }
+
+    /* EOPNOTSUPP when the ofproto provider does not implement STP stats. */
+    return (ofproto->ofproto_class->get_stp_port_stats
+            ? ofproto->ofproto_class->get_stp_port_stats(ofport, s)
+            : EOPNOTSUPP);
+}
\f
/* Queue DSCP configuration. */
}
table->max_flows = s->max_flows;
- ovs_rwlock_rdlock(&table->cls.rwlock);
+ ovs_rwlock_wrlock(&table->cls.rwlock);
if (classifier_count(&table->cls) > table->max_flows
&& table->eviction_fields) {
/* 'table' contains more flows than allowed. We might not be able to
break;
}
}
+
+ classifier_set_prefix_fields(&table->cls,
+ s->prefix_fields, s->n_prefix_fields);
+
ovs_rwlock_unlock(&table->cls.rwlock);
}
\f
return error;
}
-int
-ofproto_type_run_fast(const char *datapath_type)
-{
- const struct ofproto_class *class;
- int error;
-
- datapath_type = ofproto_normalize_type(datapath_type);
- class = ofproto_class_find__(datapath_type);
-
- error = class->type_run_fast ? class->type_run_fast(datapath_type) : 0;
- if (error && error != EAGAIN) {
- VLOG_ERR_RL(&rl, "%s: type_run_fast failed (%s)",
- datapath_type, ovs_strerror(error));
- }
- return error;
-}
-
void
ofproto_type_wait(const char *datapath_type)
{
int
ofproto_run(struct ofproto *p)
{
- struct sset changed_netdevs;
- const char *changed_netdev;
- struct ofport *ofport;
int error;
+ uint64_t new_seq;
error = p->ofproto_class->run(p);
if (error && error != EAGAIN) {
}
}
- /* Update OpenFlow port status for any port whose netdev has changed.
- *
- * Refreshing a given 'ofport' can cause an arbitrary ofport to be
- * destroyed, so it's not safe to update ports directly from the
- * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
- * need this two-phase approach. */
- sset_init(&changed_netdevs);
- HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
- unsigned int change_seq = netdev_change_seq(ofport->netdev);
- if (ofport->change_seq != change_seq) {
- ofport->change_seq = change_seq;
- sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
+ new_seq = seq_read(connectivity_seq_get());
+ if (new_seq != p->change_seq) {
+ struct sset devnames;
+ const char *devname;
+ struct ofport *ofport;
+
+ /* Update OpenFlow port status for any port whose netdev has changed.
+ *
+ * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+ * destroyed, so it's not safe to update ports directly from the
+ * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+ * need this two-phase approach. */
+ sset_init(&devnames);
+ HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
+ sset_add(&devnames, netdev_get_name(ofport->netdev));
}
+ SSET_FOR_EACH (devname, &devnames) {
+ update_port(p, devname);
+ }
+ sset_destroy(&devnames);
+
+ p->change_seq = new_seq;
}
- SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
- update_port(p, changed_netdev);
- }
- sset_destroy(&changed_netdevs);
switch (p->state) {
case S_OPENFLOW:
break;
default:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
if (time_msec() >= p->next_op_report) {
return error;
}
-/* Performs periodic activity required by 'ofproto' that needs to be done
- * with the least possible latency.
- *
- * It makes sense to call this function a couple of times per poll loop, to
- * provide a significant performance boost on some benchmarks with the
- * ofproto-dpif implementation. */
-int
-ofproto_run_fast(struct ofproto *p)
-{
- int error;
-
- error = p->ofproto_class->run_fast ? p->ofproto_class->run_fast(p) : 0;
- if (error && error != EAGAIN) {
- VLOG_ERR_RL(&rl, "%s: fastpath run failed (%s)",
- p->name, ovs_strerror(error));
- }
- return error;
-}
-
void
ofproto_wait(struct ofproto *p)
{
- struct ofport *ofport;
-
p->ofproto_class->wait(p);
if (p->ofproto_class->port_poll_wait) {
p->ofproto_class->port_poll_wait(p);
}
-
- HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
- if (ofport->change_seq != netdev_change_seq(ofport->netdev)) {
- poll_immediate_wake();
- }
- }
+ seq_wait(connectivity_seq_get(), p->change_seq);
switch (p->state) {
case S_OPENFLOW:
connmgr_get_memory_usage(ofproto->connmgr, usage);
}
+/* Adds to 'usage' the datapath-type-wide memory usage statistics for
+ * 'datapath_type', if the ofproto provider implements
+ * 'type_get_memory_usage'.  Does nothing for an unknown datapath type or a
+ * provider without that hook. */
+void
+ofproto_type_get_memory_usage(const char *datapath_type, struct simap *usage)
+{
+    const struct ofproto_class *class;
+
+    datapath_type = ofproto_normalize_type(datapath_type);
+    class = ofproto_class_find__(datapath_type);
+
+    if (class && class->type_get_memory_usage) {
+        class->type_get_memory_usage(datapath_type, usage);
+    }
+}
+
void
ofproto_get_ofproto_controller_info(const struct ofproto *ofproto,
struct shash *info)
update_port(ofproto, netdev_name);
}
if (ofp_portp) {
- struct ofproto_port ofproto_port;
-
- ofproto_port_query_by_name(ofproto, netdev_get_name(netdev),
- &ofproto_port);
- *ofp_portp = error ? OFPP_NONE : ofproto_port.ofp_port;
- ofproto_port_destroy(&ofproto_port);
+ *ofp_portp = OFPP_NONE;
+ if (!error) {
+ struct ofproto_port ofproto_port;
+
+ error = ofproto_port_query_by_name(ofproto,
+ netdev_get_name(netdev),
+ &ofproto_port);
+ if (!error) {
+ *ofp_portp = ofproto_port.ofp_port;
+ ofproto_port_destroy(&ofproto_port);
+ }
+ }
}
return error;
}
/* Search for a free OpenFlow port number. We try not to
* immediately reuse them to prevent problems due to old
- * flows. */
+ * flows.
+ *
+ * We limit the automatically assigned port numbers to the lower half
+ * of the port range, to reserve the upper half for assignment by
+ * controllers. */
for (;;) {
- if (++ofproto->alloc_port_no >= ofproto->max_ports) {
+ if (++ofproto->alloc_port_no >= MIN(ofproto->max_ports, 32768)) {
ofproto->alloc_port_no = 1;
}
last_used_at = ofport_get_usage(ofproto,
}
ofport->ofproto = p;
ofport->netdev = netdev;
- ofport->change_seq = netdev_change_seq(netdev);
ofport->pp = *pp;
ofport->ofp_port = pp->port_no;
ofport->created = time_msec();
* Don't close the old netdev yet in case port_modified has to
* remove a retained reference to it.*/
port->netdev = netdev;
- port->change_seq = netdev_change_seq(netdev);
if (port->ofproto->ofproto_class->port_modified) {
port->ofproto->ofproto_class->port_modified(port);
op->actions->ofpacts_len, out_port);
}
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
static void
ots = xcalloc(p->n_tables, sizeof *ots);
for (i = 0; i < p->n_tables; i++) {
ots[i].table_id = i;
- sprintf(ots[i].name, "table%zu", i);
+ sprintf(ots[i].name, "table%"PRIuSIZE, i);
ots[i].match = htonll(OFPXMT13_MASK);
ots[i].wildcards = htonll(OFPXMT13_MASK);
ots[i].write_actions = htonl(OFPAT11_OUTPUT);
* actions, so that when the operation commits we report the change. */
switch (op->type) {
case OFOPERATION_ADD:
- NOT_REACHED();
+ OVS_NOT_REACHED();
case OFOPERATION_MODIFY:
case OFOPERATION_REPLACE:
break;
default:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
}
fu.ofpacts = actions ? actions->ofpacts : NULL;
}
}
+/* Applies the OpenFlow table-mod request 'tm' to 'ofproto'.
+ *
+ * Returns 0 if successful, otherwise an OpenFlow error code. */
+static enum ofperr
+table_mod(struct ofproto *ofproto, const struct ofputil_table_mod *tm)
+{
+    /* XXX Reject all configurations because none are currently supported */
+    return OFPERR_OFPTMFC_BAD_CONFIG;
+
+    /* NOTE(review): everything below is intentionally dead code until table
+     * configurations are supported; enabling support only requires deleting
+     * the early return above. */
+    if (tm->table_id == OFPTT_ALL) {
+        int i;
+        for (i = 0; i < ofproto->n_tables; i++) {
+            /* 'config' is read concurrently elsewhere, hence atomic_store. */
+            atomic_store(&ofproto->tables[i].config,
+                         (unsigned int)tm->config);
+        }
+    } else if (!check_table_id(ofproto, tm->table_id)) {
+        return OFPERR_OFPTMFC_BAD_TABLE;
+    } else {
+        atomic_store(&ofproto->tables[tm->table_id].config,
+                     (unsigned int)tm->config);
+    }
+
+    return 0;
+}
+
static enum ofperr
handle_table_mod(struct ofconn *ofconn, const struct ofp_header *oh)
{
+ struct ofproto *ofproto = ofconn_get_ofproto(ofconn);
struct ofputil_table_mod tm;
enum ofperr error;
return error;
}
- /* XXX Actual table mod support is not implemented yet. */
- return 0;
+ return table_mod(ofproto, &tm);
}
static enum ofperr
break;
default:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
ofmonitor_report(ofproto->connmgr, rule, event_type,
break;
default:
- NOT_REACHED();
+ OVS_NOT_REACHED();
}
ofoperation_destroy(op);
oftable_init(struct oftable *table)
{
    memset(table, 0, sizeof *table);
-    classifier_init(&table->cls);
+    classifier_init(&table->cls, flow_segment_u32s);
    table->max_flows = UINT_MAX;
+    /* Default table config: a table miss sends the packet to the controller
+     * (OFPTC11_TABLE_MISS_CONTROLLER). */
+    atomic_init(&table->config, (unsigned int)OFPTC11_TABLE_MISS_CONTROLLER);
}
/* Destroys 'table', including its classifier and eviction groups.