From: Giuseppe Lettieri
Date: Sat, 17 Aug 2013 13:08:21 +0000 (+0200)
Subject: Merge branch 'mainstream'
X-Git-Tag: sliver-openvswitch-2.0.90-1~24
X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=77d14d9c7f9ce7245eff56aacd420646577892d0;hp=a91da17ea6f910863c2a771ebfa4100bbad3f481;p=sliver-openvswitch.git
Merge branch 'mainstream'
---
diff --git a/datapath/linux/compat/include/linux/netdevice.h b/datapath/linux/compat/include/linux/netdevice.h
index f62bd6de0..2ceff22b3 100644
--- a/datapath/linux/compat/include/linux/netdevice.h
+++ b/datapath/linux/compat/include/linux/netdevice.h
@@ -98,7 +98,7 @@ static inline int netdev_rx_handler_register(struct net_device *dev,
#ifdef HAVE_RHEL_OVS_HOOK
rcu_assign_pointer(dev->ax25_ptr, rx_handler_data);
nr_bridges++;
- rcu_assign_pointer(openvswitch_handle_frame_hook, rx_handler_data);
+ rcu_assign_pointer(openvswitch_handle_frame_hook, rx_handler);
#else
if (dev->br_port)
return -EBUSY;
diff --git a/datapath/linux/compat/include/net/net_namespace.h b/datapath/linux/compat/include/net/net_namespace.h
index 85dee1a53..440c6016a 100644
--- a/datapath/linux/compat/include/net/net_namespace.h
+++ b/datapath/linux/compat/include/net/net_namespace.h
@@ -82,23 +82,23 @@ extern void rpl_unregister_pernet_gen_device(struct rpl_pernet_operations *ops);
#else /* for 2.6.32* */
-int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
-void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
+int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
+void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
#define DEFINE_COMPAT_PNET_REG_FUNC(TYPE) \
\
static struct rpl_pernet_operations *pnet_gen_##TYPE; \
-static int __net_init compat_init_net_gen_##TYPE(struct net *net) \
+static int compat_init_net_gen_##TYPE(struct net *net) \
{ \
return compat_init_net(net, pnet_gen_##TYPE); \
} \
\
-static void __net_exit compat_exit_net_gen_##TYPE(struct net *net) \
+static void compat_exit_net_gen_##TYPE(struct net *net) \
{ \
compat_exit_net(net, pnet_gen_##TYPE); \
} \
\
-static int __net_init rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+static int rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
{ \
pnet_gen_##TYPE = rpl_pnet; \
rpl_pnet->ops.init = compat_init_net_gen_##TYPE; \
@@ -106,7 +106,7 @@ static int __net_init rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operation
return register_pernet_gen_##TYPE(pnet_gen_##TYPE->id, &rpl_pnet->ops); \
} \
\
-static void __net_exit rpl_unregister_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+static void rpl_unregister_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
{ \
unregister_pernet_gen_##TYPE(*pnet_gen_##TYPE->id, &rpl_pnet->ops); \
}
diff --git a/datapath/linux/compat/net_namespace.c b/datapath/linux/compat/net_namespace.c
index 82404af6e..843e6c152 100644
--- a/datapath/linux/compat/net_namespace.c
+++ b/datapath/linux/compat/net_namespace.c
@@ -8,7 +8,7 @@
static int net_assign_generic(struct net *net, int id, void *data);
#endif
-int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
+int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
{
int err;
void *ovs_net = kzalloc(pnet->size, GFP_KERNEL);
@@ -32,7 +32,7 @@ err:
return err;
}
-void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
+void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
{
void *ovs_net = net_generic(net, *pnet->id);
diff --git a/datapath/linux/compat/vxlan.c b/datapath/linux/compat/vxlan.c
index 6e6b94500..f3df4e3f4 100644
--- a/datapath/linux/compat/vxlan.c
+++ b/datapath/linux/compat/vxlan.c
@@ -429,7 +429,7 @@ void vxlan_handler_put(struct vxlan_handler *vh)
queue_work(&vh->del_work);
}
-static __net_init int vxlan_init_net(struct net *net)
+static int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int h;
diff --git a/lib/bfd.c b/lib/bfd.c
index 74b27c476..6f86f260c 100644
--- a/lib/bfd.c
+++ b/lib/bfd.c
@@ -15,6 +15,7 @@
#include
#include "bfd.h"
+#include
#include
#include
#include
diff --git a/lib/netdev-dummy.c b/lib/netdev-dummy.c
index 5c312109b..e17ef9ddb 100644
--- a/lib/netdev-dummy.c
+++ b/lib/netdev-dummy.c
@@ -60,18 +60,18 @@ struct netdev_dummy {
/* Protects all members below. */
struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
- uint8_t hwaddr[ETH_ADDR_LEN];
- int mtu;
- struct netdev_stats stats;
- enum netdev_flags flags;
- unsigned int change_seq;
- int ifindex;
-
- struct pstream *pstream;
- struct dummy_stream *streams;
- size_t n_streams;
-
- struct list rxes; /* List of child "netdev_rx_dummy"s. */
+ uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
+ int mtu OVS_GUARDED;
+ struct netdev_stats stats OVS_GUARDED;
+ enum netdev_flags flags OVS_GUARDED;
+ unsigned int change_seq OVS_GUARDED;
+ int ifindex OVS_GUARDED;
+
+ struct pstream *pstream OVS_GUARDED;
+ struct dummy_stream *streams OVS_GUARDED;
+ size_t n_streams OVS_GUARDED;
+
+ struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
};
/* Max 'recv_queue_len' in struct netdev_dummy. */
@@ -272,6 +272,7 @@ netdev_dummy_construct(struct netdev *netdev_)
atomic_add(&next_n, 1, &n);
ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_lock(&netdev->mutex);
netdev->hwaddr[0] = 0xaa;
netdev->hwaddr[1] = 0x55;
netdev->hwaddr[2] = n >> 24;
@@ -288,6 +289,7 @@ netdev_dummy_construct(struct netdev *netdev_)
netdev->n_streams = 0;
list_init(&netdev->rxes);
+ ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_lock(&dummy_list_mutex);
list_push_back(&dummy_list, &netdev->list_node);
@@ -306,11 +308,13 @@ netdev_dummy_destruct(struct netdev *netdev_)
list_remove(&netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
+ ovs_mutex_lock(&netdev->mutex);
pstream_close(netdev->pstream);
for (i = 0; i < netdev->n_streams; i++) {
dummy_stream_close(&netdev->streams[i]);
}
free(netdev->streams);
+ ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_destroy(&netdev->mutex);
}
@@ -327,12 +331,15 @@ netdev_dummy_get_config(const struct netdev *netdev_, struct smap *args)
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->ifindex >= 0) {
smap_add_format(args, "ifindex", "%d", netdev->ifindex);
}
if (netdev->pstream) {
smap_add(args, "pstream", pstream_get_name(netdev->pstream));
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
@@ -480,7 +487,10 @@ netdev_dummy_send(struct netdev *netdev, const void *buffer, size_t size)
const struct eth_header *eth = buffer;
int max_size;
+ ovs_mutex_lock(&dev->mutex);
max_size = dev->mtu + ETH_HEADER_LEN;
+ ovs_mutex_unlock(&dev->mutex);
+
if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
max_size += VLAN_HEADER_LEN;
}
diff --git a/lib/netdev-linux.c b/lib/netdev-linux.c
index 9a80b676c..2db56ac30 100644
--- a/lib/netdev-linux.c
+++ b/lib/netdev-linux.c
@@ -1041,21 +1041,16 @@ netdev_linux_get_etheraddr(const struct netdev *netdev_,
return error;
}
-/* Returns the maximum size of transmitted (and received) packets on 'netdev',
- * in bytes, not including the hardware header; thus, this is typically 1500
- * bytes for Ethernet devices. */
static int
-netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+netdev_linux_get_mtu__(struct netdev_linux *netdev, int *mtup)
{
- struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
- ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_MTU)) {
struct ifreq ifr;
netdev->netdev_mtu_error = af_inet_ifreq_ioctl(
- netdev_get_name(netdev_), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
+ netdev_get_name(&netdev->up), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
netdev->mtu = ifr.ifr_mtu;
netdev->cache_valid |= VALID_MTU;
}
@@ -1064,6 +1059,21 @@ netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
if (!error) {
*mtup = netdev->mtu;
}
+
+ return error;
+}
+
+/* Returns the maximum size of transmitted (and received) packets on 'netdev',
+ * in bytes, not including the hardware header; thus, this is typically 1500
+ * bytes for Ethernet devices. */
+static int
+netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+
+ ovs_mutex_lock(&netdev->mutex);
+ error = netdev_linux_get_mtu__(netdev, mtup);
ovs_mutex_unlock(&netdev->mutex);
return error;
@@ -1562,7 +1572,6 @@ netdev_internal_set_stats(struct netdev *netdev,
static void
netdev_linux_read_features(struct netdev_linux *netdev)
- OVS_REQUIRES(netdev->mutex)
{
struct ethtool_cmd ecmd;
uint32_t speed;
@@ -2707,7 +2716,7 @@ htb_setup_class__(struct netdev *netdev, unsigned int handle,
int error;
int mtu;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot set up HTB on device %s that lacks MTU",
netdev_get_name(netdev));
@@ -2803,9 +2812,10 @@ htb_parse_tcmsg__(struct ofpbuf *tcmsg, unsigned int *queue_id,
}
static void
-htb_parse_qdisc_details__(struct netdev *netdev,
+htb_parse_qdisc_details__(struct netdev *netdev_,
const struct smap *details, struct htb_class *hc)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
const char *max_rate_s;
max_rate_s = smap_get(details, "max-rate");
@@ -2813,7 +2823,8 @@ htb_parse_qdisc_details__(struct netdev *netdev,
if (!hc->max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, &current, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
hc->max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}
hc->min_rate = hc->max_rate;
@@ -2832,7 +2843,7 @@ htb_parse_class_details__(struct netdev *netdev,
const char *priority_s = smap_get(details, "priority");
int mtu, error;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot parse HTB class on device %s that lacks MTU",
netdev_get_name(netdev));
@@ -3280,9 +3291,10 @@ hfsc_query_class__(const struct netdev *netdev, unsigned int handle,
}
static void
-hfsc_parse_qdisc_details__(struct netdev *netdev, const struct smap *details,
+hfsc_parse_qdisc_details__(struct netdev *netdev_, const struct smap *details,
struct hfsc_class *class)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
uint32_t max_rate;
const char *max_rate_s;
@@ -3292,7 +3304,8 @@ hfsc_parse_qdisc_details__(struct netdev *netdev, const struct smap *details,
if (!max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, &current, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}
diff --git a/lib/seq.c b/lib/seq.c
index 36e506570..ed205a137 100644
--- a/lib/seq.c
+++ b/lib/seq.c
@@ -148,6 +148,7 @@ seq_wait__(struct seq *seq, uint64_t value)
waiter = xmalloc(sizeof *waiter);
waiter->seq = seq;
hmap_insert(&seq->waiters, &waiter->hmap_node, hash);
+ waiter->ovsthread_id = id;
waiter->value = value;
waiter->thread = seq_thread_get();
list_push_back(&waiter->thread->waiters, &waiter->list_node);
diff --git a/ofproto/ofproto-dpif-upcall.c b/ofproto/ofproto-dpif-upcall.c
index ff9b2d5f2..242086584 100644
--- a/ofproto/ofproto-dpif-upcall.c
+++ b/ofproto/ofproto-dpif-upcall.c
@@ -639,25 +639,28 @@ execute_flow_miss(struct flow_miss *miss, struct dpif_op *ops, size_t *n_ops)
flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);
if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
- struct ofputil_packet_in pin;
-
- /* Extra-special case for fail-open mode.
- *
- * We are in fail-open mode and the packet matched the fail-open
- * rule, but we are connected to a controller too. We should send
- * the packet up to the controller in the hope that it will try to
- * set up a flow and thereby allow us to exit fail-open.
- *
- * See the top-level comment in fail-open.c for more information. */
- pin.packet = packet->data;
- pin.packet_len = packet->size;
- pin.reason = OFPR_NO_MATCH;
- pin.controller_id = 0;
- pin.table_id = 0;
- pin.cookie = 0;
- pin.send_len = 0; /* Not used for flow table misses. */
- flow_get_metadata(&miss->flow, &pin.fmd);
- ofproto_dpif_send_packet_in(ofproto, &pin);
+ LIST_FOR_EACH (packet, list_node, &miss->packets) {
+ struct ofputil_packet_in *pin;
+
+ /* Extra-special case for fail-open mode.
+ *
+ * We are in fail-open mode and the packet matched the fail-open
+ * rule, but we are connected to a controller too. We should send
+ * the packet up to the controller in the hope that it will try to
+ * set up a flow and thereby allow us to exit fail-open.
+ *
+ * See the top-level comment in fail-open.c for more information. */
+ pin = xmalloc(sizeof(*pin));
+ pin->packet = xmemdup(packet->data, packet->size);
+ pin->packet_len = packet->size;
+ pin->reason = OFPR_NO_MATCH;
+ pin->controller_id = 0;
+ pin->table_id = 0;
+ pin->cookie = 0;
+ pin->send_len = 0; /* Not used for flow table misses. */
+ flow_get_metadata(&miss->flow, &pin->fmd);
+ ofproto_dpif_send_packet_in(ofproto, pin);
+ }
}
if (miss->xout.slow) {
diff --git a/ofproto/ofproto-dpif-xlate.c b/ofproto/ofproto-dpif-xlate.c
index 8be808827..e80ec8491 100644
--- a/ofproto/ofproto-dpif-xlate.c
+++ b/ofproto/ofproto-dpif-xlate.c
@@ -289,6 +289,12 @@ xlate_remove_ofproto(struct ofproto_dpif *ofproto)
}
hmap_remove(&xbridges, &xbridge->hmap_node);
+ mac_learning_unref(xbridge->ml);
+ mbridge_unref(xbridge->mbridge);
+ dpif_sflow_unref(xbridge->sflow);
+ dpif_ipfix_unref(xbridge->ipfix);
+ stp_unref(xbridge->stp);
+ hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
}
diff --git a/ofproto/ofproto-dpif.c b/ofproto/ofproto-dpif.c
index 229b16cc7..46902159b 100644
--- a/ofproto/ofproto-dpif.c
+++ b/ofproto/ofproto-dpif.c
@@ -75,8 +75,6 @@ COVERAGE_DEFINE(subfacet_install_fail);
COVERAGE_DEFINE(packet_in_overflow);
COVERAGE_DEFINE(flow_mod_overflow);
-#define N_THREADS 16
-
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
@@ -431,6 +429,9 @@ struct dpif_backer {
/* Number of subfacets added or deleted from 'created' to 'last_minute.' */
unsigned long long int total_subfacet_add_count;
unsigned long long int total_subfacet_del_count;
+
+ /* Number of upcall handling threads. */
+ unsigned int n_handler_threads;
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
@@ -559,6 +560,8 @@ ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
}
+/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
+ * Takes ownership of 'pin' and pin->packet. */
void
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
struct ofputil_packet_in *pin)
@@ -700,11 +703,20 @@ type_run(const char *type)
VLOG_ERR("Failed to enable receiving packets in dpif.");
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
dpif_flow_flush(backer->dpif);
backer->need_revalidate = REV_RECONFIGURE;
}
+ /* If the n_handler_threads is reconfigured, call udpif_recv_set()
+ * to reset the handler threads. */
+ if (backer->n_handler_threads != n_handler_threads) {
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
+ }
+
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
@@ -1051,6 +1063,8 @@ type_wait(const char *type)
}
timer_wait(&backer->next_expiration);
+ dpif_wait(backer->dpif);
+ udpif_wait(backer->udpif);
}
/* Basic life-cycle. */
@@ -1209,7 +1223,9 @@ open_dpif_backer(const char *type, struct dpif_backer **backerp)
close_dpif_backer(backer);
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
backer->max_n_subfacet = 0;
backer->created = time_msec();
@@ -1404,7 +1420,7 @@ destruct(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
- struct ofputil_flow_mod *pin, *next_pin;
+ struct ofputil_packet_in *pin, *next_pin;
struct ofputil_flow_mod *fm, *next_fm;
struct oftable *table;
@@ -1441,7 +1457,7 @@ destruct(struct ofproto *ofproto_)
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) {
list_remove(&pin->list_node);
ofproto->n_pins--;
- free(pin->ofpacts);
+ free(CONST_CAST(void *, pin->packet));
free(pin);
}
ovs_mutex_unlock(&ofproto->pin_mutex);
@@ -1625,8 +1641,6 @@ wait(struct ofproto *ofproto_)
return;
}
- dpif_wait(ofproto->backer->dpif);
- udpif_wait(ofproto->backer->udpif);
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
diff --git a/ofproto/ofproto-provider.h b/ofproto/ofproto-provider.h
index aa262bc07..ef4d5883e 100644
--- a/ofproto/ofproto-provider.h
+++ b/ofproto/ofproto-provider.h
@@ -262,6 +262,10 @@ struct rule {
* ofproto-dpif implementation */
extern unsigned flow_eviction_threshold;
+/* Number of upcall handler threads. Only affects the ofproto-dpif
+ * implementation. */
+extern unsigned n_handler_threads;
+
/* Determines which model to use for handling misses in the ofproto-dpif
* implementation */
extern enum ofproto_flow_miss_model flow_miss_model;
diff --git a/ofproto/ofproto.c b/ofproto/ofproto.c
index bbdb2d208..c8edb2d47 100644
--- a/ofproto/ofproto.c
+++ b/ofproto/ofproto.c
@@ -21,6 +21,7 @@
#include
#include
#include
+#include
#include "bitmap.h"
#include "byte-order.h"
#include "classifier.h"
@@ -229,6 +230,7 @@ static size_t n_ofproto_classes;
static size_t allocated_ofproto_classes;
unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
+unsigned n_handler_threads;
enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
/* Map from datapath name to struct ofproto, for use by unixctl commands. */
@@ -628,6 +630,18 @@ ofproto_set_mac_table_config(struct ofproto *ofproto, unsigned idle_time,
}
}
+/* Sets number of upcall handler threads. The default is
+ * (number of online cores - 1). */
+void
+ofproto_set_n_handler_threads(unsigned limit)
+{
+ if (limit) {
+ n_handler_threads = limit;
+ } else {
+ n_handler_threads = MAX(1, sysconf(_SC_NPROCESSORS_ONLN) - 1);
+ }
+}
+
void
ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc)
{
@@ -3345,7 +3359,6 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
struct rule *victim;
struct rule *rule;
uint8_t table_id;
- bool overlaps;
int error;
error = check_table_id(ofproto, fm->table_id);
@@ -3402,13 +3415,18 @@ add_flow(struct ofproto *ofproto, struct ofconn *ofconn,
}
/* Check for overlap, if requested. */
- ovs_rwlock_rdlock(&table->cls.rwlock);
- overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
- ovs_rwlock_unlock(&table->cls.rwlock);
- if (fm->flags & OFPFF_CHECK_OVERLAP && overlaps) {
- cls_rule_destroy(&rule->cr);
- ofproto->ofproto_class->rule_dealloc(rule);
- return OFPERR_OFPFMFC_OVERLAP;
+ if (fm->flags & OFPFF_CHECK_OVERLAP) {
+ bool overlaps;
+
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+
+ if (overlaps) {
+ cls_rule_destroy(&rule->cr);
+ ofproto->ofproto_class->rule_dealloc(rule);
+ return OFPERR_OFPFMFC_OVERLAP;
+ }
}
/* FIXME: Implement OFPFF12_RESET_COUNTS */
diff --git a/ofproto/ofproto.h b/ofproto/ofproto.h
index 1bde3859f..516bbad5b 100644
--- a/ofproto/ofproto.h
+++ b/ofproto/ofproto.h
@@ -247,6 +247,7 @@ void ofproto_set_flow_miss_model(unsigned model);
void ofproto_set_forward_bpdu(struct ofproto *, bool forward_bpdu);
void ofproto_set_mac_table_config(struct ofproto *, unsigned idle_time,
size_t max_entries);
+void ofproto_set_n_handler_threads(unsigned limit);
void ofproto_set_dp_desc(struct ofproto *, const char *dp_desc);
int ofproto_set_snoops(struct ofproto *, const struct sset *snoops);
int ofproto_set_netflow(struct ofproto *,
diff --git a/vswitchd/bridge.c b/vswitchd/bridge.c
index abbda5668..3d6312580 100644
--- a/vswitchd/bridge.c
+++ b/vswitchd/bridge.c
@@ -502,6 +502,9 @@ bridge_reconfigure(const struct ovsrec_open_vswitch *ovs_cfg)
smap_get_int(&ovs_cfg->other_config, "flow-eviction-threshold",
OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT));
+ ofproto_set_n_handler_threads(
+ smap_get_int(&ovs_cfg->other_config, "n-handler-threads", 0));
+
bridge_configure_flow_miss_model(smap_get(&ovs_cfg->other_config,
"force-miss-model"));
diff --git a/vswitchd/vswitch.xml b/vswitchd/vswitch.xml
index b89d58c99..5bbe943f2 100644
--- a/vswitchd/vswitch.xml
+++ b/vswitchd/vswitch.xml
@@ -158,6 +158,22 @@
+
+
+
+ Specifies the number of threads for software datapaths to use for
+ handling new flows. The default is one less than the number of
+ online CPU cores (but at least 1).
+
+
+ This configuration is per datapath. If you have more than one
+ software datapath (e.g. some <code>system</code>
+ bridges and some
+ <code>netdev</code> bridges), then the total number of threads is
+ <code>n-handler-threads</code> times the number of software
+ datapaths.
+
+
+