#ifdef HAVE_RHEL_OVS_HOOK
rcu_assign_pointer(dev->ax25_ptr, rx_handler_data);
nr_bridges++;
- rcu_assign_pointer(openvswitch_handle_frame_hook, rx_handler_data);
+ rcu_assign_pointer(openvswitch_handle_frame_hook, rx_handler);
#else
if (dev->br_port)
return -EBUSY;
#else /* for 2.6.32* */
-int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
-void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
+int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet);
+void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet);
#define DEFINE_COMPAT_PNET_REG_FUNC(TYPE) \
\
static struct rpl_pernet_operations *pnet_gen_##TYPE; \
-static int __net_init compat_init_net_gen_##TYPE(struct net *net) \
+static int compat_init_net_gen_##TYPE(struct net *net) \
{ \
return compat_init_net(net, pnet_gen_##TYPE); \
} \
\
-static void __net_exit compat_exit_net_gen_##TYPE(struct net *net) \
+static void compat_exit_net_gen_##TYPE(struct net *net) \
{ \
compat_exit_net(net, pnet_gen_##TYPE); \
} \
\
-static int __net_init rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+static int rpl_register_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
{ \
pnet_gen_##TYPE = rpl_pnet; \
rpl_pnet->ops.init = compat_init_net_gen_##TYPE; \
return register_pernet_gen_##TYPE(pnet_gen_##TYPE->id, &rpl_pnet->ops); \
} \
\
-static void __net_exit rpl_unregister_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
+static void rpl_unregister_pernet_gen_##TYPE(struct rpl_pernet_operations *rpl_pnet) \
{ \
unregister_pernet_gen_##TYPE(*pnet_gen_##TYPE->id, &rpl_pnet->ops); \
}
static int net_assign_generic(struct net *net, int id, void *data);
#endif
-int __net_init compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
+int compat_init_net(struct net *net, struct rpl_pernet_operations *pnet)
{
int err;
void *ovs_net = kzalloc(pnet->size, GFP_KERNEL);
return err;
}
-void __net_exit compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
+void compat_exit_net(struct net *net, struct rpl_pernet_operations *pnet)
{
void *ovs_net = net_generic(net, *pnet->id);
queue_work(&vh->del_work);
}
-static __net_init int vxlan_init_net(struct net *net)
+static int vxlan_init_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
unsigned int h;
#include <config.h>
#include "bfd.h"
+#include <sys/types.h>
#include <arpa/inet.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
/* Protects all members below. */
struct ovs_mutex mutex OVS_ACQ_AFTER(dummy_list_mutex);
- uint8_t hwaddr[ETH_ADDR_LEN];
- int mtu;
- struct netdev_stats stats;
- enum netdev_flags flags;
- unsigned int change_seq;
- int ifindex;
-
- struct pstream *pstream;
- struct dummy_stream *streams;
- size_t n_streams;
-
- struct list rxes; /* List of child "netdev_rx_dummy"s. */
+ uint8_t hwaddr[ETH_ADDR_LEN] OVS_GUARDED;
+ int mtu OVS_GUARDED;
+ struct netdev_stats stats OVS_GUARDED;
+ enum netdev_flags flags OVS_GUARDED;
+ unsigned int change_seq OVS_GUARDED;
+ int ifindex OVS_GUARDED;
+
+ struct pstream *pstream OVS_GUARDED;
+ struct dummy_stream *streams OVS_GUARDED;
+ size_t n_streams OVS_GUARDED;
+
+ struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
};
/* Max 'recv_queue_len' in struct netdev_dummy. */
atomic_add(&next_n, 1, &n);
ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_lock(&netdev->mutex);
netdev->hwaddr[0] = 0xaa;
netdev->hwaddr[1] = 0x55;
netdev->hwaddr[2] = n >> 24;
netdev->n_streams = 0;
list_init(&netdev->rxes);
+ ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_lock(&dummy_list_mutex);
list_push_back(&dummy_list, &netdev->list_node);
list_remove(&netdev->list_node);
ovs_mutex_unlock(&dummy_list_mutex);
+ ovs_mutex_lock(&netdev->mutex);
pstream_close(netdev->pstream);
for (i = 0; i < netdev->n_streams; i++) {
dummy_stream_close(&netdev->streams[i]);
}
free(netdev->streams);
+ ovs_mutex_unlock(&netdev->mutex);
ovs_mutex_destroy(&netdev->mutex);
}
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
+ ovs_mutex_lock(&netdev->mutex);
if (netdev->ifindex >= 0) {
smap_add_format(args, "ifindex", "%d", netdev->ifindex);
}
if (netdev->pstream) {
smap_add(args, "pstream", pstream_get_name(netdev->pstream));
}
+ ovs_mutex_unlock(&netdev->mutex);
+
return 0;
}
const struct eth_header *eth = buffer;
int max_size;
+ ovs_mutex_lock(&dev->mutex);
max_size = dev->mtu + ETH_HEADER_LEN;
+ ovs_mutex_unlock(&dev->mutex);
+
if (eth->eth_type == htons(ETH_TYPE_VLAN)) {
max_size += VLAN_HEADER_LEN;
}
return error;
}
-/* Returns the maximum size of transmitted (and received) packets on 'netdev',
- * in bytes, not including the hardware header; thus, this is typically 1500
- * bytes for Ethernet devices. */
static int
-netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+netdev_linux_get_mtu__(struct netdev_linux *netdev, int *mtup)
{
- struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
- ovs_mutex_lock(&netdev->mutex);
if (!(netdev->cache_valid & VALID_MTU)) {
struct ifreq ifr;
netdev->netdev_mtu_error = af_inet_ifreq_ioctl(
- netdev_get_name(netdev_), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
+ netdev_get_name(&netdev->up), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
netdev->mtu = ifr.ifr_mtu;
netdev->cache_valid |= VALID_MTU;
}
if (!error) {
*mtup = netdev->mtu;
}
+
+ return error;
+}
+
+/* Returns the maximum size of transmitted (and received) packets on 'netdev',
+ * in bytes, not including the hardware header; thus, this is typically 1500
+ * bytes for Ethernet devices. */
+static int
+netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+
+ ovs_mutex_lock(&netdev->mutex);
+ error = netdev_linux_get_mtu__(netdev, mtup);
ovs_mutex_unlock(&netdev->mutex);
return error;
static void
netdev_linux_read_features(struct netdev_linux *netdev)
- OVS_REQUIRES(netdev->mutex)
{
struct ethtool_cmd ecmd;
uint32_t speed;
int error;
int mtu;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot set up HTB on device %s that lacks MTU",
netdev_get_name(netdev));
}
static void
-htb_parse_qdisc_details__(struct netdev *netdev,
+htb_parse_qdisc_details__(struct netdev *netdev_,
const struct smap *details, struct htb_class *hc)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
const char *max_rate_s;
max_rate_s = smap_get(details, "max-rate");
if (!hc->max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, &current, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
hc->max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}
hc->min_rate = hc->max_rate;
const char *priority_s = smap_get(details, "priority");
int mtu, error;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot parse HTB class on device %s that lacks MTU",
netdev_get_name(netdev));
}
static void
-hfsc_parse_qdisc_details__(struct netdev *netdev, const struct smap *details,
+hfsc_parse_qdisc_details__(struct netdev *netdev_, const struct smap *details,
struct hfsc_class *class)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
uint32_t max_rate;
const char *max_rate_s;
if (!max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, &current, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}
waiter = xmalloc(sizeof *waiter);
waiter->seq = seq;
hmap_insert(&seq->waiters, &waiter->hmap_node, hash);
+ waiter->ovsthread_id = id;
waiter->value = value;
waiter->thread = seq_thread_get();
list_push_back(&waiter->thread->waiters, &waiter->list_node);
flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);
if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
- struct ofputil_packet_in pin;
-
- /* Extra-special case for fail-open mode.
- *
- * We are in fail-open mode and the packet matched the fail-open
- * rule, but we are connected to a controller too. We should send
- * the packet up to the controller in the hope that it will try to
- * set up a flow and thereby allow us to exit fail-open.
- *
- * See the top-level comment in fail-open.c for more information. */
- pin.packet = packet->data;
- pin.packet_len = packet->size;
- pin.reason = OFPR_NO_MATCH;
- pin.controller_id = 0;
- pin.table_id = 0;
- pin.cookie = 0;
- pin.send_len = 0; /* Not used for flow table misses. */
- flow_get_metadata(&miss->flow, &pin.fmd);
- ofproto_dpif_send_packet_in(ofproto, &pin);
+ LIST_FOR_EACH (packet, list_node, &miss->packets) {
+ struct ofputil_packet_in *pin;
+
+ /* Extra-special case for fail-open mode.
+ *
+ * We are in fail-open mode and the packet matched the fail-open
+ * rule, but we are connected to a controller too. We should send
+ * the packet up to the controller in the hope that it will try to
+ * set up a flow and thereby allow us to exit fail-open.
+ *
+ * See the top-level comment in fail-open.c for more information. */
+ pin = xmalloc(sizeof(*pin));
+ pin->packet = xmemdup(packet->data, packet->size);
+ pin->packet_len = packet->size;
+ pin->reason = OFPR_NO_MATCH;
+ pin->controller_id = 0;
+ pin->table_id = 0;
+ pin->cookie = 0;
+ pin->send_len = 0; /* Not used for flow table misses. */
+ flow_get_metadata(&miss->flow, &pin->fmd);
+ ofproto_dpif_send_packet_in(ofproto, pin);
+ }
}
if (miss->xout.slow) {
}
hmap_remove(&xbridges, &xbridge->hmap_node);
+ mac_learning_unref(xbridge->ml);
+ mbridge_unref(xbridge->mbridge);
+ dpif_sflow_unref(xbridge->sflow);
+ dpif_ipfix_unref(xbridge->ipfix);
+ stp_unref(xbridge->stp);
+ hmap_destroy(&xbridge->xports);
free(xbridge->name);
free(xbridge);
}
COVERAGE_DEFINE(packet_in_overflow);
COVERAGE_DEFINE(flow_mod_overflow);
-#define N_THREADS 16
-
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 }; /* Used for internal hidden rules. */
/* Number of subfacets added or deleted from 'created' to 'last_minute.' */
unsigned long long int total_subfacet_add_count;
unsigned long long int total_subfacet_del_count;
+
+ /* Number of upcall handling threads. */
+ unsigned int n_handler_threads;
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
ovs_mutex_unlock(&ofproto->flow_mod_mutex);
}
+/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
+ * Takes ownership of 'pin' and pin->packet. */
void
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
struct ofputil_packet_in *pin)
VLOG_ERR("Failed to enable receiving packets in dpif.");
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
dpif_flow_flush(backer->dpif);
backer->need_revalidate = REV_RECONFIGURE;
}
+ /* If n_handler_threads has been reconfigured, call udpif_recv_set()
+ * to reset the handler threads. */
+ if (backer->n_handler_threads != n_handler_threads) {
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
+ }
+
if (backer->need_revalidate) {
struct ofproto_dpif *ofproto;
struct simap_node *node;
}
timer_wait(&backer->next_expiration);
+ dpif_wait(backer->dpif);
+ udpif_wait(backer->udpif);
}
\f
/* Basic life-cycle. */
close_dpif_backer(backer);
return error;
}
- udpif_recv_set(backer->udpif, N_THREADS, backer->recv_set_enable);
+ udpif_recv_set(backer->udpif, n_handler_threads,
+ backer->recv_set_enable);
+ backer->n_handler_threads = n_handler_threads;
backer->max_n_subfacet = 0;
backer->created = time_msec();
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct rule_dpif *rule, *next_rule;
- struct ofputil_flow_mod *pin, *next_pin;
+ struct ofputil_packet_in *pin, *next_pin;
struct ofputil_flow_mod *fm, *next_fm;
struct oftable *table;
LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &ofproto->pins) {
list_remove(&pin->list_node);
ofproto->n_pins--;
- free(pin->ofpacts);
+ free(CONST_CAST(void *, pin->packet));
free(pin);
}
ovs_mutex_unlock(&ofproto->pin_mutex);
return;
}
- dpif_wait(ofproto->backer->dpif);
- udpif_wait(ofproto->backer->udpif);
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
* ofproto-dpif implementation */
extern unsigned flow_eviction_threshold;
+/* Number of upcall handler threads. Only affects the ofproto-dpif
+ * implementation. */
+extern unsigned n_handler_threads;
+
/* Determines which model to use for handling misses in the ofproto-dpif
* implementation */
extern enum ofproto_flow_miss_model flow_miss_model;
#include <inttypes.h>
#include <stdbool.h>
#include <stdlib.h>
+#include <unistd.h>
#include "bitmap.h"
#include "byte-order.h"
#include "classifier.h"
static size_t allocated_ofproto_classes;
unsigned flow_eviction_threshold = OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT;
+unsigned n_handler_threads;
enum ofproto_flow_miss_model flow_miss_model = OFPROTO_HANDLE_MISS_AUTO;
/* Map from datapath name to struct ofproto, for use by unixctl commands. */
}
}
+/* Sets the number of upcall handler threads. The default is one less than
+ * the number of online cores, but at least 1. */
+void
+ofproto_set_n_handler_threads(unsigned limit)
+{
+ if (limit) {
+ n_handler_threads = limit;
+ } else {
+ n_handler_threads = MAX(1, sysconf(_SC_NPROCESSORS_ONLN) - 1);
+ }
+}
+
void
ofproto_set_dp_desc(struct ofproto *p, const char *dp_desc)
{
struct rule *victim;
struct rule *rule;
uint8_t table_id;
- bool overlaps;
int error;
error = check_table_id(ofproto, fm->table_id);
}
/* Check for overlap, if requested. */
- ovs_rwlock_rdlock(&table->cls.rwlock);
- overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
- ovs_rwlock_unlock(&table->cls.rwlock);
- if (fm->flags & OFPFF_CHECK_OVERLAP && overlaps) {
- cls_rule_destroy(&rule->cr);
- ofproto->ofproto_class->rule_dealloc(rule);
- return OFPERR_OFPFMFC_OVERLAP;
+ if (fm->flags & OFPFF_CHECK_OVERLAP) {
+ bool overlaps;
+
+ ovs_rwlock_rdlock(&table->cls.rwlock);
+ overlaps = classifier_rule_overlaps(&table->cls, &rule->cr);
+ ovs_rwlock_unlock(&table->cls.rwlock);
+
+ if (overlaps) {
+ cls_rule_destroy(&rule->cr);
+ ofproto->ofproto_class->rule_dealloc(rule);
+ return OFPERR_OFPFMFC_OVERLAP;
+ }
}
/* FIXME: Implement OFPFF12_RESET_COUNTS */
void ofproto_set_forward_bpdu(struct ofproto *, bool forward_bpdu);
void ofproto_set_mac_table_config(struct ofproto *, unsigned idle_time,
size_t max_entries);
+void ofproto_set_n_handler_threads(unsigned limit);
void ofproto_set_dp_desc(struct ofproto *, const char *dp_desc);
int ofproto_set_snoops(struct ofproto *, const struct sset *snoops);
int ofproto_set_netflow(struct ofproto *,
smap_get_int(&ovs_cfg->other_config, "flow-eviction-threshold",
OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT));
+ ofproto_set_n_handler_threads(
+ smap_get_int(&ovs_cfg->other_config, "n-handler-threads", 0));
+
bridge_configure_flow_miss_model(smap_get(&ovs_cfg->other_config,
"force-miss-model"));
</dl>
</p>
</column>
+
+ <column name="other_config" key="n-handler-threads"
+ type='{"type": "integer", "minInteger": 1}'>
+ <p>
+ Specifies the number of threads for software datapaths to use for
+ handling new flows. The default is one less than the number of
+ online CPU cores (but at least 1).
+ </p>
+ <p>
+ This configuration is per datapath. If you have more than one
+ software datapath (e.g. some <code>system</code> bridges and some
+ <code>netdev</code> bridges), then the total number of threads is
+ <code>n-handler-threads</code> times the number of software
+ datapaths.
+ </p>
+ </column>
</group>
<group title="Status">