#include "datapath.h"
#include "actions.h"
#include "flow.h"
+#include "table.h"
#include "vport-internal_dev.h"
#include "compat.h"
int i;
if (devnamep) {
- err = -EFAULT;
- if (strncpy_from_user(devname, devnamep, IFNAMSIZ - 1) < 0)
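+ /* strncpy_from_user() returns the length of the copied string on
+ * success, or the full count if the source string did not fit, so a
+ * return value of IFNAMSIZ or more means the name was too long. */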
+ int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
+ if (retval < 0) {
+ err = -EFAULT;
goto err;
- devname[IFNAMSIZ - 1] = '\0';
+ } else if (retval >= IFNAMSIZ) {
+ err = -ENAMETOOLONG;
+ goto err;
+ }
} else {
snprintf(devname, sizeof devname, "of%d", dp_idx);
}
/* Allocate table. */
err = -ENOMEM;
- rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
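+ /* A bucket count of 0 asks tbl_create() for its default initial size. */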
+ rcu_assign_pointer(dp->table, tbl_create(0));
if (!dp->table)
goto err_free_dp;
/* Set up our datapath device. */
- strncpy(internal_dev_port.devname, devname, IFNAMSIZ - 1);
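+ /* devname is already NUL-terminated and the two buffers have the same
+ * size (verified at compile time below), so a plain strcpy() is safe. */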
+ BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
+ strcpy(internal_dev_port.devname, devname);
internal_dev_port.flags = ODP_PORT_INTERNAL;
err = new_dp_port(dp, &internal_dev_port, ODPP_LOCAL);
if (err) {
err_destroy_local_port:
dp_detach_port(dp->ports[ODPP_LOCAL], 1);
err_destroy_table:
- dp_table_destroy(dp->table, 0);
+ tbl_destroy(dp->table, NULL);
err_free_dp:
kfree(dp);
err_put_module:
dp_detach_port(dp->ports[ODPP_LOCAL], 1);
- dp_table_destroy(dp->table, 1);
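+ /* Destroy the flow table, freeing every flow in it. */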
+ tbl_destroy(dp->table, flow_free_tbl);
for (i = 0; i < DP_N_QUEUES; i++)
skb_queue_purge(&dp->queues[i]);
if (err)
goto out_unlock_dp;
- if (!(port.flags & ODP_PORT_INTERNAL))
- set_internal_devs_mtu(dp);
+ set_internal_devs_mtu(dp);
dp_sysfs_add_if(dp->ports[port_no]);
err = __put_user(port_no, &portp->port);
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
struct odp_flow_key key;
- struct sw_flow *flow;
+ struct tbl_node *flow_node;
WARN_ON_ONCE(skb_shared(skb));
skb_warn_if_lro(skb);
OVS_CB(skb)->dp_port = p;
- compute_ip_summed(skb, false);
/* BHs are off so we don't have to use get_cpu()/put_cpu() here. */
stats = percpu_ptr(dp->stats_percpu, smp_processor_id());
}
}
- flow = dp_table_lookup(rcu_dereference(dp->table), &key);
- if (flow) {
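+ /* The generic table stores struct tbl_node, so look up with the flow's
+ * hash and comparator and convert the result back with flow_cast(). */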
+ flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
+ if (flow_node) {
+ struct sw_flow *flow = flow_cast(flow_node);
struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
flow_used(flow, skb);
execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
}
#if defined(CONFIG_XEN) && defined(HAVE_PROTO_DATA_VALID)
-/* This code is based on a skb_checksum_setup from net/dev/core.c from a
- * combination of Lenny's 2.6.26 Xen kernel and Xen's
- * linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call this function
- * directly because it isn't exported in all versions. */
-static int skb_pull_up_to(struct sk_buff *skb, void *ptr)
-{
- if (ptr < (void *)skb->tail)
- return 1;
- if (__pskb_pull_tail(skb,
- ptr - (void *)skb->data - skb_headlen(skb))) {
- return 1;
- } else {
- return 0;
- }
-}
-
+/* This code is based on skb_checksum_setup() from Xen's net/core/dev.c. We
+ * can't call this function directly because it isn't exported in all
+ * versions. */
int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
struct iphdr *iph;
if (skb->protocol != htons(ETH_P_IP))
goto out;
- if (!skb_pull_up_to(skb, skb_network_header(skb) + sizeof(struct iphdr)))
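+ /* pskb_may_pull() takes a length relative to skb->data rather than a
+ * pointer, so convert the end of the IP header into an offset. */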
+ if (!pskb_may_pull(skb, skb_network_header(skb) + sizeof(struct iphdr) - skb->data))
goto out;
iph = ip_hdr(skb);
goto out;
}
- if (!skb_pull_up_to(skb, th + csum_offset + 2))
+ if (!pskb_may_pull(skb, th + csum_offset + 2 - skb->data))
goto out;
skb->ip_summed = CHECKSUM_PARTIAL;
* be computed if it is sent off box. Unfortunately on earlier kernels,
* this case is impossible to distinguish from #2, despite having opposite
* meanings. Xen adds an extra field on earlier kernels (see #4) in order
- * to distinguish the different states. The only real user of this type
- * with bridging is Xen (on later kernels).
+ * to distinguish the different states.
* 4. CHECKSUM_UNNECESSARY (with proto_csum_blank true): This packet was
* generated locally by a Xen DomU and has a partial checksum. If it is
* handled on this machine (Dom0 or DomU), then the checksum will not be
* packet is processed by the local IP stack, in which case it will need to
* be reverified). If we receive a packet with CHECKSUM_HW that really means
* CHECKSUM_PARTIAL, it will be sent with the wrong checksum. However, there
- * shouldn't be any devices that do this with bridging.
- *
- * The bridge has similar behavior and this function closely resembles
- * skb_forward_csum(). It is slightly different because we are only concerned
- * with bridging and not other types of forwarding and can get away with
- * slightly more optimal behavior.*/
+ * shouldn't be any devices that do this with bridging. */
void
compute_ip_summed(struct sk_buff *skb, bool xmit)
{
break;
#ifdef CHECKSUM_HW
/* In theory this could be either CHECKSUM_PARTIAL or CHECKSUM_COMPLETE.
- * However, we should only get CHECKSUM_PARTIAL packets from Xen, which
- * uses some special fields to represent this (see below). Since we
- * can only make one type work, pick the one that actually happens in
- * practice.
+ * However, on the receive side we should only get CHECKSUM_PARTIAL
+ * packets from Xen, which uses some special fields to represent this
+ * (see below). Since we can only make one type work, pick the one
+ * that actually happens in practice.
*
- * The one exception to this is if we are on the transmit path
- * (basically after skb_checksum_setup() has been run) the type has
- * already been converted, so we should stay with that. */
+ * On the transmit side (basically after skb_checksum_setup() has been
+ * run or on internal device transmit), CHECKSUM_COMPLETE packets are
+ * never generated, so assume CHECKSUM_PARTIAL. */
case CHECKSUM_HW:
if (!xmit)
OVS_CB(skb)->ip_summed = OVS_CSUM_COMPLETE;
#endif
}
+/* This function closely resembles skb_forward_csum() used by the bridge. It
+ * differs slightly because we are concerned only with bridging, not other
+ * types of forwarding, so we can get away with slightly more optimal
+ * behavior. */
void
forward_ip_summed(struct sk_buff *skb)
{
skb->next = NULL;
/* If a checksum-deferred packet is forwarded to the
- * controller, correct the pointers and checksum. This happens
- * on a regular basis only on Xen, on which VMs can pass up
- * packets that do not have their checksum computed.
+ * controller, correct the pointers and checksum.
*/
err = vswitch_skb_checksum_setup(skb);
if (err)
goto err_kfree_skbs;
-#ifndef CHECKSUM_HW
+
if (skb->ip_summed == CHECKSUM_PARTIAL) {
+
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Until 2.6.22, the start of the transport header was
* also the start of data to be checksummed. Linux
skb_set_transport_header(skb, skb->csum_start -
skb_headroom(skb));
#endif
+
err = skb_checksum_help(skb);
if (err)
goto err_kfree_skbs;
}
-#else
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(skb, 0);
- if (err)
- goto err_kfree_skbs;
- }
-#endif
err = skb_cow(skb, sizeof *header);
if (err)
static int flush_flows(struct datapath *dp)
{
- dp->n_flows = 0;
- return dp_table_flush(dp);
+ struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *new_table;
+
+ new_table = tbl_create(0);
+ if (!new_table)
+ return -ENOMEM;
+
+ rcu_assign_pointer(dp->table, new_table);
+
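+ /* RCU readers may still be iterating the old table, so defer its
+ * destruction (and the freeing of its flows) until a grace period
+ * has elapsed. */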
+ tbl_deferred_destroy(old_table, flow_free_tbl);
+
+ return 0;
}
static int validate_actions(const struct sw_flow_actions *actions)
flow->byte_count = 0;
}
+static int expand_table(struct datapath *dp)
+{
+ struct tbl *old_table = rcu_dereference(dp->table);
+ struct tbl *new_table;
+
+ new_table = tbl_expand(old_table);
+ if (IS_ERR(new_table))
+ return PTR_ERR(new_table);
+
+ rcu_assign_pointer(dp->table, new_table);
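+ /* tbl_expand() rehashes the existing nodes into the new table, so pass
+ * a NULL destructor: only the old bucket array is freed, again after an
+ * RCU grace period. */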
+ tbl_deferred_destroy(old_table, NULL);
+
+ return 0;
+}
+
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
struct odp_flow_put uf;
+ struct tbl_node *flow_node;
struct sw_flow *flow;
- struct dp_table *table;
+ struct tbl *table;
struct odp_flow_stats stats;
int error;
memset(uf.flow.key.reserved, 0, sizeof uf.flow.key.reserved);
table = rcu_dereference(dp->table);
- flow = dp_table_lookup(table, &uf.flow.key);
- if (!flow) {
+ flow_node = tbl_lookup(table, &uf.flow.key, flow_hash(&uf.flow.key), flow_cmp);
+ if (!flow_node) {
/* No such flow. */
struct sw_flow_actions *acts;
goto error;
/* Expand table, if necessary, to make room. */
- if (dp->n_flows >= table->n_buckets) {
- error = -ENOSPC;
- if (table->n_buckets >= DP_MAX_BUCKETS)
- goto error;
-
- error = dp_table_expand(dp);
+ if (tbl_count(table) >= tbl_n_buckets(table)) {
+ error = expand_table(dp);
if (error)
goto error;
table = rcu_dereference(dp->table);
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- error = dp_table_insert(table, flow);
+ error = tbl_insert(table, &flow->tbl_node, flow_hash(&flow->key));
if (error)
goto error_free_flow_acts;
- dp->n_flows++;
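+ /* The table keeps its own element count; see tbl_count(). */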
+
memset(&stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts, *new_acts;
unsigned long int flags;
+ flow = flow_cast(flow_node);
+
/* Bail out if we're not allowed to modify an existing flow. */
error = -EEXIST;
if (!(uf.flags & ODPPF_MODIFY))
static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
{
- struct dp_table *table = rcu_dereference(dp->table);
+ struct tbl *table = rcu_dereference(dp->table);
struct odp_flow uf;
+ struct tbl_node *flow_node;
struct sw_flow *flow;
int error;
goto error;
memset(uf.key.reserved, 0, sizeof uf.key.reserved);
- flow = dp_table_lookup(table, &uf.key);
+ flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
error = -ENOENT;
- if (!flow)
+ if (!flow_node)
goto error;
- /* XXX redundant lookup */
- error = dp_table_delete(table, flow);
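+ /* flow_node came from the tbl_lookup() above, so it can be removed
+ * directly without a second lookup. */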
+ error = tbl_remove(table, flow_node);
if (error)
goto error;
* be using this flow. We used to synchronize_rcu() to make sure that
* we get completely accurate stats, but that blows our performance,
* badly. */
- dp->n_flows--;
+
+ flow = flow_cast(flow_node);
error = answer_query(flow, 0, ufp);
flow_deferred_free(flow);
static int query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
{
- struct dp_table *table = rcu_dereference(dp->table);
+ struct tbl *table = rcu_dereference(dp->table);
int i;
for (i = 0; i < flowvec->n_flows; i++) {
struct odp_flow __user *ufp = &flowvec->flows[i];
struct odp_flow uf;
- struct sw_flow *flow;
+ struct tbl_node *flow_node;
int error;
if (__copy_from_user(&uf, ufp, sizeof uf))
return -EFAULT;
memset(uf.key.reserved, 0, sizeof uf.key.reserved);
- flow = dp_table_lookup(table, &uf.key);
- if (!flow)
+ flow_node = tbl_lookup(table, &uf.key, flow_hash(&uf.key), flow_cmp);
+ if (!flow_node)
error = __put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, uf.flags, ufp);
+ error = answer_query(flow_cast(flow_node), uf.flags, ufp);
if (error)
return -EFAULT;
}
int listed_flows;
};
-static int list_flow(struct sw_flow *flow, void *cbdata_)
+static int list_flow(struct tbl_node *node, void *cbdata_)
{
+ struct sw_flow *flow = flow_cast(node);
struct list_flows_cbdata *cbdata = cbdata_;
struct odp_flow __user *ufp = &cbdata->uflows[cbdata->listed_flows++];
int error;
cbdata.uflows = flowvec->flows;
cbdata.n_flows = flowvec->n_flows;
cbdata.listed_flows = 0;
- error = dp_table_foreach(rcu_dereference(dp->table),
- list_flow, &cbdata);
+ error = tbl_foreach(rcu_dereference(dp->table), list_flow, &cbdata);
return error ? error : cbdata.listed_flows;
}
static int get_dp_stats(struct datapath *dp, struct odp_stats __user *statsp)
{
+ struct tbl *table = rcu_dereference(dp->table);
struct odp_stats stats;
int i;
- stats.n_flows = dp->n_flows;
- stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
- stats.max_capacity = DP_MAX_BUCKETS;
+ stats.n_flows = tbl_count(table);
+ stats.cur_capacity = tbl_n_buckets(table);
+ stats.max_capacity = TBL_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
stats.max_groups = DP_MAX_GROUPS;
}
/* Sets the MTU of all datapath devices to the minimum of the ports. Must
- * be called with RTNL lock and dp_mutex. */
+ * be called with RTNL lock. */
void set_internal_devs_mtu(const struct datapath *dp)
{
struct dp_port *p;