EXPORT_SYMBOL(dp_ioctl_hook);
/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
- * by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
- * maintained by the Generic Netlink code, but the timeout path needs mutual
- * exclusion too.
+ * by dp_mutex.
*
* dp_mutex nests inside the RTNL lock: if you need both you must take the RTNL
* lock first.
for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
if (!dp->ports[port_no])
goto got_port_no;
- err = -EXFULL;
+ err = -EFBIG;
goto out_unlock_dp;
got_port_no:
int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; }
#endif /* CONFIG_XEN && linux == 2.6.18 */
+/* Append each packet in the 'skb' list to 'queue'. There will be only one
+ * packet unless we broke up a GSO packet. */
+static int
+queue_control_packets(struct sk_buff *skb, struct sk_buff_head *queue,
+ int queue_no, u32 arg)
+{
+ struct sk_buff *nskb;
+ int port_no;
+ int err;
+
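+ /* Figure out the port number of the device the packet arrived on,
+ * defaulting to the local port. */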
+ port_no = ODPP_LOCAL;
+ if (skb->dev) {
+ if (skb->dev->br_port)
+ port_no = skb->dev->br_port->port_no;
+ else if (is_dp_dev(skb->dev))
+ port_no = dp_dev_priv(skb->dev)->port_no;
+ }
+
+ do {
+ struct odp_msg *header;
+
+ nskb = skb->next;
+ skb->next = NULL;
+
+ /* If a checksum-deferred packet is forwarded to the
+ * controller, correct the pointers and checksum. This happens
+ * regularly only on Xen, where VMs can pass up packets whose
+ * checksums have not yet been computed.
+ */
+ err = vswitch_skb_checksum_setup(skb);
+ if (err)
+ goto err_kfree_skbs;
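+ /* CHECKSUM_HW was split into CHECKSUM_PARTIAL and
+ * CHECKSUM_COMPLETE in Linux 2.6.19, so testing whether it is
+ * defined distinguishes the old checksum API from the new one. */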
+#ifndef CHECKSUM_HW
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
+ /* Until 2.6.22, the start of the transport header was
+ * also the start of data to be checksummed. Linux
+ * 2.6.22 introduced the csum_start field for this
+ * purpose, but we should point the transport header to
+ * it anyway for backward compatibility, as
+ * dev_queue_xmit() does even in 2.6.28. */
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+#endif
+ err = skb_checksum_help(skb);
+ if (err)
+ goto err_kfree_skbs;
+ }
+#else
+ if (skb->ip_summed == CHECKSUM_HW) {
+ err = skb_checksum_help(skb, 0);
+ if (err)
+ goto err_kfree_skbs;
+ }
+#endif
+
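+ /* Make room to prepend an odp_msg header; skb_cow() reallocates
+ * the buffer if the skb is cloned or lacks headroom. */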
+ err = skb_cow(skb, sizeof *header);
+ if (err)
+ goto err_kfree_skbs;
+
+ header = (struct odp_msg*)__skb_push(skb, sizeof *header);
+ header->type = queue_no;
+ header->length = skb->len;
+ header->port = port_no;
+ header->reserved = 0;
+ header->arg = arg;
+ skb_queue_tail(queue, skb);
+
+ skb = nskb;
+ } while (skb);
+ return 0;
+
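+/* On error, free the packet that failed along with any remaining
+ * unqueued GSO segments. */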
+err_kfree_skbs:
+ kfree_skb(skb);
+ while ((skb = nskb) != NULL) {
+ nskb = skb->next;
+ kfree_skb(skb);
+ }
+ return err;
+}
+
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
u32 arg)
{
struct dp_stats_percpu *stats;
struct sk_buff_head *queue;
- int port_no;
int err;
WARN_ON_ONCE(skb_shared(skb));
if (skb_queue_len(queue) >= DP_MAX_QUEUE_LEN)
goto err_kfree_skb;
- /* If a checksum-deferred packet is forwarded to the controller,
- * correct the pointers and checksum. This happens on a regular basis
- * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets
- * that do not have their checksum computed. We also implement it for
- * the non-Xen case, but it is difficult to trigger or test this case
- * there, hence the WARN_ON_ONCE().
- */
- err = vswitch_skb_checksum_setup(skb);
- if (err)
- goto err_kfree_skb;
-#ifndef CHECKSUM_HW
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- WARN_ON_ONCE(1);
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
- /* Until 2.6.22, the start of the transport header was also the
- * start of data to be checksummed. Linux 2.6.22 introduced
- * the csum_start field for this purpose, but we should point
- * the transport header to it anyway for backward
- * compatibility, as dev_queue_xmit() does even in 2.6.28. */
- skb_set_transport_header(skb, skb->csum_start -
- skb_headroom(skb));
-#endif
- err = skb_checksum_help(skb);
- if (err)
- goto err_kfree_skb;
- }
-#else
- if (skb->ip_summed == CHECKSUM_HW) {
- err = skb_checksum_help(skb, 0);
- if (err)
- goto err_kfree_skb;
- }
-#endif
-
/* Break apart GSO packets into their component pieces. Otherwise
* userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
if (skb_is_gso(skb)) {
}
}
- /* Figure out port number. */
- port_no = ODPP_LOCAL;
- if (skb->dev) {
- if (skb->dev->br_port)
- port_no = skb->dev->br_port->port_no;
- else if (is_dp_dev(skb->dev))
- port_no = dp_dev_priv(skb->dev)->port_no;
- }
-
- /* Append each packet to queue. There will be only one packet unless
- * we broke up a GSO packet above. */
- do {
- struct odp_msg *header;
- struct sk_buff *nskb = skb->next;
- skb->next = NULL;
-
- err = skb_cow(skb, sizeof *header);
- if (err) {
- while (nskb) {
- kfree_skb(skb);
- skb = nskb;
- nskb = skb->next;
- }
- goto err_kfree_skb;
- }
-
- header = (struct odp_msg*)__skb_push(skb, sizeof *header);
- header->type = queue_no;
- header->length = skb->len;
- header->port = port_no;
- header->reserved = 0;
- header->arg = arg;
- skb_queue_tail(queue, skb);
-
- skb = nskb;
- } while (skb);
-
+ err = queue_control_packets(skb, queue, queue_no, arg);
wake_up_interruptible(&dp->waitqueue);
- return 0;
+ return err;
err_kfree_skb:
kfree_skb(skb);
break;
case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
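+ /* vlan_pcp holds the unshifted 3-bit priority, so shift the
+ * mask down before checking for out-of-range bits. */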
+ if (a->vlan_pcp.vlan_pcp
+ & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
return -EINVAL;
break;
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
struct odp_flow_put uf;
- struct sw_flow *flow, **bucket;
+ struct sw_flow *flow;
struct dp_table *table;
struct odp_flow_stats stats;
int error;
goto error;
uf.flow.key.reserved = 0;
-retry:
table = rcu_dereference(dp->table);
- bucket = dp_table_lookup_for_insert(table, &uf.flow.key);
- if (!bucket) {
- /* No such flow, and the slots where it could go are full. */
- error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT;
- goto error;
- } else if (!*bucket) {
- /* No such flow, but we found an available slot for it. */
+ flow = dp_table_lookup(table, &uf.flow.key);
+ if (!flow) {
+ /* No such flow. */
struct sw_flow_actions *acts;
error = -ENOENT;
goto error;
/* Expand table, if necessary, to make room. */
- if (dp->n_flows * 4 >= table->n_buckets &&
- table->n_buckets < DP_MAX_BUCKETS) {
+ if (dp->n_flows >= table->n_buckets) {
+ error = -ENOSPC;
+ if (table->n_buckets >= DP_MAX_BUCKETS)
+ goto error;
+
error = dp_table_expand(dp);
if (error)
goto error;
-
- /* The bucket's location has changed. Try again. */
- goto retry;
+ table = rcu_dereference(dp->table);
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- rcu_assign_pointer(*bucket, flow);
+ error = dp_table_insert(table, flow);
+ if (error)
+ goto error_free_flow_acts;
dp->n_flows++;
memset(&stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
- struct sw_flow *flow = *rcu_dereference(bucket);
struct sw_flow_actions *old_acts, *new_acts;
unsigned long int flags;
return -EFAULT;
return 0;
+error_free_flow_acts:
+ kfree(flow->sf_acts);
error_free_flow:
kmem_cache_free(flow_cache, flow);
error:
return 0;
}
-static int answer_query(struct sw_flow *flow, struct odp_flow __user *ufp)
+static int answer_query(struct sw_flow *flow, u32 query_flags,
+ struct odp_flow __user *ufp)
{
struct odp_flow_stats stats;
unsigned long int flags;
spin_lock_irqsave(&flow->lock, flags);
get_stats(flow, &stats);
+
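+ /* Optionally reset the accumulated TCP flags now that they
+ * have been reported. */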
+ if (query_flags & ODPFF_ZERO_TCP_FLAGS)
+ flow->tcp_flags = 0;
spin_unlock_irqrestore(&flow->lock, flags);
if (__copy_to_user(&ufp->stats, &stats, sizeof(struct odp_flow_stats)))
* we get completely accurate stats, but that blows our performance,
* badly. */
dp->n_flows--;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, uf.flags, ufp);
flow_deferred_free(flow);
error:
if (!flow)
error = __put_user(ENOENT, &ufp->stats.error);
else
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
if (error)
return -EFAULT;
}
if (__copy_to_user(&ufp->key, &flow->key, sizeof flow->key))
return -EFAULT;
- error = answer_query(flow, ufp);
+ error = answer_query(flow, 0, ufp);
if (error)
return error;
struct odp_flow_key key;
struct sk_buff *skb;
struct sw_flow_actions *actions;
+ struct ethhdr *eth;
int err;
err = -EFAULT;
execute.length))
goto error_free_skb;
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ /* Normally, setting the skb 'protocol' field would be handled by a
+ * call to eth_type_trans(), but it assumes there's a sending
+ * device, which we may not have. */
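+ /* Length/type values of 1536 (0x600) and above are Ethertypes;
+ * anything lower is an 802.3 frame length, which we report as
+ * raw 802.2 LLC. */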
+ if (ntohs(eth->h_proto) >= 1536)
+ skb->protocol = eth->h_proto;
+ else
+ skb->protocol = htons(ETH_P_802_2);
+
flow_extract(skb, execute.in_port, &key);
err = execute_actions(dp, skb, &key, actions->actions,
actions->n_actions, GFP_KERNEL);
int i;
stats.n_flows = dp->n_flows;
- stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
- stats.max_capacity = DP_MAX_BUCKETS * 2;
+ stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
+ stats.max_capacity = DP_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
stats.max_groups = DP_MAX_GROUPS;
return 0;
}
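+/* The mask of ODPL_* event types that a client listens for is stored
+ * as an integer directly in the file's private_data pointer. */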
+static int get_listen_mask(const struct file *f)
+{
+ return (long)f->private_data;
+}
+
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+ f->private_data = (void*)(long)listen_mask;
+}
+
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
unsigned long argp)
{
break;
case ODP_GET_LISTEN_MASK:
- err = put_user((int)f->private_data, (int __user *)argp);
+ err = put_user(get_listen_mask(f), (int __user *)argp);
break;
case ODP_SET_LISTEN_MASK:
if (listeners & ~ODPL_ALL)
break;
err = 0;
- f->private_data = (void*)listeners;
+ set_listen_mask(f, listeners);
break;
case ODP_PORT_QUERY:
loff_t *ppos)
{
/* XXX is there sufficient synchronization here? */
- int listeners = (int) f->private_data;
+ int listeners = get_listen_mask(f);
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp = get_dp(dp_idx);
struct sk_buff *skb;
}
}
success:
- copy_bytes = min(skb->len, nbytes);
+ copy_bytes = min_t(size_t, skb->len, nbytes);
iov.iov_base = buf;
iov.iov_len = copy_bytes;
retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
if (dp) {
mask = 0;
poll_wait(file, &dp->waitqueue, wait);
- if (dp_has_packet_of_interest(dp, (int)file->private_data))
+ if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
mask |= POLLIN | POLLRDNORM;
} else {
mask = POLLIN | POLLRDNORM | POLLHUP;
};
static int major;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
static struct llc_sap *dp_stp_sap;
static int dp_stp_rcv(struct sk_buff *skb, struct net_device *dev,
return 0;
}
-static int __init dp_init(void)
+static int dp_avoid_bridge_init(void)
{
- int err;
-
- printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
-
/* Register to receive STP packets because the bridge module also
* attempts to do so. Since there can only be a single listener for a
* given protocol, this provides mutual exclusion against the bridge
printk(KERN_ERR "openvswitch: can't register sap for STP (probably the bridge module is loaded)\n");
return -EADDRINUSE;
}
+ return 0;
+}
+
+static void dp_avoid_bridge_exit(void)
+{
+ llc_sap_put(dp_stp_sap);
+}
+#else /* Linux 2.6.27 or later. */
+static int dp_avoid_bridge_init(void)
+{
+ /* Linux 2.6.27 introduces a way for multiple clients to register for
+ * STP packets, which interferes with what we try to do above.
+ * Instead, just check whether there's a bridge hook defined. This is
+ * not as safe--the bridge module is willing to load over the top of
+ * us--but it provides a little bit of protection. */
+ if (br_handle_frame_hook) {
+ printk(KERN_ERR "openvswitch: bridge module is loaded, cannot load over it\n");
+ return -EADDRINUSE;
+ }
+ return 0;
+}
+
+static void dp_avoid_bridge_exit(void)
+{
+ /* Nothing to do. */
+}
+#endif /* Linux 2.6.27 or later */
+
+static int __init dp_init(void)
+{
+ int err;
+
+ printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
+
+ err = dp_avoid_bridge_init();
+ if (err)
+ return err;
err = flow_init();
if (err)
unregister_netdevice_notifier(&dp_device_notifier);
flow_exit();
br_handle_frame_hook = NULL;
- llc_sap_put(dp_stp_sap);
+ dp_avoid_bridge_exit();
}
module_init(dp_init);