kfree_skb(skb);
goto errout;
}
- err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+ rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+ return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
if (!dp->table)
goto err_free_dp;
- /* Setup our datapath device */
+ /* Set up our datapath device. */
dp_dev = dp_dev_create(dp, devname, ODPP_LOCAL);
err = PTR_ERR(dp_dev);
if (IS_ERR(dp_dev))
break;
case ODPAT_SET_VLAN_PCP:
- if (a->vlan_pcp.vlan_pcp & ~VLAN_PCP_MASK)
+ if (a->vlan_pcp.vlan_pcp
+ & ~(VLAN_PCP_MASK >> VLAN_PCP_SHIFT))
return -EINVAL;
break;
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
struct odp_flow_put uf;
- struct sw_flow *flow, **bucket;
+ struct sw_flow *flow;
struct dp_table *table;
struct odp_flow_stats stats;
int error;
goto error;
uf.flow.key.reserved = 0;
-retry:
table = rcu_dereference(dp->table);
- bucket = dp_table_lookup_for_insert(table, &uf.flow.key);
- if (!bucket) {
- /* No such flow, and the slots where it could go are full. */
- error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT;
- goto error;
- } else if (!*bucket) {
- /* No such flow, but we found an available slot for it. */
+ flow = dp_table_lookup(table, &uf.flow.key);
+ if (!flow) {
+ /* No such flow. */
struct sw_flow_actions *acts;
error = -ENOENT;
goto error;
/* Expand table, if necessary, to make room. */
- if (dp->n_flows * 4 >= table->n_buckets &&
- table->n_buckets < DP_MAX_BUCKETS) {
+ if (dp->n_flows >= table->n_buckets) {
+ error = -ENOSPC;
+ if (table->n_buckets >= DP_MAX_BUCKETS)
+ goto error;
+
error = dp_table_expand(dp);
if (error)
goto error;
-
- /* The bucket's location has changed. Try again. */
- goto retry;
+ table = rcu_dereference(dp->table);
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- rcu_assign_pointer(*bucket, flow);
+ error = dp_table_insert(table, flow);
+ if (error)
+ goto error_free_flow_acts;
dp->n_flows++;
memset(&stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
- struct sw_flow *flow = *rcu_dereference(bucket);
struct sw_flow_actions *old_acts, *new_acts;
unsigned long int flags;
return -EFAULT;
return 0;
+error_free_flow_acts:
+ kfree(flow->sf_acts);
error_free_flow:
kmem_cache_free(flow_cache, flow);
error:
struct odp_flow_key key;
struct sk_buff *skb;
struct sw_flow_actions *actions;
+ struct ethhdr *eth;
int err;
err = -EFAULT;
execute.length))
goto error_free_skb;
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+
+ /* Normally, setting the skb 'protocol' field would be handled by a
+ * call to eth_type_trans(), but it assumes there's a sending
+ * device, which we may not have. */
+ if (ntohs(eth->h_proto) >= 1536)
+ skb->protocol = eth->h_proto;
+ else
+ skb->protocol = htons(ETH_P_802_2);
+
flow_extract(skb, execute.in_port, &key);
err = execute_actions(dp, skb, &key, actions->actions,
actions->n_actions, GFP_KERNEL);
int i;
stats.n_flows = dp->n_flows;
- stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
- stats.max_capacity = DP_MAX_BUCKETS * 2;
+ stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
+ stats.max_capacity = DP_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
stats.max_groups = DP_MAX_GROUPS;
return 0;
}
+/* Returns the ODPL_* listen mask for file handle 'f'.
+ *
+ * The mask is stored directly in file->private_data, cast to an integer,
+ * rather than behind an allocated structure (see set_listen_mask()).
+ * The (long) intermediate cast keeps the pointer-to-integer conversion
+ * the same width as the pointer before narrowing to int. */
+static int get_listen_mask(const struct file *f)
+{
+ return (long)f->private_data;
+}
+
+/* Stores 'listen_mask' (a bitmask of ODPL_* values, validated by the
+ * caller against ODPL_ALL) into 'f', encoding it directly in
+ * file->private_data instead of allocating per-file state.  Read back
+ * with get_listen_mask(). */
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+ f->private_data = (void*)(long)listen_mask;
+}
+
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
unsigned long argp)
{
break;
case ODP_GET_LISTEN_MASK:
- err = put_user((int)f->private_data, (int __user *)argp);
+ err = put_user(get_listen_mask(f), (int __user *)argp);
break;
case ODP_SET_LISTEN_MASK:
if (listeners & ~ODPL_ALL)
break;
err = 0;
- f->private_data = (void*)listeners;
+ set_listen_mask(f, listeners);
break;
case ODP_PORT_QUERY:
loff_t *ppos)
{
/* XXX is there sufficient synchronization here? */
- int listeners = (int) f->private_data;
+ int listeners = get_listen_mask(f);
int dp_idx = iminor(f->f_dentry->d_inode);
struct datapath *dp = get_dp(dp_idx);
struct sk_buff *skb;
}
}
success:
- copy_bytes = min(skb->len, nbytes);
+ copy_bytes = min_t(size_t, skb->len, nbytes);
iov.iov_base = buf;
iov.iov_len = copy_bytes;
retval = skb_copy_datagram_iovec(skb, 0, &iov, iov.iov_len);
if (dp) {
mask = 0;
poll_wait(file, &dp->waitqueue, wait);
- if (dp_has_packet_of_interest(dp, (int)file->private_data))
+ if (dp_has_packet_of_interest(dp, get_listen_mask(file)))
mask |= POLLIN | POLLRDNORM;
} else {
mask = POLLIN | POLLRDNORM | POLLHUP;