int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
-int (*dp_add_dp_hook)(struct datapath *dp);
-EXPORT_SYMBOL(dp_add_dp_hook);
-
-int (*dp_del_dp_hook)(struct datapath *dp);
-EXPORT_SYMBOL(dp_del_dp_hook);
-
-int (*dp_add_if_hook)(struct net_bridge_port *p);
-EXPORT_SYMBOL(dp_add_if_hook);
-
-int (*dp_del_if_hook)(struct net_bridge_port *p);
-EXPORT_SYMBOL(dp_del_if_hook);
-
/* Datapaths. Protected on the read side by rcu_read_lock, on the write side
* by dp_mutex. dp_mutex is almost completely redundant with genl_mutex
* maintained by the Generic Netlink code, but the timeout path needs mutual
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
+/* kobject release callback for a datapath.  Invoked when the last
+ * reference to dp->ifobj is dropped via kobject_put(), at which point
+ * it frees the enclosing struct datapath. */
+static void release_dp(struct kobject *kobj)
+{
+ struct datapath *dp = container_of(kobj, struct datapath, ifobj);
+ kfree(dp);
+}
+
+/* ktype for dp->ifobj: no sysfs attribute ops, only a release hook so
+ * that the struct datapath is freed on the final kobject_put(). */
+struct kobj_type dp_ktype = {
+ .release = release_dp
+};
+
static int create_dp(int dp_idx, const char __user *devnamep)
{
struct net_device *dp_dev;
skb_queue_head_init(&dp->queues[i]);
init_waitqueue_head(&dp->waitqueue);
+ /* Initialize kobject for bridge. This will be added as
+ * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
+ dp->ifobj.kset = NULL;
+ kobject_init(&dp->ifobj, &dp_ktype);
+
/* Allocate table. */
err = -ENOMEM;
rcu_assign_pointer(dp->table, dp_table_create(DP_L1_SIZE));
mutex_unlock(&dp_mutex);
rtnl_unlock();
- if (dp_add_dp_hook)
- dp_add_dp_hook(dp);
+ dp_sysfs_add_dp(dp);
return 0;
if (p->port_no != ODPP_LOCAL)
dp_del_port(p);
- if (dp_del_dp_hook)
- dp_del_dp_hook(dp);
+ dp_sysfs_del_dp(dp);
rcu_assign_pointer(dps[dp->dp_idx], NULL);
for (i = 0; i < DP_MAX_GROUPS; i++)
kfree(dp->groups[i]);
free_percpu(dp->stats_percpu);
- kfree(dp);
+ kobject_put(&dp->ifobj);
module_put(THIS_MODULE);
}
return err;
}
+/* kobject release callback for a bridge port.  Invoked when the last
+ * reference to p->kobj is dropped via kobject_put(), at which point it
+ * frees the enclosing struct net_bridge_port. */
+static void release_nbp(struct kobject *kobj)
+{
+ struct net_bridge_port *p = container_of(kobj, struct net_bridge_port, kobj);
+ kfree(p);
+}
+
+/* ktype for a port's p->kobj: sysfs attribute ops when sysfs is built
+ * in, plus a release hook so the port structure is freed on the final
+ * kobject_put(). */
+struct kobj_type brport_ktype = {
+#ifdef CONFIG_SYSFS
+ .sysfs_ops = &brport_sysfs_ops,
+#endif
+ .release = release_nbp
+};
+
/* Called with RTNL lock and dp_mutex. */
static int new_nbp(struct datapath *dp, struct net_device *dev, int port_no)
{
list_add_rcu(&p->node, &dp->port_list);
dp->n_ports++;
+ /* Initialize kobject for bridge. This will be added as
+ * /sys/class/net/<devname>/brport later, if sysfs is enabled. */
+ p->kobj.kset = NULL;
+ kobject_init(&p->kobj, &brport_ktype);
+
dp_ifinfo_notify(RTM_NEWLINK, p);
return 0;
for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
if (!dp->ports[port_no])
goto got_port_no;
- err = -EXFULL;
+ err = -EFBIG;
goto out_unlock_dp;
got_port_no:
if (err)
goto out_put;
- if (dp_add_if_hook)
- dp_add_if_hook(dp->ports[port_no]);
+ dp_sysfs_add_if(dp->ports[port_no]);
err = __put_user(port_no, &port.port);
{
ASSERT_RTNL();
- if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
- sysfs_remove_link(&p->dp->ifobj, p->dev->name);
-#else
- sysfs_remove_link(p->dp->ifobj, p->dev->name);
-#endif
- }
+ if (p->port_no != ODPP_LOCAL)
+ dp_sysfs_del_if(p);
dp_ifinfo_notify(RTM_DELLINK, p);
p->dp->n_ports--;
/* Then wait until no one is still using it, and destroy it. */
synchronize_rcu();
- if (is_dp_dev(p->dev)) {
+ if (is_dp_dev(p->dev))
dp_dev_destroy(p->dev);
- }
- if (p->port_no != ODPP_LOCAL && dp_del_if_hook) {
- dp_del_if_hook(p);
- } else {
- dev_put(p->dev);
- kfree(p);
- }
+ dev_put(p->dev);
+ kobject_put(&p->kobj);
return 0;
}
#error
#endif
-#ifdef CONFIG_XEN
-#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
+#if defined(CONFIG_XEN) && LINUX_VERSION_CODE == KERNEL_VERSION(2,6,18)
/* This code is copied verbatim from net/dev/core.c in Xen's
* linux-2.6.18-92.1.10.el5.xs5.0.0.394.644. We can't call those functions
* directly because they aren't exported. */
}
}
-int skb_checksum_setup(struct sk_buff *skb)
+int vswitch_skb_checksum_setup(struct sk_buff *skb)
{
if (skb->proto_csum_blank) {
if (skb->protocol != htons(ETH_P_IP))
out:
return -EPROTO;
}
-#endif /* linux == 2.6.18 */
-#endif /* CONFIG_XEN */
+#else
+/* Builds without Xen's 2.6.18 deferred-checksum mechanism have nothing
+ * to fix up, so this is a no-op. */
+int vswitch_skb_checksum_setup(struct sk_buff *skb) { return 0; }
+#endif /* CONFIG_XEN && linux == 2.6.18 */
int
dp_output_control(struct datapath *dp, struct sk_buff *skb, int queue_no,
/* If a checksum-deferred packet is forwarded to the controller,
* correct the pointers and checksum. This happens on a regular basis
- * only on Xen (the CHECKSUM_HW case), on which VMs can pass up packets
- * that do not have their checksum computed. We also implement it for
- * the non-Xen case, but it is difficult to trigger or test this case
- * there, hence the WARN_ON_ONCE().
+ * only on Xen, on which VMs can pass up packets that do not have their
+ * checksum computed.
*/
- err = skb_checksum_setup(skb);
+ err = vswitch_skb_checksum_setup(skb);
if (err)
goto err_kfree_skb;
#ifndef CHECKSUM_HW
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- WARN_ON_ONCE(1);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
/* Until 2.6.22, the start of the transport header was also the
* start of data to be checksummed. Linux 2.6.22 introduced
static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
{
struct odp_flow_put uf;
- struct sw_flow *flow, **bucket;
+ struct sw_flow *flow;
struct dp_table *table;
struct odp_flow_stats stats;
int error;
goto error;
uf.flow.key.reserved = 0;
-retry:
table = rcu_dereference(dp->table);
- bucket = dp_table_lookup_for_insert(table, &uf.flow.key);
- if (!bucket) {
- /* No such flow, and the slots where it could go are full. */
- error = uf.flags & ODPPF_CREATE ? -EXFULL : -ENOENT;
- goto error;
- } else if (!*bucket) {
- /* No such flow, but we found an available slot for it. */
+ flow = dp_table_lookup(table, &uf.flow.key);
+ if (!flow) {
+ /* No such flow. */
struct sw_flow_actions *acts;
error = -ENOENT;
goto error;
/* Expand table, if necessary, to make room. */
- if (dp->n_flows * 4 >= table->n_buckets &&
- table->n_buckets < DP_MAX_BUCKETS) {
+ if (dp->n_flows >= table->n_buckets) {
+ error = -ENOSPC;
+ if (table->n_buckets >= DP_MAX_BUCKETS)
+ goto error;
+
error = dp_table_expand(dp);
if (error)
goto error;
-
- /* The bucket's location has changed. Try again. */
- goto retry;
+ table = rcu_dereference(dp->table);
}
/* Allocate flow. */
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- rcu_assign_pointer(*bucket, flow);
+ error = dp_table_insert(table, flow);
+ if (error)
+ goto error_free_flow_acts;
dp->n_flows++;
memset(&stats, 0, sizeof(struct odp_flow_stats));
} else {
/* We found a matching flow. */
- struct sw_flow *flow = *rcu_dereference(bucket);
struct sw_flow_actions *old_acts, *new_acts;
unsigned long int flags;
return -EFAULT;
return 0;
+error_free_flow_acts:
+ kfree(flow->sf_acts);
error_free_flow:
kmem_cache_free(flow_cache, flow);
error:
int i;
stats.n_flows = dp->n_flows;
- stats.cur_capacity = rcu_dereference(dp->table)->n_buckets * 2;
- stats.max_capacity = DP_MAX_BUCKETS * 2;
+ stats.cur_capacity = rcu_dereference(dp->table)->n_buckets;
+ stats.max_capacity = DP_MAX_BUCKETS;
stats.n_ports = dp->n_ports;
stats.max_ports = DP_MAX_PORTS;
stats.max_groups = DP_MAX_GROUPS;
return copy_to_user(statsp, &stats, sizeof stats) ? -EFAULT : 0;
}
+/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
+int dp_min_mtu(const struct datapath *dp)
+{
+ struct net_bridge_port *p;
+ int mtu = 0;
+
+ /* The port list is modified under RTNL, so the caller must hold it
+  * for this walk to be safe. */
+ ASSERT_RTNL();
+
+ list_for_each_entry_rcu (p, &dp->port_list, node) {
+ struct net_device *dev = p->dev;
+
+ /* Skip any internal ports, since that's what we're trying to
+  * set. */
+ if (is_dp_dev(dev))
+ continue;
+
+ if (!mtu || dev->mtu < mtu)
+ mtu = dev->mtu;
+ }
+
+ /* No external ports attached: fall back to the Ethernet default. */
+ return mtu ? mtu : ETH_DATA_LEN;
+}
+
static int
put_port(const struct net_bridge_port *p, struct odp_port __user *uop)
{