Eliminate ODPL_* from userspace-facing interface.
[sliver-openvswitch.git] / datapath / datapath.c
index 3f108c0..94f908a 100644 (file)
@@ -50,7 +50,6 @@
 #include "actions.h"
 #include "flow.h"
 #include "loop_counter.h"
-#include "odp-compat.h"
 #include "table.h"
 #include "vport-internal_dev.h"
 
@@ -66,15 +65,15 @@ EXPORT_SYMBOL(dp_ioctl_hook);
  * It is safe to access the datapath and vport structures with just
  * dp_mutex.
  */
-static struct datapath __rcu *dps[ODP_MAX];
+static struct datapath __rcu *dps[256];
 static DEFINE_MUTEX(dp_mutex);
 
-static int new_vport(struct datapath *, struct odp_port *, int port_no);
+static struct vport *new_vport(const struct vport_parms *);
 
 /* Must be called with rcu_read_lock or dp_mutex. */
 struct datapath *get_dp(int dp_idx)
 {
-       if (dp_idx < 0 || dp_idx >= ODP_MAX)
+       if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
                return NULL;
        return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
                                         lockdep_is_held(&dp_mutex));
@@ -206,107 +205,6 @@ static struct kobj_type dp_ktype = {
        .release = release_dp
 };
 
-static int create_dp(int dp_idx, const char __user *devnamep)
-{
-       struct odp_port internal_dev_port;
-       char devname[IFNAMSIZ];
-       struct datapath *dp;
-       int err;
-       int i;
-
-       if (devnamep) {
-               int retval = strncpy_from_user(devname, devnamep, IFNAMSIZ);
-               if (retval < 0) {
-                       err = -EFAULT;
-                       goto err;
-               } else if (retval >= IFNAMSIZ) {
-                       err = -ENAMETOOLONG;
-                       goto err;
-               }
-       } else {
-               snprintf(devname, sizeof(devname), "of%d", dp_idx);
-       }
-
-       rtnl_lock();
-       mutex_lock(&dp_mutex);
-       err = -ENODEV;
-       if (!try_module_get(THIS_MODULE))
-               goto err_unlock;
-
-       /* Exit early if a datapath with that number already exists.
-        * (We don't use -EEXIST because that's ambiguous with 'devname'
-        * conflicting with an existing network device name.) */
-       err = -EBUSY;
-       if (get_dp(dp_idx))
-               goto err_put_module;
-
-       err = -ENOMEM;
-       dp = kzalloc(sizeof(*dp), GFP_KERNEL);
-       if (dp == NULL)
-               goto err_put_module;
-       INIT_LIST_HEAD(&dp->port_list);
-       mutex_init(&dp->mutex);
-       mutex_lock(&dp->mutex);
-       dp->dp_idx = dp_idx;
-       for (i = 0; i < DP_N_QUEUES; i++)
-               skb_queue_head_init(&dp->queues[i]);
-       init_waitqueue_head(&dp->waitqueue);
-
-       /* Initialize kobject for bridge.  This will be added as
-        * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
-       dp->ifobj.kset = NULL;
-       kobject_init(&dp->ifobj, &dp_ktype);
-
-       /* Allocate table. */
-       err = -ENOMEM;
-       rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
-       if (!dp->table)
-               goto err_free_dp;
-
-       /* Set up our datapath device. */
-       BUILD_BUG_ON(sizeof(internal_dev_port.devname) != sizeof(devname));
-       strcpy(internal_dev_port.devname, devname);
-       strcpy(internal_dev_port.type, "internal");
-       err = new_vport(dp, &internal_dev_port, ODPP_LOCAL);
-       if (err) {
-               if (err == -EBUSY)
-                       err = -EEXIST;
-
-               goto err_destroy_table;
-       }
-
-       dp->drop_frags = 0;
-       dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
-       if (!dp->stats_percpu) {
-               err = -ENOMEM;
-               goto err_destroy_local_port;
-       }
-
-       rcu_assign_pointer(dps[dp_idx], dp);
-       dp_sysfs_add_dp(dp);
-
-       mutex_unlock(&dp->mutex);
-       mutex_unlock(&dp_mutex);
-       rtnl_unlock();
-
-       return 0;
-
-err_destroy_local_port:
-       dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
-err_destroy_table:
-       tbl_destroy(get_table_protected(dp), NULL);
-err_free_dp:
-       mutex_unlock(&dp->mutex);
-       kfree(dp);
-err_put_module:
-       module_put(THIS_MODULE);
-err_unlock:
-       mutex_unlock(&dp_mutex);
-       rtnl_unlock();
-err:
-       return err;
-}
-
 static void destroy_dp_rcu(struct rcu_head *rcu)
 {
        struct datapath *dp = container_of(rcu, struct datapath, rcu);
@@ -320,22 +218,11 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
        kobject_put(&dp->ifobj);
 }
 
-static int destroy_dp(int dp_idx)
+/* Caller must hold RTNL, dp_mutex, and dp->mutex. */
+static void destroy_dp(struct datapath *dp)
 {
-       struct datapath *dp;
-       int err = 0;
        struct vport *p, *n;
 
-       rtnl_lock();
-       mutex_lock(&dp_mutex);
-       dp = get_dp(dp_idx);
-       if (!dp) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       mutex_lock(&dp->mutex);
-
        list_for_each_entry_safe (p, n, &dp->port_list, node)
                if (p->port_no != ODPP_LOCAL)
                        dp_detach_port(p);
@@ -347,82 +234,26 @@ static int destroy_dp(int dp_idx)
        mutex_unlock(&dp->mutex);
        call_rcu(&dp->rcu, destroy_dp_rcu);
        module_put(THIS_MODULE);
-
-out:
-       mutex_unlock(&dp_mutex);
-       rtnl_unlock();
-       return err;
 }
 
 /* Called with RTNL lock and dp->mutex. */
-static int new_vport(struct datapath *dp, struct odp_port *odp_port, int port_no)
+static struct vport *new_vport(const struct vport_parms *parms)
 {
-       struct vport_parms parms;
        struct vport *vport;
 
-       parms.name = odp_port->devname;
-       parms.type = odp_port->type;
-       parms.config = odp_port->config;
-       parms.dp = dp;
-       parms.port_no = port_no;
-
        vport_lock();
-       vport = vport_add(&parms);
-       vport_unlock();
-
-       if (IS_ERR(vport))
-               return PTR_ERR(vport);
-
-       rcu_assign_pointer(dp->ports[port_no], vport);
-       list_add_rcu(&vport->node, &dp->port_list);
-       dp->n_ports++;
-
-       dp_ifinfo_notify(RTM_NEWLINK, vport);
-
-       return 0;
-}
-
-static int attach_port(int dp_idx, struct odp_port __user *portp)
-{
-       struct datapath *dp;
-       struct odp_port port;
-       int port_no;
-       int err;
-
-       err = -EFAULT;
-       if (copy_from_user(&port, portp, sizeof(port)))
-               goto out;
-       port.devname[IFNAMSIZ - 1] = '\0';
-       port.type[VPORT_TYPE_SIZE - 1] = '\0';
-
-       rtnl_lock();
-       dp = get_dp_locked(dp_idx);
-       err = -ENODEV;
-       if (!dp)
-               goto out_unlock_rtnl;
-
-       for (port_no = 1; port_no < DP_MAX_PORTS; port_no++)
-               if (!dp->ports[port_no])
-                       goto got_port_no;
-       err = -EFBIG;
-       goto out_unlock_dp;
-
-got_port_no:
-       err = new_vport(dp, &port, port_no);
-       if (err)
-               goto out_unlock_dp;
+       vport = vport_add(parms);
+       if (!IS_ERR(vport)) {
+               struct datapath *dp = parms->dp;
 
-       set_internal_devs_mtu(dp);
-       dp_sysfs_add_if(get_vport_protected(dp, port_no));
+               rcu_assign_pointer(dp->ports[parms->port_no], vport);
+               list_add_rcu(&vport->node, &dp->port_list);
 
-       err = put_user(port_no, &portp->port);
+               dp_ifinfo_notify(RTM_NEWLINK, vport);
+       }
+       vport_unlock();
 
-out_unlock_dp:
-       mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
-       rtnl_unlock();
-out:
-       return err;
+       return vport;
 }
 
 int dp_detach_port(struct vport *p)
@@ -436,7 +267,6 @@ int dp_detach_port(struct vport *p)
        dp_ifinfo_notify(RTM_DELLINK, p);
 
        /* First drop references to device. */
-       p->dp->n_ports--;
        list_del_rcu(&p->node);
        rcu_assign_pointer(p->dp->ports[p->port_no], NULL);
 
@@ -448,37 +278,6 @@ int dp_detach_port(struct vport *p)
        return err;
 }
 
-static int detach_port(int dp_idx, int port_no)
-{
-       struct vport *p;
-       struct datapath *dp;
-       int err;
-
-       err = -EINVAL;
-       if (port_no < 0 || port_no >= DP_MAX_PORTS || port_no == ODPP_LOCAL)
-               goto out;
-
-       rtnl_lock();
-       dp = get_dp_locked(dp_idx);
-       err = -ENODEV;
-       if (!dp)
-               goto out_unlock_rtnl;
-
-       p = get_vport_protected(dp, port_no);
-       err = -ENOENT;
-       if (!p)
-               goto out_unlock_dp;
-
-       err = dp_detach_port(p);
-
-out_unlock_dp:
-       mutex_unlock(&dp->mutex);
-out_unlock_rtnl:
-       rtnl_unlock();
-out:
-       return err;
-}
-
 /* Must be called with rcu_read_lock. */
 void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
 {
@@ -497,7 +296,7 @@ void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                bool is_frag;
 
                /* Extract flow from 'skb' into 'key'. */
-               error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key, &is_frag);
+               error = flow_extract(skb, p->port_no, &key, &is_frag);
                if (unlikely(error)) {
                        kfree_skb(skb);
                        return;
@@ -721,20 +520,34 @@ err:
        return err;
 }
 
-static int flush_flows(struct datapath *dp)
+static int flush_flows(int dp_idx)
 {
-       struct tbl *old_table = get_table_protected(dp);
+       struct tbl *old_table;
        struct tbl *new_table;
+       struct datapath *dp;
+       int err;
+
+       dp = get_dp_locked(dp_idx);
+       err = -ENODEV;
+       if (!dp)
+               goto exit;
 
+       old_table = get_table_protected(dp);
        new_table = tbl_create(TBL_MIN_BUCKETS);
+       err = -ENOMEM;
        if (!new_table)
-               return -ENOMEM;
+               goto exit_unlock;
 
        rcu_assign_pointer(dp->table, new_table);
 
        tbl_deferred_destroy(old_table, flow_free_tbl);
 
-       return 0;
+       err = 0;
+
+exit_unlock:
+       mutex_unlock(&dp->mutex);
+exit:
+       return err;
 }
 
 static int validate_actions(const struct nlattr *actions, u32 actions_len)
@@ -810,55 +623,25 @@ static int validate_actions(const struct nlattr *actions, u32 actions_len)
        return 0;
 }
 
-static struct sw_flow_actions *get_actions(const struct odp_flow *flow)
+struct dp_flowcmd {
+       u32 nlmsg_flags;
+       u32 dp_idx;
+       u32 total_len;
+       struct sw_flow_key key;
+       const struct nlattr *actions;
+       u32 actions_len;
+       bool clear;
+       u64 state;
+};
+
+static struct sw_flow_actions *get_actions(const struct dp_flowcmd *flowcmd)
 {
        struct sw_flow_actions *actions;
-       int error;
-
-       actions = flow_actions_alloc(flow->actions_len);
-       error = PTR_ERR(actions);
-       if (IS_ERR(actions))
-               goto error;
-
-       error = -EFAULT;
-       if (copy_from_user(actions->actions,
-                          (struct nlattr __user __force *)flow->actions,
-                          flow->actions_len))
-               goto error_free_actions;
-       error = validate_actions(actions->actions, actions->actions_len);
-       if (error)
-               goto error_free_actions;
 
+       actions = flow_actions_alloc(flowcmd->actions_len);
+       if (!IS_ERR(actions) && flowcmd->actions_len)
+               memcpy(actions->actions, flowcmd->actions, flowcmd->actions_len);
        return actions;
-
-error_free_actions:
-       kfree(actions);
-error:
-       return ERR_PTR(error);
-}
-
-static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats)
-{
-       if (flow->used) {
-               struct timespec offset_ts, used, now_mono;
-
-               ktime_get_ts(&now_mono);
-               jiffies_to_timespec(jiffies - flow->used, &offset_ts);
-               set_normalized_timespec(&used, now_mono.tv_sec - offset_ts.tv_sec,
-                                       now_mono.tv_nsec - offset_ts.tv_nsec);
-
-               stats->used_sec = used.tv_sec;
-               stats->used_nsec = used.tv_nsec;
-       } else {
-               stats->used_sec = 0;
-               stats->used_nsec = 0;
-       }
-
-       stats->n_packets = flow->packet_count;
-       stats->n_bytes = flow->byte_count;
-       stats->reserved = 0;
-       stats->tcp_flags = flow->tcp_flags;
-       stats->error = 0;
 }
 
 static void clear_stats(struct sw_flow *flow)
@@ -881,563 +664,1307 @@ static int expand_table(struct datapath *dp)
        rcu_assign_pointer(dp->table, new_table);
        tbl_deferred_destroy(old_table, NULL);
 
-       return 0;
+       return 0;
 }
 
-static int do_put_flow(struct datapath *dp, struct odp_flow_put *uf,
-                      struct odp_flow_stats *stats)
+static const struct nla_policy execute_policy[ODP_PACKET_ATTR_MAX + 1] = {
+       [ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
+       [ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+};
+
+static int execute_packet(const struct odp_packet __user *uodp_packet)
 {
-       struct tbl_node *flow_node;
+       struct nlattr *a[ODP_PACKET_ATTR_MAX + 1];
+       struct odp_packet *odp_packet;
+       struct sk_buff *skb, *packet;
+       unsigned int actions_len;
+       struct nlattr *actions;
        struct sw_flow_key key;
-       struct sw_flow *flow;
-       struct tbl *table;
-       struct sw_flow_actions *acts = NULL;
-       int error;
-       u32 hash;
+       struct datapath *dp;
+       struct ethhdr *eth;
+       bool is_frag;
+       u32 len;
+       int err;
 
-       error = flow_copy_from_user(&key, (const struct nlattr __force __user *)uf->flow.key,
-                                   uf->flow.key_len);
-       if (error)
-               return error;
+       if (get_user(len, &uodp_packet->len))
+               return -EFAULT;
+       if (len < sizeof(struct odp_packet))
+               return -EINVAL;
 
-       hash = flow_hash(&key);
-       table = get_table_protected(dp);
-       flow_node = tbl_lookup(table, &key, hash, flow_cmp);
-       if (!flow_node) {
-               /* No such flow. */
-               error = -ENOENT;
-               if (!(uf->flags & ODPPF_CREATE))
-                       goto error;
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
 
-               /* Expand table, if necessary, to make room. */
-               if (tbl_count(table) >= tbl_n_buckets(table)) {
-                       error = expand_table(dp);
-                       if (error)
-                               goto error;
-                       table = get_table_protected(dp);
-               }
+       err = -EFAULT;
+       if (copy_from_user(__skb_put(skb, len), uodp_packet, len))
+               goto exit_free_skb;
 
-               /* Allocate flow. */
-               flow = flow_alloc();
-               if (IS_ERR(flow)) {
-                       error = PTR_ERR(flow);
-                       goto error;
-               }
-               flow->key = key;
-               clear_stats(flow);
+       odp_packet = (struct odp_packet *)skb->data;
+       err = -EINVAL;
+       if (odp_packet->len != len)
+               goto exit_free_skb;
 
-               /* Obtain actions. */
-               acts = get_actions(&uf->flow);
-               error = PTR_ERR(acts);
-               if (IS_ERR(acts))
-                       goto error_free_flow;
-               rcu_assign_pointer(flow->sf_acts, acts);
+       __skb_pull(skb, sizeof(struct odp_packet));
+       err = nla_parse(a, ODP_PACKET_ATTR_MAX, (struct nlattr *)skb->data,
+                       skb->len, execute_policy);
+       if (err)
+               goto exit_free_skb;
 
-               /* Put flow in bucket. */
-               error = tbl_insert(table, &flow->tbl_node, hash);
-               if (error)
-                       goto error_free_flow_acts;
+       err = -EINVAL;
+       if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
+           nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
+               goto exit_free_skb;
 
-               memset(stats, 0, sizeof(struct odp_flow_stats));
-       } else {
-               /* We found a matching flow. */
-               struct sw_flow_actions *old_acts, *new_acts;
+       actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
+       actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
+       err = validate_actions(actions, actions_len);
+       if (err)
+               goto exit_free_skb;
 
-               flow = flow_cast(flow_node);
+       packet = skb_clone(skb, GFP_KERNEL);
+       err = -ENOMEM;
+       if (!packet)
+               goto exit_free_skb;
+       packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
+       packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
 
-               /* Bail out if we're not allowed to modify an existing flow. */
-               error = -EEXIST;
-               if (!(uf->flags & ODPPF_MODIFY))
-                       goto error;
+       skb_reset_mac_header(packet);
+       eth = eth_hdr(packet);
 
-               /* Swap actions. */
-               new_acts = get_actions(&uf->flow);
-               error = PTR_ERR(new_acts);
-               if (IS_ERR(new_acts))
-                       goto error;
+       /* Normally, setting the skb 'protocol' field would be handled by a
+        * call to eth_type_trans(), but it assumes there's a sending
+        * device, which we may not have. */
+       if (ntohs(eth->h_proto) >= 1536)
+               packet->protocol = eth->h_proto;
+       else
+               packet->protocol = htons(ETH_P_802_2);
 
-               old_acts = rcu_dereference_protected(flow->sf_acts,
-                                                    lockdep_is_held(&dp->mutex));
-               if (old_acts->actions_len != new_acts->actions_len ||
-                   memcmp(old_acts->actions, new_acts->actions,
-                          old_acts->actions_len)) {
-                       rcu_assign_pointer(flow->sf_acts, new_acts);
-                       flow_deferred_free_acts(old_acts);
-               } else {
-                       kfree(new_acts);
-               }
+       err = flow_extract(packet, -1, &key, &is_frag);
+       if (err)
+               goto exit_free_skb;
 
-               /* Fetch stats, then clear them if necessary. */
-               spin_lock_bh(&flow->lock);
-               get_stats(flow, stats);
-               if (uf->flags & ODPPF_ZERO_STATS)
-                       clear_stats(flow);
-               spin_unlock_bh(&flow->lock);
-       }
+       rcu_read_lock();
+       dp = get_dp(odp_packet->dp_idx);
+       err = -ENODEV;
+       if (dp)
+               err = execute_actions(dp, packet, &key, actions, actions_len);
+       rcu_read_unlock();
 
-       return 0;
+exit_free_skb:
+       kfree_skb(skb);
+       return err;
+}
 
-error_free_flow_acts:
-       kfree(acts);
-error_free_flow:
-       flow->sf_acts = NULL;
-       flow_put(flow);
-error:
-       return error;
+static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
+{
+       int i;
+
+       stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
+       for_each_possible_cpu(i) {
+               const struct dp_stats_percpu *percpu_stats;
+               struct dp_stats_percpu local_stats;
+               unsigned seqcount;
+
+               percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
+
+               do {
+                       seqcount = read_seqcount_begin(&percpu_stats->seqlock);
+                       local_stats = *percpu_stats;
+               } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
+
+               stats->n_frags += local_stats.n_frags;
+               stats->n_hit += local_stats.n_hit;
+               stats->n_missed += local_stats.n_missed;
+               stats->n_lost += local_stats.n_lost;
+       }
+}
+
+/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
+int dp_min_mtu(const struct datapath *dp)
+{
+       struct vport *p;
+       int mtu = 0;
+
+       ASSERT_RTNL();
+
+       list_for_each_entry_rcu (p, &dp->port_list, node) {
+               int dev_mtu;
+
+               /* Skip any internal ports, since that's what we're trying to
+                * set. */
+               if (is_internal_vport(p))
+                       continue;
+
+               dev_mtu = vport_get_mtu(p);
+               if (!mtu || dev_mtu < mtu)
+                       mtu = dev_mtu;
+       }
+
+       return mtu ? mtu : ETH_DATA_LEN;
+}
+
+/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
+ * be called with RTNL lock. */
+void set_internal_devs_mtu(const struct datapath *dp)
+{
+       struct vport *p;
+       int mtu;
+
+       ASSERT_RTNL();
+
+       mtu = dp_min_mtu(dp);
+
+       list_for_each_entry_rcu (p, &dp->port_list, node) {
+               if (is_internal_vport(p))
+                       vport_set_mtu(p, mtu);
+       }
 }
 
-static int put_flow(struct datapath *dp, struct odp_flow_put __user *ufp)
+static int get_listen_mask(const struct file *f)
 {
+       return (long)f->private_data;
+}
+
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+       f->private_data = (void*)(long)listen_mask;
+}
+
+static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
+       [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+       [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+       [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+       [ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
+};
+
+static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
+                            struct sw_flow *flow, u32 total_len, u64 state)
+{
+       const struct sw_flow_actions *sf_acts;
        struct odp_flow_stats stats;
-       struct odp_flow_put uf;
+       struct odp_flow *odp_flow;
+       struct sk_buff *skb;
+       struct nlattr *nla;
+       unsigned long used;
+       u8 tcp_flags;
+       int err;
+
+       sf_acts = rcu_dereference_protected(flow->sf_acts,
+                                           lockdep_is_held(&dp->mutex));
+
+       skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
+       err = -ENOMEM;
+       if (!skb)
+               goto exit;
+
+       rcu_read_lock();
+       odp_flow = (struct odp_flow*)__skb_put(skb, sizeof(struct odp_flow));
+       odp_flow->dp_idx = dp->dp_idx;
+       odp_flow->total_len = total_len;
+
+       nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
+       if (!nla)
+               goto nla_put_failure;
+       err = flow_to_nlattrs(&flow->key, skb);
+       if (err)
+               goto exit_unlock;
+       nla_nest_end(skb, nla);
+
+       nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
+       if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
+               goto nla_put_failure;
+       memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
+       nla_nest_end(skb, nla);
+
+       spin_lock_bh(&flow->lock);
+       used = flow->used;
+       stats.n_packets = flow->packet_count;
+       stats.n_bytes = flow->byte_count;
+       tcp_flags = flow->tcp_flags;
+       spin_unlock_bh(&flow->lock);
+
+       if (used)
+               NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);
+
+       if (stats.n_packets)
+               NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
+
+       if (tcp_flags)
+               NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+       if (state)
+               NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);
+
+       if (skb->len > total_len)
+               goto nla_put_failure;
+
+       odp_flow->len = skb->len;
+       err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+       goto exit_unlock;
+
+nla_put_failure:
+       err = -EMSGSIZE;
+exit_unlock:
+       rcu_read_unlock();
+       kfree_skb(skb);
+exit:
+       return err;
+}
+
+static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
+                                          struct dp_flowcmd *flowcmd)
+{
+       struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
+       struct odp_flow *odp_flow;
+       struct sk_buff *skb;
+       u32 len;
+       int err;
+
+       if (get_user(len, &uodp_flow->len))
+               return ERR_PTR(-EFAULT);
+       if (len < sizeof(struct odp_flow))
+               return ERR_PTR(-EINVAL);
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return ERR_PTR(-ENOMEM);
+
+       err = -EFAULT;
+       if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
+               goto error_free_skb;
+
+       odp_flow = (struct odp_flow *)skb->data;
+       err = -EINVAL;
+       if (odp_flow->len != len)
+               goto error_free_skb;
+
+       flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
+       flowcmd->dp_idx = odp_flow->dp_idx;
+       flowcmd->total_len = odp_flow->total_len;
+
+       err = nla_parse(a, ODP_FLOW_ATTR_MAX,
+                       (struct nlattr *)(skb->data + sizeof(struct odp_flow)),
+                       skb->len - sizeof(struct odp_flow), flow_policy);
+       if (err)
+               goto error_free_skb;
+
+       /* ODP_FLOW_ATTR_KEY. */
+       if (a[ODP_FLOW_ATTR_KEY]) {
+               err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
+               if (err)
+                       goto error_free_skb;
+       } else
+               memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));
+
+       /* ODP_FLOW_ATTR_ACTIONS. */
+       if (a[ODP_FLOW_ATTR_ACTIONS]) {
+               flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
+               flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
+               err = validate_actions(flowcmd->actions, flowcmd->actions_len);
+               if (err)
+                       goto error_free_skb;
+       } else {
+               flowcmd->actions = NULL;
+               flowcmd->actions_len = 0;
+       }
+
+       flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;
+
+       flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;
+
+       return skb;
+
+error_free_skb:
+       kfree_skb(skb);
+       return ERR_PTR(err);
+}
+
+static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
+{
+       struct tbl_node *flow_node;
+       struct dp_flowcmd flowcmd;
+       struct sw_flow *flow;
+       struct sk_buff *skb;
+       struct datapath *dp;
+       struct tbl *table;
+       u32 hash;
        int error;
 
-       if (copy_from_user(&uf, ufp, sizeof(struct odp_flow_put)))
-               return -EFAULT;
+       skb = copy_flow_from_user(uodp_flow, &flowcmd);
+       error = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
+
+       dp = get_dp_locked(flowcmd.dp_idx);
+       error = -ENODEV;
+       if (!dp)
+               goto error_kfree_skb;
+
+       hash = flow_hash(&flowcmd.key);
+       table = get_table_protected(dp);
+       flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
+       if (!flow_node) {
+               struct sw_flow_actions *acts;
+
+               /* Bail out if we're not allowed to create a new flow. */
+               error = -ENOENT;
+               if (cmd == ODP_FLOW_SET)
+                       goto error_unlock_dp;
+
+               /* Expand table, if necessary, to make room. */
+               if (tbl_count(table) >= tbl_n_buckets(table)) {
+                       error = expand_table(dp);
+                       if (error)
+                               goto error_unlock_dp;
+                       table = get_table_protected(dp);
+               }
+
+               /* Allocate flow. */
+               flow = flow_alloc();
+               if (IS_ERR(flow)) {
+                       error = PTR_ERR(flow);
+                       goto error_unlock_dp;
+               }
+               flow->key = flowcmd.key;
+               clear_stats(flow);
+
+               /* Obtain actions. */
+               acts = get_actions(&flowcmd);
+               error = PTR_ERR(acts);
+               if (IS_ERR(acts))
+                       goto error_free_flow;
+               rcu_assign_pointer(flow->sf_acts, acts);
+
+               error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+               if (error)
+                       goto error_free_flow;
+
+               /* Put flow in bucket. */
+               error = tbl_insert(table, &flow->tbl_node, hash);
+               if (error)
+                       goto error_free_flow;
+       } else {
+               /* We found a matching flow. */
+               struct sw_flow_actions *old_acts;
+
+               /* Bail out if we're not allowed to modify an existing flow.
+                * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
+                * because Generic Netlink treats the latter as a dump
+                * request.  We also accept NLM_F_EXCL in case that bug ever
+                * gets fixed.
+                */
+               error = -EEXIST;
+               if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
+                       goto error_kfree_skb;
+
+               /* Update actions. */
+               flow = flow_cast(flow_node);
+               old_acts = rcu_dereference_protected(flow->sf_acts,
+                                                    lockdep_is_held(&dp->mutex));
+               if (flowcmd.actions &&
+                   (old_acts->actions_len != flowcmd.actions_len ||
+                    memcmp(old_acts->actions, flowcmd.actions,
+                           flowcmd.actions_len))) {
+                       struct sw_flow_actions *new_acts;
+
+                       new_acts = get_actions(&flowcmd);
+                       error = PTR_ERR(new_acts);
+                       if (IS_ERR(new_acts))
+                               goto error_kfree_skb;
+
+                       rcu_assign_pointer(flow->sf_acts, new_acts);
+                       flow_deferred_free_acts(old_acts);
+               }
+
+               error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+               if (error)
+                       goto error_kfree_skb;
+
+               /* Clear stats. */
+               if (flowcmd.clear) {
+                       spin_lock_bh(&flow->lock);
+                       clear_stats(flow);
+                       spin_unlock_bh(&flow->lock);
+               }
+       }
+       kfree_skb(skb);
+       mutex_unlock(&dp->mutex);
+       return 0;
+
+error_free_flow:
+       flow_put(flow);
+error_unlock_dp:
+       mutex_unlock(&dp->mutex);
+error_kfree_skb:
+       kfree_skb(skb);
+exit:
+       return error;
+}
+
+static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
+{
+       struct tbl_node *flow_node;
+       struct dp_flowcmd flowcmd;
+       struct sw_flow *flow;
+       struct sk_buff *skb;
+       struct datapath *dp;
+       struct tbl *table;
+       int err;
+
+       skb = copy_flow_from_user(uodp_flow, &flowcmd);
+       err = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
+
+       dp = get_dp_locked(flowcmd.dp_idx);
+       err = -ENODEV;
+       if (!dp)
+               goto exit_kfree_skb;
+
+       table = get_table_protected(dp);
+       flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
+       err = -ENOENT;
+       if (!flow_node)
+               goto exit_unlock_dp;
+
+       if (cmd == ODP_FLOW_DEL) {
+               err = tbl_remove(table, flow_node);
+               if (err)
+                       goto exit_unlock_dp;
+       }
+
+       flow = flow_cast(flow_node);
+       err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
+       if (!err && cmd == ODP_FLOW_DEL)
+               flow_deferred_free(flow);
+
+exit_unlock_dp:
+       mutex_unlock(&dp->mutex);
+exit_kfree_skb:
+       kfree_skb(skb);
+exit:
+       return err;
+}
+
/* ODP_FLOW_DUMP handler: returns the flow following the iteration position
 * encoded in flowcmd.state and hands the updated position back to userspace
 * through copy_flow_to_user()'s final argument.
 *
 * NOTE(review): assumes flowcmd.state is a u64 packing (bucket << 32) | obj
 * -- confirm against struct dp_flowcmd. */
static int dump_flow(struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 bucket, obj;
	int err;

	/* Copies the request into a kernel skb and parses its attributes. */
	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	/* Returns with dp->mutex held on success; that mutex also makes the
	 * plain dp->table read below safe. */
	dp = get_dp_locked(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_free;

	/* Unpack the continuation cookie and advance to the next flow. */
	bucket = flowcmd.state >> 32;
	obj = flowcmd.state;
	flow_node = tbl_next(dp->table, &bucket, &obj);
	/* NOTE(review): end-of-table is reported as -ENODEV rather than 0 or
	 * -ENOENT; presumably userspace treats this as end-of-dump -- confirm. */
	err = -ENODEV;
	if (!flow_node)
		goto exit_unlock_dp;

	flow = flow_cast(flow_node);
	/* tbl_next() updated bucket/obj; return the new cookie so the next
	 * ODP_FLOW_DUMP call resumes where this one stopped. */
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
				((u64)bucket << 32) | obj);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
+
/* Netlink attribute policy for the ODP_DP_ATTR_* attributes that userspace
 * may include in a struct odp_datapath request. */
static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
	[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
	[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};
+
/* Serializes datapath @dp into Netlink-attribute form and copies the result
 * to the userspace buffer @dst, which holds @total_len bytes.
 *
 * Returns 0 on success; -EMSGSIZE if the message does not fit in
 * NLMSG_GOODSIZE or @total_len; -ENOMEM or -EFAULT on other failures. */
static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	struct nlattr *nla;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	/* Fixed-size header precedes the attributes. */
	odp_datapath = (struct odp_datapath*)__skb_put(skb, sizeof(struct odp_datapath));
	odp_datapath->dp_idx = dp->dp_idx;
	odp_datapath->total_len = total_len;

	/* NOTE(review): presumably dp_name() requires RCU protection, hence
	 * the lock around just this call -- confirm. */
	rcu_read_lock();
	err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
	if (!nla)
		goto nla_put_failure;
	get_dp_stats(dp, nla_data(nla));

	/* The NLA_PUT_U32() macros jump to nla_put_failure on overflow. */
	NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
		    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);

	if (dp->sflow_probability)
		NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);

	/* The message must also fit in the space userspace offered. */
	if (skb->len > total_len)
		goto nla_put_failure;

	odp_datapath->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free_skb;

nla_put_failure:
	err = -EMSGSIZE;
exit_free_skb:
	kfree_skb(skb);
exit:
	return err;
}
+
/* Copies a struct odp_datapath request (fixed header plus trailing Netlink
 * attributes) from userspace into a freshly allocated skb and parses the
 * attributes into @a against datapath_policy.
 *
 * Returns the skb (caller must kfree_skb() it) or an ERR_PTR on failure.
 *
 * NOTE(review): @len comes from userspace and is only bounded from below;
 * a very large value drives a correspondingly large alloc_skb() -- confirm
 * an upper bound is enforced elsewhere. */
static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_datapath->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_datapath))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
		goto error_free_skb;

	/* Re-validate against the copied header: userspace may race on ->len. */
	odp_datapath = (struct odp_datapath *)skb->data;
	err = -EINVAL;
	if (odp_datapath->len != len)
		goto error_free_skb;

	/* Attributes follow the fixed-size header. */
	err = nla_parse(a, ODP_DP_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
			skb->len - sizeof(struct odp_datapath), datapath_policy);
	if (err)
		goto error_free_skb;

	/* Semantic checks beyond what nla_policy can express. */
	if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
		u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);

		err = -EINVAL;
		if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
			goto error_free_skb;
	}

	err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
+
/* Called with dp_mutex and optionally with RTNL lock also.
 * Holds the returned datapath's mutex on return.
 *
 * The datapath is identified by ODP_DP_ATTR_NAME when present (the name of
 * its local port), otherwise by odp_datapath->dp_idx. */
static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	WARN_ON_ONCE(!mutex_is_locked(&dp_mutex));

	if (!a[ODP_DP_ATTR_NAME]) {
		struct datapath *dp;

		dp = get_dp(odp_datapath->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);
		mutex_lock(&dp->mutex);
		return dp;
	} else {
		struct datapath *dp;
		struct vport *vport;
		int dp_idx;

		/* Only a datapath's local port (ODPP_LOCAL) names the
		 * datapath itself; any other vport with that name is a miss. */
		vport_lock();
		vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
		dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;
		vport_unlock();

		if (dp_idx < 0)
			return ERR_PTR(-ENODEV);

		/* dp_mutex is held, so the datapath cannot have been
		 * destroyed between dropping vport_lock and this lookup. */
		dp = get_dp(dp_idx);
		mutex_lock(&dp->mutex);
		return dp;
	}
}
+
+static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
+{
+       if (a[ODP_DP_ATTR_IPV4_FRAGS])
+               dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
+       if (a[ODP_DP_ATTR_SAMPLING])
+               dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
+}
+
/* ODP_DP_NEW handler: creates a new datapath at the requested index, or at
 * the lowest free index when dp_idx < 0, including its local internal port
 * (ODPP_LOCAL) named by the mandatory ODP_DP_ATTR_NAME attribute. */
static int new_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct vport_parms parms;
	struct sk_buff *skb;
	struct datapath *dp;
	struct vport *vport;
	int dp_idx;
	int err;
	int i;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_datapath = (struct odp_datapath *)skb->data;

	/* The name is required: it becomes the local port's netdev name. */
	err = -EINVAL;
	if (!a[ODP_DP_ATTR_NAME])
		goto err_free_skb;

	rtnl_lock();
	mutex_lock(&dp_mutex);
	/* The module reference taken here is held for the datapath's
	 * lifetime on the success path; only error paths drop it below. */
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock_dp_mutex;

	/* Choose a slot in dps[]: caller-specified, or first free. */
	dp_idx = odp_datapath->dp_idx;
	if (dp_idx < 0) {
		err = -EFBIG;
		for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
			if (get_dp(dp_idx))
				continue;
			err = 0;
			break;
		}
	} else if (dp_idx < ARRAY_SIZE(dps))
		err = get_dp(dp_idx) ? -EBUSY : 0;
	else
		err = -EINVAL;
	if (err)
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	mutex_init(&dp->mutex);
	/* Hold dp->mutex throughout construction so nothing can observe a
	 * half-built datapath once it becomes reachable via dps[]. */
	mutex_lock(&dp->mutex);
	dp->dp_idx = dp_idx;
	for (i = 0; i < DP_N_QUEUES; i++)
		skb_queue_head_init(&dp->queues[i]);
	init_waitqueue_head(&dp->waitqueue);

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		/* A name collision on the local port means the datapath
		 * effectively already exists. */
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_table;
	}

	dp->drop_frags = 0;
	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	/* Apply optional attributes (frag handling, sFlow sampling). */
	change_datapath(dp, a);

	/* Publish: from here the datapath is visible to readers. */
	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	mutex_unlock(&dp->mutex);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();

	return 0;

	/* Error unwinding mirrors construction order exactly. */
err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	mutex_unlock(&dp->mutex);
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock_dp_mutex:
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
err_free_skb:
	kfree_skb(skb);
err:
	return err;
}
 
-static struct sw_flow *do_del_flow(struct datapath *dp, const struct nlattr __user *key, u32 key_len)
/* ODP_DP_DEL handler: destroys the datapath identified by the request. */
static int del_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	/* Both RTNL and dp_mutex are needed to tear down the datapath's
	 * ports and unpublish it from dps[]. */
	rtnl_lock();
	mutex_lock(&dp_mutex);
	/* On success, lookup_datapath() returns with dp->mutex held. */
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	/* NOTE(review): presumably destroy_dp() consumes the dp->mutex taken
	 * by lookup_datapath() -- there is no explicit unlock here; confirm. */
	destroy_dp(dp);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
	rtnl_unlock();
exit:
	return err;
}
 
-static int del_flow(struct datapath *dp, struct odp_flow __user *ufp)
/* ODP_DP_SET handler: updates a datapath's mutable attributes. */
static int set_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	mutex_lock(&dp_mutex);
	/* On success, lookup_datapath() returns with dp->mutex held. */
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	change_datapath(dp, a);
	mutex_unlock(&dp->mutex);
	err = 0;

exit_free:
	kfree_skb(skb);
	mutex_unlock(&dp_mutex);
exit:
	return err;
}
 
-static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
/* ODP_DP_GET handler: copies the datapath's description to userspace. */
static int get_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	/* dp_mutex is only needed for the lookup itself; the dp->mutex held
	 * on return from lookup_datapath() keeps the datapath alive while
	 * we serialize it. */
	mutex_lock(&dp_mutex);
	dp = lookup_datapath(odp_datapath, a);
	mutex_unlock(&dp_mutex);

	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
	mutex_unlock(&dp->mutex);
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
 
-static int do_flowvec_ioctl(struct datapath *dp, unsigned long argp,
-                           int (*function)(struct datapath *,
-                                           const struct odp_flowvec *))
/* ODP_DP_DUMP handler: returns the first datapath whose index is >= the
 * requested dp_idx, or -ENODEV when the table is exhausted.  Userspace
 * iterates by resubmitting with dp_idx one past the previous result. */
static int dump_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 dp_idx;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	mutex_lock(&dp_mutex);
	for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
		struct datapath *dp = get_dp(dp_idx);
		if (!dp)
			continue;

		/* Take dp->mutex before dropping dp_mutex so the datapath
		 * cannot be destroyed while it is being copied out. */
		mutex_lock(&dp->mutex);
		mutex_unlock(&dp_mutex);
		err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
		mutex_unlock(&dp->mutex);
		goto exit_free;
	}
	mutex_unlock(&dp_mutex);
	err = -ENODEV;

exit_free:
	kfree_skb(skb);
exit:
	return err;
}
 
-static struct sw_flow *do_dump_flow(struct datapath *dp, u32 __user *state)
/* Netlink attribute policy for the ODP_VPORT_ATTR_* attributes that
 * userspace may include in a struct odp_vport request.  (IFINDEX/IFLINK are
 * kernel-to-user only and therefore not listed here.) */
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
+
+static int copy_vport_to_user(void __user *dst, struct vport *vport, uint32_t total_len)
 {
-       struct tbl *table = get_table_protected(dp);
-       struct tbl_node *tbl_node;
-       u32 bucket, obj;
+       struct odp_vport *odp_vport;
+       struct sk_buff *skb;
+       struct nlattr *nla;
+       int ifindex, iflink;
+       int err;
 
-       if (get_user(bucket, &state[0]) || get_user(obj, &state[1]))
-               return ERR_PTR(-EFAULT);
+       skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+       err = -ENOMEM;
+       if (!skb)
+               goto exit;
+
+       rcu_read_lock();
+       odp_vport = (struct odp_vport*)__skb_put(skb, sizeof(struct odp_vport));
+       odp_vport->dp_idx = vport->dp->dp_idx;
+       odp_vport->total_len = total_len;
 
-       tbl_node = tbl_next(table, &bucket, &obj);
+       NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
+       NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
+       NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));
 
-       if (put_user(bucket, &state[0]) || put_user(obj, &state[1]))
-               return ERR_PTR(-EFAULT);
+       nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
+       if (!nla)
+               goto nla_put_failure;
+       if (vport_get_stats(vport, nla_data(nla)))
+               __skb_trim(skb, skb->len - nla->nla_len);
 
-       return tbl_node ? flow_cast(tbl_node) : NULL;
-}
+       NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
 
-static int dump_flow(struct datapath *dp, struct odp_flow_dump __user *udumpp)
-{
-       struct odp_flow __user *uflowp;
-       struct nlattr __user *ukey;
-       struct sw_flow *flow;
-       u32 key_len;
+       NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));
 
-       flow = do_dump_flow(dp, udumpp->state);
-       if (IS_ERR(flow))
-               return PTR_ERR(flow);
+       err = vport_get_options(vport, skb);
 
-       if (get_user(uflowp, (struct odp_flow __user *__user*)&udumpp->flow))
-               return -EFAULT;
+       ifindex = vport_get_ifindex(vport);
+       if (ifindex > 0)
+               NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);
 
-       if (!flow)
-               return put_user(ODPFF_EOF, &uflowp->flags);
+       iflink = vport_get_iflink(vport);
+       if (iflink > 0)
+               NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);
 
-       if (put_user(0, &uflowp->flags) ||
-           get_user(ukey, (struct nlattr __user * __user*)&uflowp->key) ||
-           get_user(key_len, &uflowp->key_len))
-               return -EFAULT;
+       err = -EMSGSIZE;
+       if (skb->len > total_len)
+               goto exit_unlock;
 
-       key_len = flow_copy_to_user(ukey, &flow->key, key_len);
-       if (key_len < 0)
-               return key_len;
-       if (put_user(key_len, &uflowp->key_len))
-               return -EFAULT;
+       odp_vport->len = skb->len;
+       err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+       goto exit_unlock;
 
-       return answer_query(dp, flow, 0, uflowp);
+nla_put_failure:
+       err = -EMSGSIZE;
+exit_unlock:
+       rcu_read_unlock();
+       kfree_skb(skb);
+exit:
+       return err;
 }
 
-static int do_execute(struct datapath *dp, const struct odp_execute *execute)
/* Copies a struct odp_vport request (fixed header plus trailing Netlink
 * attributes) from userspace into a freshly allocated skb and parses the
 * attributes into @a against vport_policy.
 *
 * Returns the skb (caller must kfree_skb() it) or an ERR_PTR on failure.
 *
 * NOTE(review): @len comes from userspace and is only bounded from below --
 * confirm an upper bound is enforced elsewhere. */
static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	/* Re-validate against the copied header: userspace may race on ->len. */
	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	/* Attributes follow the fixed-size header. */
	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
 
-static int execute_packet(struct datapath *dp, const struct odp_execute __user *executep)
+
/* Called without any locks (or with RTNL lock).
 * Returns holding vport->dp->mutex.
 *
 * The vport is identified by ODP_VPORT_ATTR_NAME when present, otherwise by
 * (odp_vport->dp_idx, ODP_VPORT_ATTR_PORT_NO). */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		int dp_idx, port_no;

	retry:
		/* vport_lock protects the global name table, but dp->mutex
		 * cannot be taken while it is held.  So: resolve the name to
		 * (dp_idx, port_no), drop the lock, lock the datapath, then
		 * re-validate that the port still matches. */
		vport_lock();
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport) {
			vport_unlock();
			return ERR_PTR(-ENODEV);
		}
		dp_idx = vport->dp->dp_idx;
		port_no = vport->port_no;
		vport_unlock();

		dp = get_dp_locked(dp_idx);
		if (!dp)
			goto retry;

		/* The port may have been deleted or renamed while no lock
		 * was held; if so, start over from the name lookup. */
		vport = get_vport_protected(dp, port_no);
		if (!vport ||
		    strcmp(vport_get_name(vport), nla_data(a[ODP_VPORT_ATTR_NAME]))) {
			mutex_unlock(&dp->mutex);
			goto retry;
		}

		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		/* Returns with dp->mutex held on success. */
		dp = get_dp_locked(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport) {
			mutex_unlock(&dp->mutex);
			return ERR_PTR(-ENOENT);
		}
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
 
-/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
-int dp_min_mtu(const struct datapath *dp)
+static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
 {
-       struct vport *p;
-       int mtu = 0;
+       int err = 0;
+       if (a[ODP_VPORT_ATTR_STATS])
+               err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
+       if (!err && a[ODP_VPORT_ATTR_ADDRESS])
+               err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
+       if (!err && a[ODP_VPORT_ATTR_MTU])
+               err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
+       return err;
+}
 
-       ASSERT_RTNL();
/* ODP_VPORT_NEW handler: attaches a new port to a datapath, at the
 * requested port number or at the lowest free one, applies the optional
 * attributes, and echoes the resulting port description to userspace. */
static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	/* Name and type are mandatory for port creation. */
	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	/* RTNL is required: creating the port registers a net device. */
	rtnl_lock();

	/* Returns with dp->mutex held on success. */
	dp = get_dp_locked(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock_rtnl;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock_dp;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock_dp;
	} else {
		/* Pick the lowest free port number, skipping ODPP_LOCAL. */
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock_dp;
			}

			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock_dp;

	/* A new port can lower the minimum MTU of the bridge. */
	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		/* Roll back: the port was created but could not be
		 * configured as requested. */
		dp_detach_port(vport);
		goto exit_unlock_dp;
	}

	/* NOTE(review): if this copy-out fails the port stays attached even
	 * though userspace sees an error -- confirm that is intended. */
	err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);

exit_unlock_dp:
	mutex_unlock(&dp->mutex);
exit_unlock_rtnl:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}
 
-static int query_port(int dp_idx, struct odp_port __user *uport)
+static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
 {
-       struct odp_port port;
-
-       if (copy_from_user(&port, uport, sizeof(port)))
-               return -EFAULT;
+       struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+       struct vport *vport;
+       struct sk_buff *skb;
+       int err;
 
-       if (port.devname[0]) {
-               struct vport *vport;
+       skb = copy_vport_from_user(uodp_vport, a);
+       err = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
 
-               port.devname[IFNAMSIZ - 1] = '\0';
+       rtnl_lock();
+       vport = lookup_vport((struct odp_vport *)skb->data, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_free;
 
-               vport_lock();
-               vport = vport_locate(port.devname);
-               if (vport)
-                       compose_odp_port(vport, &port);
-               vport_unlock();
+       err = 0;
+       if (a[ODP_VPORT_ATTR_OPTIONS])
+               err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
+       if (!err)
+               err = change_vport(vport, a);
 
-               if (!vport)
-                       return -ENODEV;
-       } else {
-               struct vport *vport;
-               struct datapath *dp;
+       mutex_unlock(&vport->dp->mutex);
+exit_free:
+       kfree_skb(skb);
+       rtnl_unlock();
+exit:
+       return err;
+}
 
-               if (port.port >= DP_MAX_PORTS)
-                       return -EINVAL;
+static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
+{
+       struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+       struct datapath *dp;
+       struct vport *vport;
+       struct sk_buff *skb;
+       int err;
 
-               dp = get_dp_locked(dp_idx);
-               if (!dp)
-                       return -ENODEV;
+       skb = copy_vport_from_user(uodp_vport, a);
+       err = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
 
-               vport = get_vport_protected(dp, port.port);
-               if (vport)
-                       compose_odp_port(vport, &port);
-               mutex_unlock(&dp->mutex);
+       rtnl_lock();
+       vport = lookup_vport((struct odp_vport *)skb->data, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_free;
+       dp = vport->dp;
 
-               if (!vport)
-                       return -ENOENT;
-       }
+       err = -EINVAL;
+       if (vport->port_no == ODPP_LOCAL)
+               goto exit_free;
 
-       return copy_to_user(uport, &port, sizeof(struct odp_port));
+       err = dp_detach_port(vport);
+       mutex_unlock(&dp->mutex);
+exit_free:
+       kfree_skb(skb);
+       rtnl_unlock();
+exit:
+       return err;
 }
 
-static int do_dump_port(struct datapath *dp, struct odp_vport_dump *dump)
+static int get_vport(struct odp_vport __user *uodp_vport)
 {
-       u32 port_no;
+       struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+       struct odp_vport *odp_vport;
+       struct vport *vport;
+       struct sk_buff *skb;
+       int err;
 
-       for (port_no = dump->port_no; port_no < DP_MAX_PORTS; port_no++) {
-               struct vport *vport = get_vport_protected(dp, port_no);
-               if (vport) {
-                       struct odp_port odp_port;
+       skb = copy_vport_from_user(uodp_vport, a);
+       err = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
+       odp_vport = (struct odp_vport *)skb->data;
 
-                       compose_odp_port(vport, &odp_port);
-                       return copy_to_user((struct odp_port __force __user*)dump->port, &odp_port, sizeof(struct odp_port));
-               }
-       }
+       vport = lookup_vport(odp_vport, a);
+       err = PTR_ERR(vport);
+       if (IS_ERR(vport))
+               goto exit_free;
 
-       return put_user('\0', (char __force __user*)&dump->port->devname[0]);
+       err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+       mutex_unlock(&vport->dp->mutex);
+exit_free:
+       kfree_skb(skb);
+exit:
+       return err;
 }
 
-static int dump_port(struct datapath *dp, struct odp_vport_dump __user *udump)
+static int dump_vport(struct odp_vport __user *uodp_vport)
 {
-       struct odp_vport_dump dump;
+       struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
+       struct odp_vport *odp_vport;
+       struct sk_buff *skb;
+       struct datapath *dp;
+       u32 port_no;
+       int err;
 
-       if (copy_from_user(&dump, udump, sizeof(dump)))
-               return -EFAULT;
+       skb = copy_vport_from_user(uodp_vport, a);
+       err = PTR_ERR(skb);
+       if (IS_ERR(skb))
+               goto exit;
+       odp_vport = (struct odp_vport *)skb->data;
 
-       return do_dump_port(dp, &dump);
-}
+       dp = get_dp_locked(odp_vport->dp_idx);
+       err = -ENODEV;
+       if (!dp)
+               goto exit_free;
 
-static int get_listen_mask(const struct file *f)
-{
-       return (long)f->private_data;
-}
+       port_no = 0;
+       if (a[ODP_VPORT_ATTR_PORT_NO])
+               port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);
+       for (; port_no < DP_MAX_PORTS; port_no++) {
+               struct vport *vport = get_vport_protected(dp, port_no);
+               if (vport) {
+                       err = copy_vport_to_user(uodp_vport, vport, odp_vport->total_len);
+                       goto exit_unlock_dp;
+               }
+       }
+       err = -ENODEV;
 
-static void set_listen_mask(struct file *f, int listen_mask)
-{
-       f->private_data = (void*)(long)listen_mask;
+exit_unlock_dp:
+       mutex_unlock(&dp->mutex);
+exit_free:
+       kfree_skb(skb);
+exit:
+       return err;
 }
 
 static long openvswitch_ioctl(struct file *f, unsigned int cmd,
@@ -1445,60 +1972,71 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
 {
        int dp_idx = iminor(f->f_dentry->d_inode);
        struct datapath *dp;
-       int drop_frags, listeners, port_no;
-       unsigned int sflow_probability;
+       int listeners;
        int err;
 
        /* Handle commands with special locking requirements up front. */
        switch (cmd) {
-       case ODP_DP_CREATE:
-               err = create_dp(dp_idx, (char __user *)argp);
+       case ODP_DP_NEW:
+               err = new_datapath((struct odp_datapath __user *)argp);
+               goto exit;
+
+       case ODP_DP_GET:
+               err = get_datapath((struct odp_datapath __user *)argp);
+               goto exit;
+
+       case ODP_DP_DEL:
+               err = del_datapath((struct odp_datapath __user *)argp);
+               goto exit;
+
+       case ODP_DP_SET:
+               err = set_datapath((struct odp_datapath __user *)argp);
                goto exit;
 
-       case ODP_DP_DESTROY:
-               err = destroy_dp(dp_idx);
+       case ODP_DP_DUMP:
+               err = dump_datapath((struct odp_datapath __user *)argp);
                goto exit;
 
-       case ODP_VPORT_ATTACH:
-               err = attach_port(dp_idx, (struct odp_port __user *)argp);
+       case ODP_VPORT_NEW:
+               err = attach_vport((struct odp_vport __user *)argp);
                goto exit;
 
-       case ODP_VPORT_DETACH:
-               err = get_user(port_no, (int __user *)argp);
-               if (!err)
-                       err = detach_port(dp_idx, port_no);
+       case ODP_VPORT_GET:
+               err = get_vport((struct odp_vport __user *)argp);
                goto exit;
 
-       case ODP_VPORT_QUERY:
-               err = query_port(dp_idx, (struct odp_port __user *)argp);
+       case ODP_VPORT_DEL:
+               err = del_vport(cmd, (struct odp_vport __user *)argp);
                goto exit;
 
-       case ODP_VPORT_MOD:
-               err = vport_user_mod((struct odp_port __user *)argp);
+       case ODP_VPORT_SET:
+               err = set_vport(cmd, (struct odp_vport __user *)argp);
                goto exit;
 
-       case ODP_VPORT_STATS_GET:
-               err = vport_user_stats_get((struct odp_vport_stats_req __user *)argp);
+       case ODP_VPORT_DUMP:
+               err = dump_vport((struct odp_vport __user *)argp);
                goto exit;
 
-       case ODP_VPORT_STATS_SET:
-               err = vport_user_stats_set((struct odp_vport_stats_req __user *)argp);
+       case ODP_FLOW_FLUSH:
+               err = flush_flows(argp);
                goto exit;
 
-       case ODP_VPORT_ETHER_GET:
-               err = vport_user_ether_get((struct odp_vport_ether __user *)argp);
+       case ODP_FLOW_NEW:
+       case ODP_FLOW_SET:
+               err = new_flow(cmd, (struct odp_flow __user *)argp);
                goto exit;
 
-       case ODP_VPORT_ETHER_SET:
-               err = vport_user_ether_set((struct odp_vport_ether __user *)argp);
+       case ODP_FLOW_GET:
+       case ODP_FLOW_DEL:
+               err = get_or_del_flow(cmd, (struct odp_flow __user *)argp);
                goto exit;
 
-       case ODP_VPORT_MTU_GET:
-               err = vport_user_mtu_get((struct odp_vport_mtu __user *)argp);
+       case ODP_FLOW_DUMP:
+               err = dump_flow((struct odp_flow __user *)argp);
                goto exit;
 
-       case ODP_VPORT_MTU_SET:
-               err = vport_user_mtu_set((struct odp_vport_mtu __user *)argp);
+       case ODP_EXECUTE:
+               err = execute_packet((struct odp_packet __user *)argp);
                goto exit;
        }
 
@@ -1508,25 +2046,6 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
                goto exit;
 
        switch (cmd) {
-       case ODP_DP_STATS:
-               err = get_dp_stats(dp, (struct odp_stats __user *)argp);
-               break;
-
-       case ODP_GET_DROP_FRAGS:
-               err = put_user(dp->drop_frags, (int __user *)argp);
-               break;
-
-       case ODP_SET_DROP_FRAGS:
-               err = get_user(drop_frags, (int __user *)argp);
-               if (err)
-                       break;
-               err = -EINVAL;
-               if (drop_frags != 0 && drop_frags != 1)
-                       break;
-               dp->drop_frags = drop_frags;
-               err = 0;
-               break;
-
        case ODP_GET_LISTEN_MASK:
                err = put_user(get_listen_mask(f), (int __user *)argp);
                break;
@@ -1542,44 +2061,6 @@ static long openvswitch_ioctl(struct file *f, unsigned int cmd,
                set_listen_mask(f, listeners);
                break;
 
-       case ODP_GET_SFLOW_PROBABILITY:
-               err = put_user(dp->sflow_probability, (unsigned int __user *)argp);
-               break;
-
-       case ODP_SET_SFLOW_PROBABILITY:
-               err = get_user(sflow_probability, (unsigned int __user *)argp);
-               if (!err)
-                       dp->sflow_probability = sflow_probability;
-               break;
-
-       case ODP_VPORT_DUMP:
-               err = dump_port(dp, (struct odp_vport_dump __user *)argp);
-               break;
-
-       case ODP_FLOW_FLUSH:
-               err = flush_flows(dp);
-               break;
-
-       case ODP_FLOW_PUT:
-               err = put_flow(dp, (struct odp_flow_put __user *)argp);
-               break;
-
-       case ODP_FLOW_DEL:
-               err = del_flow(dp, (struct odp_flow __user *)argp);
-               break;
-
-       case ODP_FLOW_GET:
-               err = do_flowvec_ioctl(dp, argp, do_query_flows);
-               break;
-
-       case ODP_FLOW_DUMP:
-               err = dump_flow(dp, (struct odp_flow_dump __user *)argp);
-               break;
-
-       case ODP_EXECUTE:
-               err = execute_packet(dp, (struct odp_execute __user *)argp);
-               break;
-
        default:
                err = -ENOIOCTLCMD;
                break;
@@ -1600,275 +2081,37 @@ static int dp_has_packet_of_interest(struct datapath *dp, int listeners)
 }
 
 #ifdef CONFIG_COMPAT
-static int compat_dump_port(struct datapath *dp, struct compat_odp_vport_dump __user *compat)
-{
-       struct odp_vport_dump dump;
-       compat_uptr_t port;
-
-       if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_vport_dump)) ||
-           __get_user(port, &compat->port) ||
-           __get_user(dump.port_no, &compat->port_no))
-               return -EFAULT;
-
-       dump.port = (struct odp_port __force *)compat_ptr(port);
-       return do_dump_port(dp, &dump);
-}
-
-static int compat_get_flow(struct odp_flow *flow, const struct compat_odp_flow __user *compat)
-{
-       compat_uptr_t key, actions;
-
-       if (!access_ok(VERIFY_READ, compat, sizeof(struct compat_odp_flow)) ||
-           __copy_from_user(&flow->stats, &compat->stats, sizeof(struct odp_flow_stats)) ||
-           __get_user(key, &compat->key) ||
-           __get_user(flow->key_len, &compat->key_len) ||
-           __get_user(actions, &compat->actions) ||
-           __get_user(flow->actions_len, &compat->actions_len) ||
-           __get_user(flow->flags, &compat->flags))
-               return -EFAULT;
-
-       flow->key = (struct nlattr __force *)compat_ptr(key);
-       flow->actions = (struct nlattr __force *)compat_ptr(actions);
-       return 0;
-}
-
-static int compat_put_flow(struct datapath *dp, struct compat_odp_flow_put __user *ufp)
-{
-       struct odp_flow_stats stats;
-       struct odp_flow_put fp;
-       int error;
-
-       if (compat_get_flow(&fp.flow, &ufp->flow) ||
-           get_user(fp.flags, &ufp->flags))
-               return -EFAULT;
-
-       error = do_put_flow(dp, &fp, &stats);
-       if (error)
-               return error;
-
-       if (copy_to_user(&ufp->flow.stats, &stats,
-                        sizeof(struct odp_flow_stats)))
-               return -EFAULT;
-
-       return 0;
-}
-
-static int compat_answer_query(struct datapath *dp, struct sw_flow *flow,
-                              u32 query_flags,
-                              struct compat_odp_flow __user *ufp)
-{
-       compat_uptr_t actions;
-
-       if (get_user(actions, &ufp->actions))
-               return -EFAULT;
-
-       return do_answer_query(dp, flow, query_flags, &ufp->stats,
-                              compat_ptr(actions), &ufp->actions_len);
-}
-
-static int compat_del_flow(struct datapath *dp, struct compat_odp_flow __user *ufp)
-{
-       struct sw_flow *flow;
-       struct odp_flow uf;
-       int error;
-
-       if (compat_get_flow(&uf, ufp))
-               return -EFAULT;
-
-       flow = do_del_flow(dp, (const struct nlattr __force __user *)uf.key, uf.key_len);
-       if (IS_ERR(flow))
-               return PTR_ERR(flow);
-
-       error = compat_answer_query(dp, flow, 0, ufp);
-       flow_deferred_free(flow);
-       return error;
-}
-
-static int compat_query_flows(struct datapath *dp,
-                             struct compat_odp_flow __user *flows,
-                             u32 n_flows)
-{
-       struct tbl *table = get_table_protected(dp);
-       u32 i;
-
-       for (i = 0; i < n_flows; i++) {
-               struct compat_odp_flow __user *ufp = &flows[i];
-               struct odp_flow uf;
-               struct tbl_node *flow_node;
-               struct sw_flow_key key;
-               int error;
-
-               if (compat_get_flow(&uf, ufp))
-                       return -EFAULT;
-
-               error = flow_copy_from_user(&key, (const struct nlattr __force __user *) uf.key, uf.key_len);
-               if (error)
-                       return error;
-
-               flow_node = tbl_lookup(table, &key, flow_hash(&key), flow_cmp);
-               if (!flow_node)
-                       error = put_user(ENOENT, &ufp->stats.error);
-               else
-                       error = compat_answer_query(dp, flow_cast(flow_node),
-                                                   uf.flags, ufp);
-               if (error)
-                       return -EFAULT;
-       }
-       return n_flows;
-}
-
-static int compat_dump_flow(struct datapath *dp, struct compat_odp_flow_dump __user *udumpp)
-{
-       struct compat_odp_flow __user *uflowp;
-       compat_uptr_t compat_ufp;
-       struct sw_flow *flow;
-       compat_uptr_t ukey;
-       u32 key_len;
-
-       flow = do_dump_flow(dp, udumpp->state);
-       if (IS_ERR(flow))
-               return PTR_ERR(flow);
-
-       if (get_user(compat_ufp, &udumpp->flow))
-               return -EFAULT;
-       uflowp = compat_ptr(compat_ufp);
-
-       if (!flow)
-               return put_user(ODPFF_EOF, &uflowp->flags);
-
-       if (put_user(0, &uflowp->flags) ||
-           get_user(ukey, &uflowp->key) ||
-           get_user(key_len, &uflowp->key_len))
-               return -EFAULT;
-
-       key_len = flow_copy_to_user(compat_ptr(ukey), &flow->key, key_len);
-       if (key_len < 0)
-               return key_len;
-       if (put_user(key_len, &uflowp->key_len))
-               return -EFAULT;
-
-       return compat_answer_query(dp, flow, 0, uflowp);
-}
-
-static int compat_flowvec_ioctl(struct datapath *dp, unsigned long argp,
-                               int (*function)(struct datapath *,
-                                               struct compat_odp_flow __user *,
-                                               u32 n_flows))
-{
-       struct compat_odp_flowvec __user *uflowvec;
-       struct compat_odp_flow __user *flows;
-       struct compat_odp_flowvec flowvec;
-       int retval;
-
-       uflowvec = compat_ptr(argp);
-       if (!access_ok(VERIFY_WRITE, uflowvec, sizeof(*uflowvec)) ||
-           copy_from_user(&flowvec, uflowvec, sizeof(flowvec)))
-               return -EFAULT;
-
-       if (flowvec.n_flows > INT_MAX / sizeof(struct compat_odp_flow))
-               return -EINVAL;
-
-       flows = compat_ptr(flowvec.flows);
-       if (!access_ok(VERIFY_WRITE, flows,
-                      flowvec.n_flows * sizeof(struct compat_odp_flow)))
-               return -EFAULT;
-
-       retval = function(dp, flows, flowvec.n_flows);
-       return (retval < 0 ? retval
-               : retval == flowvec.n_flows ? 0
-               : put_user(retval, &uflowvec->n_flows));
-}
-
-static int compat_execute(struct datapath *dp, const struct compat_odp_execute __user *uexecute)
-{
-       struct odp_execute execute;
-       compat_uptr_t actions;
-       compat_uptr_t data;
-
-       if (!access_ok(VERIFY_READ, uexecute, sizeof(struct compat_odp_execute)) ||
-           __get_user(actions, &uexecute->actions) ||
-           __get_user(execute.actions_len, &uexecute->actions_len) ||
-           __get_user(data, &uexecute->data) ||
-           __get_user(execute.length, &uexecute->length))
-               return -EFAULT;
-
-       execute.actions = (struct nlattr __force *)compat_ptr(actions);
-       execute.data = (const void __force *)compat_ptr(data);
-
-       return do_execute(dp, &execute);
-}
-
 static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
 {
-       int dp_idx = iminor(f->f_dentry->d_inode);
-       struct datapath *dp;
-       int err;
-
        switch (cmd) {
-       case ODP_DP_DESTROY:
        case ODP_FLOW_FLUSH:
                /* Ioctls that don't need any translation at all. */
                return openvswitch_ioctl(f, cmd, argp);
 
-       case ODP_DP_CREATE:
-       case ODP_VPORT_ATTACH:
-       case ODP_VPORT_DETACH:
-       case ODP_VPORT_MOD:
-       case ODP_VPORT_MTU_SET:
-       case ODP_VPORT_MTU_GET:
-       case ODP_VPORT_ETHER_SET:
-       case ODP_VPORT_ETHER_GET:
-       case ODP_VPORT_STATS_SET:
-       case ODP_VPORT_STATS_GET:
-       case ODP_DP_STATS:
-       case ODP_GET_DROP_FRAGS:
-       case ODP_SET_DROP_FRAGS:
+       case ODP_DP_NEW:
+       case ODP_DP_GET:
+       case ODP_DP_DEL:
+       case ODP_DP_SET:
+       case ODP_DP_DUMP:
+       case ODP_VPORT_NEW:
+       case ODP_VPORT_DEL:
+       case ODP_VPORT_GET:
+       case ODP_VPORT_SET:
+       case ODP_VPORT_DUMP:
+       case ODP_FLOW_NEW:
+       case ODP_FLOW_DEL:
+       case ODP_FLOW_GET:
+       case ODP_FLOW_SET:
+       case ODP_FLOW_DUMP:
        case ODP_SET_LISTEN_MASK:
        case ODP_GET_LISTEN_MASK:
-       case ODP_SET_SFLOW_PROBABILITY:
-       case ODP_GET_SFLOW_PROBABILITY:
-       case ODP_VPORT_QUERY:
+       case ODP_EXECUTE:
                /* Ioctls that just need their pointer argument extended. */
                return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));
-       }
-
-       dp = get_dp_locked(dp_idx);
-       err = -ENODEV;
-       if (!dp)
-               goto exit;
-
-       switch (cmd) {
-       case ODP_VPORT_DUMP32:
-               err = compat_dump_port(dp, compat_ptr(argp));
-               break;
-
-       case ODP_FLOW_PUT32:
-               err = compat_put_flow(dp, compat_ptr(argp));
-               break;
-
-       case ODP_FLOW_DEL32:
-               err = compat_del_flow(dp, compat_ptr(argp));
-               break;
-
-       case ODP_FLOW_GET32:
-               err = compat_flowvec_ioctl(dp, argp, compat_query_flows);
-               break;
-
-       case ODP_FLOW_DUMP32:
-               err = compat_dump_flow(dp, compat_ptr(argp));
-               break;
-
-       case ODP_EXECUTE32:
-               err = compat_execute(dp, compat_ptr(argp));
-               break;
 
        default:
-               err = -ENOIOCTLCMD;
-               break;
+               return -ENOIOCTLCMD;
        }
-       mutex_unlock(&dp->mutex);
-exit:
-       return err;
 }
 #endif