+ err = -EINVAL;
+ if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
+ nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
+ goto exit_free_skb;
+
+ actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
+ actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
+ err = validate_actions(actions, actions_len);
+ if (err)
+ goto exit_free_skb;
+
+ packet = skb_clone(skb, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!packet)
+ goto exit_free_skb;
+ packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
+ packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);
+
+ skb_reset_mac_header(packet);
+ eth = eth_hdr(packet);
+
+ /* Normally, setting the skb 'protocol' field would be handled by a
+ * call to eth_type_trans(), but it assumes there's a sending
+ * device, which we may not have. */
+ if (ntohs(eth->h_proto) >= 1536)
+ packet->protocol = eth->h_proto;
+ else
+ packet->protocol = htons(ETH_P_802_2);
+
+ err = flow_extract(packet, -1, &key, &is_frag);
+ if (err)
+ goto exit_free_skb;
+
+ rcu_read_lock();
+ dp = get_dp(odp_upcall->dp_idx);
+ err = -ENODEV;
+ if (dp)
+ err = execute_actions(dp, packet, &key, actions, actions_len);
+ rcu_read_unlock();
+
+exit_free_skb:
+ kfree_skb(skb);
+ return err;
+}
+
+/* Sums the per-CPU datapath statistics of 'dp' into '*stats'.
+ *
+ * Each CPU's counters are sampled under that CPU's seqcount, so a torn
+ * read (racing with a writer on that CPU) is retried instead of being
+ * folded into the totals. */
+static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
+{
+	int cpu;
+
+	stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
+	for_each_possible_cpu(cpu) {
+		const struct dp_stats_percpu *cpu_stats;
+		struct dp_stats_percpu snapshot;
+		unsigned start;
+
+		cpu_stats = per_cpu_ptr(dp->stats_percpu, cpu);
+
+		/* Retry until we get a consistent snapshot of this CPU's
+		 * counters. */
+		do {
+			start = read_seqcount_begin(&cpu_stats->seqlock);
+			snapshot = *cpu_stats;
+		} while (read_seqcount_retry(&cpu_stats->seqlock, start));
+
+		stats->n_frags += snapshot.n_frags;
+		stats->n_hit += snapshot.n_hit;
+		stats->n_missed += snapshot.n_missed;
+		stats->n_lost += snapshot.n_lost;
+	}
+}
+
+/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports */
+int dp_min_mtu(const struct datapath *dp)
+{
+	struct vport *port;
+	int min_mtu = 0;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry_rcu (port, &dp->port_list, node) {
+		int port_mtu;
+
+		/* Internal ports are the devices whose MTU we are trying to
+		 * compute, so they do not constrain the result. */
+		if (is_internal_vport(port))
+			continue;
+
+		port_mtu = vport_get_mtu(port);
+		if (min_mtu == 0 || port_mtu < min_mtu)
+			min_mtu = port_mtu;
+	}
+
+	if (min_mtu)
+		return min_mtu;
+	return ETH_DATA_LEN;	/* No external ports: fall back to default. */
+}
+
+/* Sets the MTU of all datapath devices to the minimum of the ports.  Must
+ * be called with RTNL lock. */
+void set_internal_devs_mtu(const struct datapath *dp)
+{
+	struct vport *port;
+	int min_mtu;
+
+	ASSERT_RTNL();
+
+	min_mtu = dp_min_mtu(dp);
+
+	/* Apply the computed MTU to every internal port. */
+	list_for_each_entry_rcu (port, &dp->port_list, node) {
+		if (!is_internal_vport(port))
+			continue;
+		vport_set_mtu(port, min_mtu);
+	}
+}
+
+/* Returns the listen mask previously stashed in 'f''s private_data by
+ * set_listen_mask(). */
+static int get_listen_mask(const struct file *f)
+{
+	long mask = (long)f->private_data;
+
+	return mask;
+}
+
+/* Stores 'listen_mask' in 'f''s private_data, encoded as a pointer so no
+ * separate allocation is needed. */
+static void set_listen_mask(struct file *f, int listen_mask)
+{
+	long encoded = listen_mask;
+
+	f->private_data = (void *)encoded;
+}
+
+/* Netlink attribute policy for ODP_FLOW_* requests, enforced by the
+ * nla_parse() call in copy_flow_from_user(). */
+static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
+	[ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+	[ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+	[ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+	[ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
+};
+
+/* Serializes 'flow' as a struct odp_flow followed by netlink attributes
+ * (key, actions, and optional used/stats/tcp_flags/state) into a kernel
+ * buffer, then copies the result to the userspace buffer 'dst'.
+ *
+ * 'total_len' is the size of the userspace buffer; the serialized message
+ * must fit within it.  'state' is an opaque dump-continuation cookie,
+ * emitted as ODP_FLOW_ATTR_STATE when nonzero.
+ *
+ * Caller must hold dp->mutex (required by the rcu_dereference_protected()
+ * on flow->sf_acts).
+ *
+ * Returns 0 on success, -ENOMEM if the kernel buffer cannot be allocated,
+ * -EMSGSIZE if the message does not fit in 'total_len', or -EFAULT if the
+ * copy to userspace faults. */
+static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
+			     struct sw_flow *flow, u32 total_len, u64 state)
+{
+	const struct sw_flow_actions *sf_acts;
+	struct odp_flow_stats stats;
+	struct odp_flow *odp_flow;
+	struct sk_buff *skb;
+	struct nlattr *nla;
+	unsigned long used;
+	u8 tcp_flags;
+	int err;
+
+	sf_acts = rcu_dereference_protected(flow->sf_acts,
+					    lockdep_is_held(&dp->mutex));
+
+	skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
+	err = -ENOMEM;
+	if (!skb)
+		goto exit;
+
+	rcu_read_lock();
+	odp_flow = (struct odp_flow *)__skb_put(skb, sizeof(struct odp_flow));
+	odp_flow->dp_idx = dp->dp_idx;
+	odp_flow->total_len = total_len;
+
+	nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
+	if (!nla)
+		goto nla_put_failure;
+	err = flow_to_nlattrs(&flow->key, skb);
+	if (err)
+		goto exit_unlock;
+	nla_nest_end(skb, nla);
+
+	nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
+	if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
+		goto nla_put_failure;
+	memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
+	nla_nest_end(skb, nla);
+
+	/* Snapshot the statistics under the flow's lock so that the counters
+	 * and timestamp are mutually consistent. */
+	spin_lock_bh(&flow->lock);
+	used = flow->used;
+	stats.n_packets = flow->packet_count;
+	stats.n_bytes = flow->byte_count;
+	tcp_flags = flow->tcp_flags;
+	spin_unlock_bh(&flow->lock);
+
+	if (used)
+		NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);
+
+	if (stats.n_packets)
+		NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
+
+	if (tcp_flags)
+		NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+	if (state)
+		NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);
+
+	if (skb->len > total_len)
+		goto nla_put_failure;
+
+	odp_flow->len = skb->len;
+
+	/* copy_to_user() may fault and sleep, which is not permitted inside
+	 * an RCU read-side critical section, so drop the lock first.  The
+	 * message has already been fully assembled in 'skb'. */
+	rcu_read_unlock();
+	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+	goto exit_free;
+
+nla_put_failure:
+	err = -EMSGSIZE;
+exit_unlock:
+	rcu_read_unlock();
+exit_free:
+	kfree_skb(skb);
+exit:
+	return err;
+}
+
+/* Copies the variable-length struct odp_flow request at userspace pointer
+ * 'uodp_flow' into a freshly allocated skb, validates its trailing netlink
+ * attributes against flow_policy, and fills in '*flowcmd' from them.
+ *
+ * On success returns the skb, which owns the copied request data.  Note
+ * that flowcmd->actions points INTO the skb's data, so the caller must keep
+ * the skb alive for as long as it uses '*flowcmd' and free it afterwards.
+ *
+ * On failure returns ERR_PTR(-EFAULT) for a bad user copy,
+ * ERR_PTR(-EINVAL) for a malformed request, ERR_PTR(-ENOMEM) if allocation
+ * fails, or an error propagated from attribute validation. */
+static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
+					  struct dp_flowcmd *flowcmd)
+{
+	struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
+	struct odp_flow *odp_flow;
+	struct sk_buff *skb;
+	u32 len;
+	int err;
+
+	/* Read the self-declared total length first; the whole request is
+	 * 'len' bytes starting at 'uodp_flow'. */
+	if (get_user(len, &uodp_flow->len))
+		return ERR_PTR(-EFAULT);
+	if (len < sizeof(struct odp_flow))
+		return ERR_PTR(-EINVAL);
+
+	skb = alloc_skb(len, GFP_KERNEL);
+	if (!skb)
+		return ERR_PTR(-ENOMEM);
+
+	err = -EFAULT;
+	if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
+		goto error_free_skb;
+
+	odp_flow = (struct odp_flow *)skb->data;
+	err = -EINVAL;
+	/* The embedded length must agree with the one we read above (it
+	 * could have changed between the two user reads). */
+	if (odp_flow->len != len)
+		goto error_free_skb;
+
+	flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
+	flowcmd->dp_idx = odp_flow->dp_idx;
+	flowcmd->total_len = odp_flow->total_len;
+
+	/* Attributes follow the fixed-size header inside the copied buffer. */
+	err = nla_parse(a, ODP_FLOW_ATTR_MAX,
+			(struct nlattr *)(skb->data + sizeof(struct odp_flow)),
+			skb->len - sizeof(struct odp_flow), flow_policy);
+	if (err)
+		goto error_free_skb;
+
+	/* ODP_FLOW_ATTR_KEY. */
+	if (a[ODP_FLOW_ATTR_KEY]) {
+		err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
+		if (err)
+			goto error_free_skb;
+	} else
+		memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));
+
+	/* ODP_FLOW_ATTR_ACTIONS.  The pointers below alias the skb data. */
+	if (a[ODP_FLOW_ATTR_ACTIONS]) {
+		flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
+		flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
+		err = validate_actions(flowcmd->actions, flowcmd->actions_len);
+		if (err)
+			goto error_free_skb;
+	} else {
+		flowcmd->actions = NULL;
+		flowcmd->actions_len = 0;
+	}
+
+	flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;
+
+	flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;
+
+	return skb;
+
+error_free_skb:
+	kfree_skb(skb);
+	return ERR_PTR(err);
+}
+
+static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
+{
+ struct tbl_node *flow_node;
+ struct dp_flowcmd flowcmd;
+ struct sw_flow *flow;
+ struct sk_buff *skb;
+ struct datapath *dp;
+ struct tbl *table;
+ u32 hash;
+ int error;
+
+ skb = copy_flow_from_user(uodp_flow, &flowcmd);
+ error = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto exit;
+
+ dp = get_dp_locked(flowcmd.dp_idx);
+ error = -ENODEV;
+ if (!dp)
+ goto error_kfree_skb;
+
+ hash = flow_hash(&flowcmd.key);
+ table = get_table_protected(dp);
+ flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
+ if (!flow_node) {
+ struct sw_flow_actions *acts;
+
+ /* Bail out if we're not allowed to create a new flow. */
+ error = -ENOENT;
+ if (cmd == ODP_FLOW_SET)
+ goto error_unlock_dp;
+
+ /* Expand table, if necessary, to make room. */
+ if (tbl_count(table) >= tbl_n_buckets(table)) {
+ error = expand_table(dp);
+ if (error)
+ goto error_unlock_dp;
+ table = get_table_protected(dp);
+ }
+
+ /* Allocate flow. */
+ flow = flow_alloc();
+ if (IS_ERR(flow)) {
+ error = PTR_ERR(flow);
+ goto error_unlock_dp;
+ }
+ flow->key = flowcmd.key;
+ clear_stats(flow);
+
+ /* Obtain actions. */
+ acts = get_actions(&flowcmd);
+ error = PTR_ERR(acts);
+ if (IS_ERR(acts))
+ goto error_free_flow;
+ rcu_assign_pointer(flow->sf_acts, acts);
+
+ error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);