+/* Netlink attribute policy for the ODP_FLOW_ATTR_* attributes carried in
+ * an odp_flow request; used by nla_parse() in copy_flow_from_user() to
+ * type-check attributes received from userspace. */
+static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
+ [ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
+ [ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
+ [ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
+};
+
+
+/* Serializes 'flow' (from datapath 'dp') into a kernel buffer laid out as a
+ * struct odp_flow header followed by netlink attributes -- the flow key, the
+ * actions, and, when nonzero, the last-used time, stats, TCP flags, and dump
+ * 'state' -- then copies the whole message to the userspace buffer 'dst',
+ * which is 'total_len' bytes long.
+ *
+ * Caller must hold genl_mutex: flow->sf_acts is read with
+ * rcu_dereference_protected(..., lockdep_genl_is_held()).
+ *
+ * Returns 0 on success; -ENOMEM if the kernel buffer cannot be allocated;
+ * -EMSGSIZE if the serialized message does not fit in 'total_len' bytes (or
+ * an attribute does not fit in the skb); -EFAULT if copy_to_user() faults. */
+static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
+ struct sw_flow *flow, u32 total_len, u64 state)
+{
+ const struct sw_flow_actions *sf_acts;
+ struct odp_flow_stats stats;
+ struct odp_flow *odp_flow;
+ struct sk_buff *skb;
+ struct nlattr *nla;
+ unsigned long used;
+ u8 tcp_flags;
+ int err;
+
+ sf_acts = rcu_dereference_protected(flow->sf_acts,
+ lockdep_genl_is_held());
+
+ /* 128 bytes of slack covers the odp_flow header plus the fixed-size
+ * attributes appended below; FLOW_BUFSIZE covers the key. */
+ skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
+ err = -ENOMEM;
+ if (!skb)
+ goto exit;
+
+ odp_flow = (struct odp_flow*)__skb_put(skb, sizeof(struct odp_flow));
+ /* alloc_skb() does not zero its data area, so clear the header first:
+ * any field or padding not explicitly set below would otherwise leak
+ * uninitialized kernel memory to userspace via copy_to_user(). */
+ memset(odp_flow, 0, sizeof(struct odp_flow));
+ odp_flow->dp_idx = dp->dp_idx;
+ odp_flow->total_len = total_len;
+
+ /* Flow key as a nested attribute. */
+ nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
+ if (!nla)
+ goto nla_put_failure;
+ err = flow_to_nlattrs(&flow->key, skb);
+ if (err)
+ goto exit_free;
+ nla_nest_end(skb, nla);
+
+ /* Actions are already in netlink format; copy them verbatim inside a
+ * nested attribute after checking there is room. */
+ nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
+ if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
+ goto nla_put_failure;
+ memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
+ nla_nest_end(skb, nla);
+
+ /* Snapshot the stats under the flow lock so they are consistent. */
+ spin_lock_bh(&flow->lock);
+ used = flow->used;
+ stats.n_packets = flow->packet_count;
+ stats.n_bytes = flow->byte_count;
+ tcp_flags = flow->tcp_flags;
+ spin_unlock_bh(&flow->lock);
+
+ /* The NLA_PUT* macros jump to nla_put_failure if the skb runs out of
+ * tailroom.  Zero values are omitted rather than emitted. */
+ if (used)
+ NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);
+
+ if (stats.n_packets)
+ NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);
+
+ if (tcp_flags)
+ NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);
+
+ if (state)
+ NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);
+
+ /* The message built fine in the kernel buffer but must also fit in the
+ * caller-supplied userspace buffer. */
+ if (skb->len > total_len)
+ goto nla_put_failure;
+
+ odp_flow->len = skb->len;
+ err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
+ goto exit_free;
+
+nla_put_failure:
+ err = -EMSGSIZE;
+exit_free:
+ kfree_skb(skb);
+exit:
+ return err;
+}
+
+/* Called with genl_mutex. */
+/* Copies an odp_flow request from the userspace buffer 'uodp_flow' into a
+ * freshly allocated skb, validates it, and parses the trailing netlink
+ * attributes into '*flowcmd' (key, actions, clear flag, dump state).
+ *
+ * On success returns the skb holding the copied request.  flowcmd->actions
+ * points into that skb's data, so the caller must keep the skb alive while
+ * it uses '*flowcmd' and free it afterwards.  On failure returns an
+ * ERR_PTR: -EFAULT (copy from userspace faulted), -EINVAL (bad length or
+ * malformed key), -ENOMEM (allocation failure), or whatever
+ * nla_parse()/validate_actions() report. */
+static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
+ struct dp_flowcmd *flowcmd)
+{
+ struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
+ struct odp_flow *odp_flow;
+ struct sk_buff *skb;
+ u32 len;
+ int err;
+
+ /* Read the declared length first; the request must at least hold the
+ * fixed-size header.
+ * NOTE(review): 'len' is user-controlled and unbounded above -- consider
+ * capping it before alloc_skb() to avoid huge allocation attempts. */
+ if (get_user(len, &uodp_flow->len))
+ return ERR_PTR(-EFAULT);
+ if (len < sizeof(struct odp_flow))
+ return ERR_PTR(-EINVAL);
+
+ skb = alloc_skb(len, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ err = -EFAULT;
+ if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
+ goto error_free_skb;
+
+ /* Re-check the length inside the copied data: userspace could have
+ * changed it between get_user() and copy_from_user(). */
+ odp_flow = (struct odp_flow *)skb->data;
+ err = -EINVAL;
+ if (odp_flow->len != len)
+ goto error_free_skb;
+
+ flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
+ flowcmd->dp_idx = odp_flow->dp_idx;
+ flowcmd->total_len = odp_flow->total_len;
+
+ /* Attributes follow the fixed header; type-check them against
+ * flow_policy. */
+ err = nla_parse(a, ODP_FLOW_ATTR_MAX,
+ (struct nlattr *)(skb->data + sizeof(struct odp_flow)),
+ skb->len - sizeof(struct odp_flow), flow_policy);
+ if (err)
+ goto error_free_skb;
+
+ /* ODP_FLOW_ATTR_KEY. */
+ if (a[ODP_FLOW_ATTR_KEY]) {
+ err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
+ if (err)
+ goto error_free_skb;
+ } else
+ memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));
+
+ /* ODP_FLOW_ATTR_ACTIONS.  Note: flowcmd->actions aliases skb data. */
+ if (a[ODP_FLOW_ATTR_ACTIONS]) {
+ flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
+ flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
+ err = validate_actions(flowcmd->actions, flowcmd->actions_len);
+ if (err)
+ goto error_free_skb;
+ } else {
+ flowcmd->actions = NULL;
+ flowcmd->actions_len = 0;
+ }
+
+ flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;
+
+ flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;
+
+ return skb;
+
+error_free_skb:
+ kfree_skb(skb);
+ return ERR_PTR(err);
+}
+
+static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)