Fix problem with identifying SNAP frames when extracting flows.
[sliver-openvswitch.git] / datapath / datapath.c
index e75aaa5..3853195 100644 (file)
@@ -77,7 +77,7 @@ static DEFINE_MUTEX(dp_mutex);
 
 static int dp_maint_func(void *data);
 static int send_port_status(struct net_bridge_port *p, uint8_t status);
-
+static int dp_genl_openflow_done(struct netlink_callback *);
 
 /* nla_shrink - reduce amount of space reserved by nla_reserve
  * @skb: socket buffer from which to recover room
@@ -172,7 +172,7 @@ alloc_openflow_skb(struct datapath *dp, size_t openflow_len, uint8_t type,
 
        if ((openflow_len + sizeof(struct ofp_header)) > UINT16_MAX) {
                if (net_ratelimit())
-                       printk("alloc_openflow_skb: openflow message too large: %d\n", 
+                       printk("alloc_openflow_skb: openflow message too large: %zu\n", 
                                        openflow_len);
                return NULL;
        }
@@ -507,13 +507,17 @@ static void dp_frame_hook(struct sk_buff *skb)
 /* Forwarding output path.
  * Based on net/bridge/br_forward.c. */
 
-/* Don't forward packets to originating port or with flooding disabled */
+/* Don't forward packets to originating port.  If we're flooding,
+ * then don't send out ports with flooding disabled.
+ */
 static inline int should_deliver(const struct net_bridge_port *p,
-                       const struct sk_buff *skb)
+                       const struct sk_buff *skb, int flood)
 {
-       if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
+       if (skb->dev == p->dev)
+               return 0;
+
+       if (flood && (p->flags & BRIDGE_PORT_NO_FLOOD))
                return 0;
-       } 
 
        return 1;
 }
@@ -526,15 +530,18 @@ static inline unsigned packet_length(const struct sk_buff *skb)
        return length;
 }
 
+/* Send packets out all the ports except the originating one.  If the
+ * "flood" argument is set, only send along the minimum spanning tree.
+ */
 static int
-flood(struct datapath *dp, struct sk_buff *skb)
+output_all(struct datapath *dp, struct sk_buff *skb, int flood)
 {
        struct net_bridge_port *p;
        int prev_port;
 
        prev_port = -1;
        list_for_each_entry_rcu (p, &dp->port_list, node) {
-               if (!should_deliver(p, skb))
+               if (!should_deliver(p, skb, flood))
                        continue;
                if (prev_port != -1) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
@@ -575,7 +582,9 @@ int dp_output_port(struct datapath *dp, struct sk_buff *skb, int out_port)
 
        BUG_ON(!skb);
        if (out_port == OFPP_FLOOD)
-               return flood(dp, skb);
+               return output_all(dp, skb, 1);
+       else if (out_port == OFPP_ALL)
+               return output_all(dp, skb, 0);
        else if (out_port == OFPP_CONTROLLER)
                return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
                                                  OFPR_ACTION);
@@ -838,31 +847,6 @@ dp_send_error_msg(struct datapath *dp, const struct sender *sender,
        return send_openflow_skb(skb, sender);
 }
 
-static void
-fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
-               int table_idx)
-{
-       ofs->match.wildcards = htons(flow->key.wildcards);
-       ofs->match.in_port   = flow->key.in_port;
-       memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
-       memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
-       ofs->match.dl_vlan   = flow->key.dl_vlan;
-       ofs->match.dl_type   = flow->key.dl_type;
-       ofs->match.nw_src    = flow->key.nw_src;
-       ofs->match.nw_dst    = flow->key.nw_dst;
-       ofs->match.nw_proto  = flow->key.nw_proto;
-       memset(ofs->match.pad, 0, sizeof ofs->match.pad);
-       ofs->match.tp_src    = flow->key.tp_src;
-       ofs->match.tp_dst    = flow->key.tp_dst;
-       ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
-       ofs->packet_count    = cpu_to_be64(flow->packet_count);
-       ofs->byte_count      = cpu_to_be64(flow->byte_count);
-       ofs->priority        = htons(flow->priority);
-       ofs->max_idle        = htons(flow->max_idle);
-       ofs->table_id        = table_idx;
-       memset(ofs->pad, 0, sizeof ofs->pad);
-}
-
 /* Generic Netlink interface.
  *
  * See netlink(7) for an introduction to netlink.  See
@@ -1100,8 +1084,8 @@ struct flow_stats_state {
        struct sw_table_position position;
        const struct ofp_flow_stats_request *rq;
 
-       struct ofp_flow_stats *flows;
-       int n_flows, max_flows;
+       void *body;
+       int bytes_used, bytes_allocated;
 };
 
 static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
@@ -1121,39 +1105,75 @@ static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
 static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
 {
        struct flow_stats_state *s = private;
+       struct ofp_flow_stats *ofs;
+       int actions_length;
+       int length;
+
+       actions_length = sizeof *ofs->actions * flow->n_actions;
+       length = sizeof *ofs + sizeof *ofs->actions * flow->n_actions;
+       if (length + s->bytes_used > s->bytes_allocated)
+               return 1;
+
+       ofs = s->body + s->bytes_used;
+       ofs->length          = htons(length);
+       ofs->table_id        = s->table_idx;
+       ofs->pad             = 0;
+       ofs->match.wildcards = htons(flow->key.wildcards);
+       ofs->match.in_port   = flow->key.in_port;
+       memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
+       memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
+       ofs->match.dl_vlan   = flow->key.dl_vlan;
+       ofs->match.dl_type   = flow->key.dl_type;
+       ofs->match.nw_src    = flow->key.nw_src;
+       ofs->match.nw_dst    = flow->key.nw_dst;
+       ofs->match.nw_proto  = flow->key.nw_proto;
+       memset(ofs->match.pad, 0, sizeof ofs->match.pad);
+       ofs->match.tp_src    = flow->key.tp_src;
+       ofs->match.tp_dst    = flow->key.tp_dst;
+       ofs->duration        = htonl((jiffies - flow->init_time) / HZ);
+       ofs->packet_count    = cpu_to_be64(flow->packet_count);
+       ofs->byte_count      = cpu_to_be64(flow->byte_count);
+       ofs->priority        = htons(flow->priority);
+       ofs->max_idle        = htons(flow->max_idle);
+       memcpy(ofs->actions, flow->actions, actions_length);
 
-       fill_flow_stats(&s->flows[s->n_flows], flow, s->table_idx);
-       return ++s->n_flows >= s->max_flows;
+       s->bytes_used += length;
+       return 0;
 }
 
 static int flow_stats_dump(struct datapath *dp, void *state,
                           void *body, int *body_len)
 {
        struct flow_stats_state *s = state;
-       struct ofp_flow_stats *ofs;
        struct sw_flow_key match_key;
+       int error = 0;
 
-       s->max_flows = *body_len / sizeof *ofs;
-       if (!s->max_flows)
-               return -ENOMEM;
-       s->flows = body;
+       s->bytes_used = 0;
+       s->bytes_allocated = *body_len;
+       s->body = body;
 
        flow_extract_match(&match_key, &s->rq->match);
-       s->n_flows = 0;
        while (s->table_idx < dp->chain->n_tables
               && (s->rq->table_id == 0xff || s->rq->table_id == s->table_idx))
        {
                struct sw_table *table = dp->chain->tables[s->table_idx];
 
-               if (table->iterate(table, &match_key, &s->position,
-                                  flow_stats_dump_callback, s))
+               error = table->iterate(table, &match_key, &s->position,
+                                      flow_stats_dump_callback, s);
+               if (error)
                        break;
 
                s->table_idx++;
                memset(&s->position, 0, sizeof s->position);
        }
-       *body_len = sizeof *ofs * s->n_flows;
-       return s->n_flows >= s->max_flows;
+       *body_len = s->bytes_used;
+
+       /* If error is 0, we're done.
+        * Otherwise, if some bytes were used, there are more flows to come.
+        * Otherwise, we were not able to fit even a single flow in the body,
+        * which indicates that we have a single flow with too many actions to
+        * fit.  We won't ever make any progress at that rate, so give up. */
+       return !error ? 0 : s->bytes_used ? 1 : -ENOMEM;
 }
 
 static void flow_stats_done(void *state)
@@ -1161,6 +1181,63 @@ static void flow_stats_done(void *state)
        kfree(state);
 }
 
+static int aggregate_stats_init(struct datapath *dp,
+                               const void *body, int body_len,
+                               void **state)
+{
+       *state = (void *)body;
+       return 0;
+}
+
+static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
+{
+       struct ofp_aggregate_stats_reply *rpy = private;
+       rpy->packet_count += flow->packet_count;
+       rpy->byte_count += flow->byte_count;
+       rpy->flow_count++;
+       return 0;
+}
+
+static int aggregate_stats_dump(struct datapath *dp, void *state,
+                               void *body, int *body_len)
+{
+       struct ofp_aggregate_stats_request *rq = state;
+       struct ofp_aggregate_stats_reply *rpy;
+       struct sw_table_position position;
+       struct sw_flow_key match_key;
+       int table_idx;
+
+       if (*body_len < sizeof *rpy)
+               return -ENOBUFS;
+       rpy = body;
+       *body_len = sizeof *rpy;
+
+       memset(rpy, 0, sizeof *rpy);
+
+       flow_extract_match(&match_key, &rq->match);
+       table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
+       memset(&position, 0, sizeof position);
+       while (table_idx < dp->chain->n_tables
+              && (rq->table_id == 0xff || rq->table_id == table_idx))
+       {
+               struct sw_table *table = dp->chain->tables[table_idx];
+               int error;
+
+               error = table->iterate(table, &match_key, &position,
+                                      aggregate_stats_dump_callback, rpy);
+               if (error)
+                       return error;
+
+               table_idx++;
+               memset(&position, 0, sizeof position);
+       }
+
+       rpy->packet_count = cpu_to_be64(rpy->packet_count);
+       rpy->byte_count = cpu_to_be64(rpy->byte_count);
+       rpy->flow_count = htonl(rpy->flow_count);
+       return 0;
+}
+
 static int table_stats_dump(struct datapath *dp, void *state,
                            void *body, int *body_len)
 {
@@ -1270,6 +1347,13 @@ static const struct stats_type stats[] = {
                flow_stats_dump,
                flow_stats_done
        },
+       [OFPST_AGGREGATE] = {
+               sizeof(struct ofp_aggregate_stats_request),
+               sizeof(struct ofp_aggregate_stats_request),
+               aggregate_stats_init,
+               aggregate_stats_dump,
+               NULL
+       },
        [OFPST_TABLE] = {
                0,
                0,
@@ -1298,6 +1382,11 @@ dp_genl_openflow_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
        void *body;
        int err;
 
+       /* Set up the cleanup function for this dump.  Linux 2.6.20 and later
+        * support setting up cleanup functions via the .done member of
+        * struct genl_ops.  This kluge supports earlier versions also. */
+       cb->done = dp_genl_openflow_done;
+
        rcu_read_lock();
        if (!cb->args[0]) {
                struct nlattr *attrs[DP_GENL_A_MAX + 1];
@@ -1416,7 +1505,6 @@ static struct genl_ops dp_genl_ops_openflow = {
        .policy = dp_genl_openflow_policy,
        .doit = dp_genl_openflow,
        .dumpit = dp_genl_openflow_dumpit,
-       .done = dp_genl_openflow_done,
 };
 
 static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {