static int dp_maint_func(void *data);
static int send_port_status(struct net_bridge_port *p, uint8_t status);
-
+static int dp_genl_openflow_done(struct netlink_callback *);
/* nla_shrink - reduce amount of space reserved by nla_reserve
* @skb: socket buffer from which to recover room
if ((openflow_len + sizeof(struct ofp_header)) > UINT16_MAX) {
if (net_ratelimit())
- printk("alloc_openflow_skb: openflow message too large: %d\n",
+ printk("alloc_openflow_skb: openflow message too large: %zu\n",
openflow_len);
return NULL;
}
/* Forwarding output path.
 * Based on net/bridge/br_forward.c. */
-/* Don't forward packets to originating port or with flooding disabled */
+/* Don't forward packets to originating port. If we're flooding,
+ * then don't send out ports with flooding disabled.
+ */
static inline int should_deliver(const struct net_bridge_port *p,
-				 const struct sk_buff *skb)
+				 const struct sk_buff *skb, int flood)
{
-	if ((skb->dev == p->dev) || (p->flags & BRIDGE_PORT_NO_FLOOD)) {
+	if (skb->dev == p->dev)
+		return 0;
+
+	/* BRIDGE_PORT_NO_FLOOD only suppresses delivery when flooding;
+	 * an explicit "output to all" (flood == 0) still uses the port. */
+	if (flood && (p->flags & BRIDGE_PORT_NO_FLOOD))
		return 0;
-	}
	return 1;
}
return length;
}
+/* Send packets out all the ports except the originating one. If the
+ * "flood" argument is set, only send along the minimum spanning tree.
+ */
static int
-flood(struct datapath *dp, struct sk_buff *skb)
+output_all(struct datapath *dp, struct sk_buff *skb, int flood)
{
struct net_bridge_port *p;
int prev_port;
prev_port = -1;
list_for_each_entry_rcu (p, &dp->port_list, node) {
- if (!should_deliver(p, skb))
+ if (!should_deliver(p, skb, flood))
continue;
if (prev_port != -1) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
BUG_ON(!skb);
if (out_port == OFPP_FLOOD)
- return flood(dp, skb);
+ return output_all(dp, skb, 1);
+ else if (out_port == OFPP_ALL)
+ return output_all(dp, skb, 0);
else if (out_port == OFPP_CONTROLLER)
return dp_output_control(dp, skb, fwd_save_skb(skb), 0,
OFPR_ACTION);
return send_openflow_skb(skb, sender);
}
-static void
-fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
- int table_idx)
-{
- ofs->match.wildcards = htons(flow->key.wildcards);
- ofs->match.in_port = flow->key.in_port;
- memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
- memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
- ofs->match.dl_vlan = flow->key.dl_vlan;
- ofs->match.dl_type = flow->key.dl_type;
- ofs->match.nw_src = flow->key.nw_src;
- ofs->match.nw_dst = flow->key.nw_dst;
- ofs->match.nw_proto = flow->key.nw_proto;
- memset(ofs->match.pad, 0, sizeof ofs->match.pad);
- ofs->match.tp_src = flow->key.tp_src;
- ofs->match.tp_dst = flow->key.tp_dst;
- ofs->duration = htonl((jiffies - flow->init_time) / HZ);
- ofs->packet_count = cpu_to_be64(flow->packet_count);
- ofs->byte_count = cpu_to_be64(flow->byte_count);
- ofs->priority = htons(flow->priority);
- ofs->max_idle = htons(flow->max_idle);
- ofs->table_id = table_idx;
- memset(ofs->pad, 0, sizeof ofs->pad);
-}
-
/* Generic Netlink interface.
*
* See netlink(7) for an introduction to netlink. See
struct sw_table_position position;
const struct ofp_flow_stats_request *rq;
- struct ofp_flow_stats *flows;
- int n_flows, max_flows;
+ void *body;
+ int bytes_used, bytes_allocated;
};
static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
{
	struct flow_stats_state *s = private;
+	struct ofp_flow_stats *ofs;
+	int actions_length;
+	int length;
+
+	/* Variable-length record: header plus this flow's action list. */
+	actions_length = sizeof *ofs->actions * flow->n_actions;
+	length = sizeof *ofs + actions_length;
+
+	/* Stop iterating (without consuming this flow) when the record
+	 * will not fit in the space remaining in the caller's buffer. */
+	if (length + s->bytes_used > s->bytes_allocated)
+		return 1;
+
+	ofs = s->body + s->bytes_used;
+	ofs->length = htons(length);
+	ofs->table_id = s->table_idx;
+	ofs->pad = 0;
+	ofs->match.wildcards = htons(flow->key.wildcards);
+	ofs->match.in_port = flow->key.in_port;
+	memcpy(ofs->match.dl_src, flow->key.dl_src, ETH_ALEN);
+	memcpy(ofs->match.dl_dst, flow->key.dl_dst, ETH_ALEN);
+	ofs->match.dl_vlan = flow->key.dl_vlan;
+	ofs->match.dl_type = flow->key.dl_type;
+	ofs->match.nw_src = flow->key.nw_src;
+	ofs->match.nw_dst = flow->key.nw_dst;
+	ofs->match.nw_proto = flow->key.nw_proto;
+	memset(ofs->match.pad, 0, sizeof ofs->match.pad);
+	ofs->match.tp_src = flow->key.tp_src;
+	ofs->match.tp_dst = flow->key.tp_dst;
+	ofs->duration = htonl((jiffies - flow->init_time) / HZ);
+	ofs->packet_count = cpu_to_be64(flow->packet_count);
+	ofs->byte_count = cpu_to_be64(flow->byte_count);
+	ofs->priority = htons(flow->priority);
+	ofs->max_idle = htons(flow->max_idle);
+	memcpy(ofs->actions, flow->actions, actions_length);
-	fill_flow_stats(&s->flows[s->n_flows], flow, s->table_idx);
-	return ++s->n_flows >= s->max_flows;
+	s->bytes_used += length;
+	return 0;
}
+/* "Dump" function for OFPST_FLOW replies: appends as many matching
+ * flow records as fit into 'body', sets *body_len to the bytes used,
+ * and keeps its position in 's' so a later call can resume. */
static int flow_stats_dump(struct datapath *dp, void *state,
			   void *body, int *body_len)
{
	struct flow_stats_state *s = state;
-	struct ofp_flow_stats *ofs;
	struct sw_flow_key match_key;
+	int error = 0;
-	s->max_flows = *body_len / sizeof *ofs;
-	if (!s->max_flows)
-		return -ENOMEM;
-	s->flows = body;
+	s->bytes_used = 0;
+	s->bytes_allocated = *body_len;
+	s->body = body;
	flow_extract_match(&match_key, &s->rq->match);
-	s->n_flows = 0;
	while (s->table_idx < dp->chain->n_tables
	       && (s->rq->table_id == 0xff || s->rq->table_id == s->table_idx))
	{
		struct sw_table *table = dp->chain->tables[s->table_idx];
-		if (table->iterate(table, &match_key, &s->position,
-				   flow_stats_dump_callback, s))
+		error = table->iterate(table, &match_key, &s->position,
+				       flow_stats_dump_callback, s);
+		if (error)
			break;
		s->table_idx++;
		memset(&s->position, 0, sizeof s->position);
	}
-	*body_len = sizeof *ofs * s->n_flows;
-	return s->n_flows >= s->max_flows;
+	*body_len = s->bytes_used;
+
+	/* If error is 0, we're done.
+	 * Otherwise, if some bytes were used, there are more flows to come.
+	 * Otherwise, we were not able to fit even a single flow in the body,
+	 * which indicates that we have a single flow with too many actions to
+	 * fit. We won't ever make any progress at that rate, so give up. */
+	return !error ? 0 : s->bytes_used ? 1 : -ENOMEM;
}
static void flow_stats_done(void *state)
kfree(state);
}
+/* "Init" function for OFPST_AGGREGATE: the request body itself serves
+ * as the dump state, so nothing is allocated here and there is nothing
+ * to free afterward. */
+static int aggregate_stats_init(struct datapath *dp,
+				const void *body, int body_len,
+				void **state)
+{
+	/* Cast away const: the dump function only reads the request. */
+	*state = (void *)body;
+	return 0;
+}
+
+/* Per-flow iteration callback: folds one matching flow into the running
+ * totals. Counters accumulate in host byte order here; the caller
+ * converts them to network byte order after iteration completes. */
+static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
+{
+	struct ofp_aggregate_stats_reply *rpy = private;
+	rpy->packet_count += flow->packet_count;
+	rpy->byte_count += flow->byte_count;
+	rpy->flow_count++;
+	return 0;		/* Never stops iteration early. */
+}
+
+/* "Dump" function for OFPST_AGGREGATE: sums packet, byte, and flow
+ * counts over every flow matching the request and writes a single
+ * ofp_aggregate_stats_reply into 'body'. Completes in one call. */
+static int aggregate_stats_dump(struct datapath *dp, void *state,
+				void *body, int *body_len)
+{
+	struct ofp_aggregate_stats_request *rq = state;
+	struct ofp_aggregate_stats_reply *rpy;
+	struct sw_table_position position;
+	struct sw_flow_key match_key;
+	int table_idx;
+
+	if (*body_len < sizeof *rpy)
+		return -ENOBUFS;
+	rpy = body;
+	*body_len = sizeof *rpy;
+
+	/* Totals accumulate in host byte order until the end. */
+	memset(rpy, 0, sizeof *rpy);
+
+	flow_extract_match(&match_key, &rq->match);
+	table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
+	memset(&position, 0, sizeof position);
+	while (table_idx < dp->chain->n_tables
+	       && (rq->table_id == 0xff || rq->table_id == table_idx))
+	{
+		struct sw_table *table = dp->chain->tables[table_idx];
+		int error;
+
+		error = table->iterate(table, &match_key, &position,
+				       aggregate_stats_dump_callback, rpy);
+		if (error)
+			return error;
+
+		table_idx++;
+		memset(&position, 0, sizeof position);
+	}
+
+	/* Convert the totals to network byte order for the wire. */
+	rpy->packet_count = cpu_to_be64(rpy->packet_count);
+	rpy->byte_count = cpu_to_be64(rpy->byte_count);
+	rpy->flow_count = htonl(rpy->flow_count);
+	return 0;
+}
+
static int table_stats_dump(struct datapath *dp, void *state,
void *body, int *body_len)
{
flow_stats_dump,
flow_stats_done
},
+ [OFPST_AGGREGATE] = {
+ sizeof(struct ofp_aggregate_stats_request),
+ sizeof(struct ofp_aggregate_stats_request),
+ aggregate_stats_init,
+ aggregate_stats_dump,
+ NULL
+ },
[OFPST_TABLE] = {
0,
0,
void *body;
int err;
+	/* Set up the cleanup function for this dump. Linux 2.6.20 and later
+	 * support setting up cleanup functions via the .done member of
+	 * struct genl_ops. This kluge supports earlier versions also. */
+ cb->done = dp_genl_openflow_done;
+
rcu_read_lock();
if (!cb->args[0]) {
struct nlattr *attrs[DP_GENL_A_MAX + 1];
.policy = dp_genl_openflow_policy,
.doit = dp_genl_openflow,
.dumpit = dp_genl_openflow_dumpit,
- .done = dp_genl_openflow_done,
};
static struct nla_policy dp_genl_benchmark_policy[DP_GENL_A_MAX + 1] = {