lib/xflow-util.c \
ofproto/ofproto.c \
ofproto/pktbuf.c \
+ ofproto/wdp.c \
+ ofproto/wdp-xflow.c \
vswitchd/bridge.c \
vswitchd/ovs-brcompatd.c
lib/coverage-counters.c: $(COVERAGE_FILES) lib/coverage-scan.pl
-/* Converts the flow in 'flow' into a cls_rule in 'rule', with the given
- * 'wildcards' and 'priority'.*/
+/* Converts the flow in 'flow' into a cls_rule in 'rule'.  The rule takes its
+ * wildcards and priority from 'flow->wildcards' and 'flow->priority'. */
void
-cls_rule_from_flow(struct cls_rule *rule, const flow_t *flow,
-                   uint32_t wildcards, unsigned int priority)
+cls_rule_from_flow(struct cls_rule *rule, const flow_t *flow)
{
    rule->flow = *flow;
-    flow_wildcards_init(&rule->wc, wildcards);
-    rule->priority = priority;
-    rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
+    /* An exact-match rule is promoted to the maximum OpenFlow priority, but
+     * priorities above UINT16_MAX (ofproto-internal, hidden rules) are left
+     * untouched. */
+    if (!rule->flow.wildcards && rule->flow.priority < UINT16_MAX) {
+        rule->flow.priority = UINT16_MAX;
+    }
+    flow_wildcards_init(&rule->wc, flow->wildcards);
+    rule->table_idx = table_idx_from_wildcards(flow->wildcards);
}
/* Converts the ofp_match in 'match' into a cls_rule in 'rule', with the given
 * 'priority'. */
void
-cls_rule_from_match(struct cls_rule *rule, const struct ofp_match *match,
-                    unsigned int priority)
+cls_rule_from_match(struct cls_rule *rule, unsigned int priority,
+                    const struct ofp_match *match)
{
-    uint32_t wildcards;
-    flow_from_match(&rule->flow, &wildcards, match);
-    flow_wildcards_init(&rule->wc, wildcards);
-    rule->priority = rule->wc.wildcards ? priority : UINT16_MAX;
-    rule->table_idx = table_idx_from_wildcards(rule->wc.wildcards);
+    /* flow_from_match() is what fills in rule->flow.wildcards, so it must
+     * run before the wildcards are examined.  (Passing
+     * "rule->flow.wildcards ? priority : UINT16_MAX" as the argument would
+     * read rule->flow.wildcards while it is still uninitialized.) */
+    flow_from_match(&rule->flow, priority, match);
+    if (!rule->flow.wildcards && rule->flow.priority < UINT16_MAX) {
+        rule->flow.priority = UINT16_MAX;
+    }
+    flow_wildcards_init(&rule->wc, rule->flow.wildcards);
+    rule->table_idx = table_idx_from_wildcards(rule->flow.wildcards);
}
/* Converts 'rule' to a string and returns the string. The caller must free
{
struct ds s = DS_EMPTY_INITIALIZER;
ds_put_format(&s, "wildcards=%x priority=%u ",
- rule->wc.wildcards, rule->priority);
+ rule->flow.wildcards, rule->flow.priority);
flow_format(&s, &rule->flow);
return ds_cstr(&s);
}
void
cls_rule_print(const struct cls_rule *rule)
{
- printf("wildcards=%x priority=%u ", rule->wc.wildcards, rule->priority);
+ printf("wildcards=%x priority=%u ",
+ rule->flow.wildcards, rule->flow.priority);
flow_print(stdout, &rule->flow);
putc('\n', stdout);
}
struct cls_rule *new)
{
if (old != new) {
- if (new->wc.wildcards) {
+ if (new->flow.wildcards) {
list_moved(&new->node.list);
} else {
hmap_node_moved(&cls->exact_table,
struct cls_rule *new)
{
assert(old != new);
- assert(old->wc.wildcards == new->wc.wildcards);
- assert(old->priority == new->priority);
+ assert(old->flow.wildcards == new->flow.wildcards);
+ assert(old->flow.priority == new->flow.priority);
- if (new->wc.wildcards) {
+ if (new->flow.wildcards) {
list_replace(&new->node.list, &old->node.list);
} else {
hmap_replace(&cls->exact_table, &old->node.hmap, &new->node.hmap);
return hmap_count(&cls->exact_table);
}
+/* Returns the number of rules in 'cls' that have at least one wildcarded
+ * field, that is, the total rule count minus the exact-match rule count. */
+int
+classifier_count_wild(const struct classifier *cls)
+{
+    return classifier_count(cls) - classifier_count_exact(cls);
+}
+
/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
*
* If 'cls' already contains an identical rule (including wildcards, values of
classifier_insert(struct classifier *cls, struct cls_rule *rule)
{
struct cls_rule *old;
- assert((rule->wc.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
- old = (rule->wc.wildcards
+ assert((rule->flow.wildcards == 0) == (rule->table_idx == CLS_F_IDX_EXACT));
+ old = (rule->flow.wildcards
? table_insert(&cls->tables[rule->table_idx], rule)
: insert_exact_rule(cls, rule));
if (!old) {
/* Inserts 'rule' into 'cls'. Transfers ownership of 'rule' to 'cls'.
*
- * 'rule' must be an exact-match rule (rule->wc.wildcards must be 0) and 'cls'
+ * 'rule' must be an exact-match rule (rule->flow.wildcards must be 0) and 'cls'
* must not contain any rule with an identical key. */
void
classifier_insert_exact(struct classifier *cls, struct cls_rule *rule)
void
classifier_remove(struct classifier *cls, struct cls_rule *rule)
{
- if (rule->wc.wildcards) {
+ if (rule->flow.wildcards) {
/* Remove 'rule' from bucket. If that empties the bucket, remove the
* bucket from its table. */
struct hmap *table = &cls->tables[rule->table_idx];
struct cls_rule target;
int i;
- cls_rule_from_flow(&target, flow, 0, 0);
+ cls_rule_from_flow(&target, flow);
for (i = 0; i < CLS_N_FIELDS; i++) {
struct cls_rule *rule = search_table(&cls->tables[i], i, &target);
- if (rule && (!best || rule->priority > best->priority)) {
+ if (rule && (!best || rule->flow.priority > best->flow.priority)) {
best = rule;
}
}
struct cls_rule *
classifier_find_rule_exactly(const struct classifier *cls,
- const flow_t *target, uint32_t wildcards,
- unsigned int priority)
+ const flow_t *target)
{
struct cls_bucket *bucket;
int table_idx;
uint32_t hash;
- if (!wildcards) {
+ if (!target->wildcards) {
-        /* Ignores 'priority'. */
+        /* Ignores 'target->priority'. */
return search_exact_table(cls, flow_hash(target, 0), target);
}
- assert(wildcards == (wildcards & OFPFW_ALL));
- table_idx = table_idx_from_wildcards(wildcards);
+ assert(target->wildcards == (target->wildcards & OFPFW_ALL));
+ table_idx = table_idx_from_wildcards(target->wildcards);
hash = hash_fields(target, table_idx);
HMAP_FOR_EACH_WITH_HASH (bucket, struct cls_bucket, hmap_node, hash,
&cls->tables[table_idx]) {
if (equal_fields(&bucket->fixed, target, table_idx)) {
struct cls_rule *pos;
LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
- if (pos->priority < priority) {
+ if (pos->flow.priority < target->priority) {
return NULL;
- } else if (pos->priority == priority &&
- pos->wc.wildcards == wildcards &&
+ } else if (pos->flow.priority == target->priority &&
+ pos->flow.wildcards == target->wildcards &&
flow_equal(target, &pos->flow)) {
return pos;
}
return NULL;
}
-/* Checks if the flow defined by 'target' with 'wildcards' at 'priority'
- * overlaps with any other rule at the same priority in the classifier.
- * Two rules are considered overlapping if a packet could match both. */
+/* Checks if the flow defined by 'target' overlaps with any other rule at the
+ * same priority in the classifier. Two rules are considered overlapping if a
+ * packet could match both. */
bool
-classifier_rule_overlaps(const struct classifier *cls,
- const flow_t *target, uint32_t wildcards,
- unsigned int priority)
+classifier_rule_overlaps(const struct classifier *cls, const flow_t *target)
{
struct cls_rule target_rule;
const struct hmap *tbl;
- if (!wildcards) {
+ if (!target->wildcards) {
return search_exact_table(cls, flow_hash(target, 0), target) ?
true : false;
}
- cls_rule_from_flow(&target_rule, target, wildcards, priority);
+ cls_rule_from_flow(&target_rule, target);
for (tbl = &cls->tables[0]; tbl < &cls->tables[CLS_N_FIELDS]; tbl++) {
struct cls_bucket *bucket;
LIST_FOR_EACH (rule, struct cls_rule, node.list,
&bucket->rules) {
- if (rule->priority == priority
+ if (rule->flow.priority == target->priority
&& rules_match_2wild(rule, &target_rule, 0)) {
return true;
}
return false;
}
-/* Ignores target->priority.
+/* Ignores target->flow.priority.
*
* 'callback' is allowed to delete the rule that is passed as its argument, but
* it must not delete (or move) any other rules in 'cls' that are in the same
* wildcards and an exact-match rule will never be in the same table. */
void
classifier_for_each_match(const struct classifier *cls,
- const struct cls_rule *target,
+ const flow_t *target_flow,
int include, cls_cb_func *callback, void *aux)
{
+ struct cls_rule target;
+
+ cls_rule_from_flow(&target, target_flow);
if (include & CLS_INC_WILD) {
const struct hmap *table;
prev_rule = NULL;
LIST_FOR_EACH (rule, struct cls_rule, node.list,
&bucket->rules) {
- if (rules_match_1wild(rule, target, 0)) {
+ if (rules_match_1wild(rule, &target, 0)) {
if (prev_rule) {
callback(prev_rule, aux);
}
}
if (include & CLS_INC_EXACT) {
- if (target->wc.wildcards) {
+ if (target.flow.wildcards) {
struct cls_rule *rule, *next_rule;
HMAP_FOR_EACH_SAFE (rule, next_rule, struct cls_rule, node.hmap,
&cls->exact_table) {
- if (rules_match_1wild(rule, target, 0)) {
+ if (rules_match_1wild(rule, &target, 0)) {
callback(rule, aux);
}
}
} else {
/* Optimization: there can be at most one match in the exact
* table. */
- size_t hash = flow_hash(&target->flow, 0);
+ size_t hash = flow_hash(&target.flow, 0);
struct cls_rule *rule = search_exact_table(cls, hash,
- &target->flow);
+ &target.flow);
if (rule) {
callback(rule, aux);
}
{
struct cls_rule *pos;
LIST_FOR_EACH (pos, struct cls_rule, node.list, &bucket->rules) {
- if (pos->priority == rule->priority) {
- if (pos->wc.wildcards == rule->wc.wildcards
+ if (pos->flow.priority == rule->flow.priority) {
+ if (pos->flow.wildcards == rule->flow.wildcards
&& rules_match_1wild(pos, rule, rule->table_idx))
{
list_replace(&rule->node.list, &pos->node.list);
return pos;
}
- } else if (pos->priority < rule->priority) {
+ } else if (pos->flow.priority < rule->flow.priority) {
break;
}
}
rules_match_1wild(const struct cls_rule *fixed, const struct cls_rule *wild,
int field_idx)
{
- return rules_match(fixed, wild, wild->wc.wildcards, wild->wc.nw_src_mask,
+ return rules_match(fixed, wild, wild->flow.wildcards, wild->wc.nw_src_mask,
wild->wc.nw_dst_mask, field_idx);
}
int field_idx)
{
return rules_match(wild1, wild2,
- wild1->wc.wildcards | wild2->wc.wildcards,
+ wild1->flow.wildcards | wild2->flow.wildcards,
wild1->wc.nw_src_mask & wild2->wc.nw_src_mask,
wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask,
field_idx);
} node;
flow_t flow; /* All field values. */
struct flow_wildcards wc; /* Wildcards for fields. */
- unsigned int priority; /* Larger numbers are higher priorities. */
unsigned int table_idx; /* Index into struct classifier 'tables'. */
};
-void cls_rule_from_flow(struct cls_rule *, const flow_t *, uint32_t wildcards,
- unsigned int priority);
-void cls_rule_from_match(struct cls_rule *, const struct ofp_match *,
- unsigned int priority);
+void cls_rule_from_flow(struct cls_rule *, const flow_t *);
+void cls_rule_from_match(struct cls_rule *, unsigned int priority,
+ const struct ofp_match *);
char *cls_rule_to_string(const struct cls_rule *);
void cls_rule_print(const struct cls_rule *);
void cls_rule_moved(struct classifier *,
bool classifier_is_empty(const struct classifier *);
int classifier_count(const struct classifier *);
int classifier_count_exact(const struct classifier *);
+int classifier_count_wild(const struct classifier *);
struct cls_rule *classifier_insert(struct classifier *, struct cls_rule *);
void classifier_insert_exact(struct classifier *, struct cls_rule *);
void classifier_remove(struct classifier *, struct cls_rule *);
const flow_t *);
struct cls_rule *classifier_lookup_exact(const struct classifier *,
const flow_t *);
-bool classifier_rule_overlaps(const struct classifier *, const flow_t *,
- uint32_t wildcards, unsigned int priority);
+bool classifier_rule_overlaps(const struct classifier *, const flow_t *);
typedef void cls_cb_func(struct cls_rule *, void *aux);
};
void classifier_for_each(const struct classifier *, int include,
cls_cb_func *, void *aux);
-void classifier_for_each_match(const struct classifier *,
- const struct cls_rule *,
+void classifier_for_each_match(const struct classifier *, const flow_t *,
int include, cls_cb_func *, void *aux);
struct cls_rule *classifier_find_rule_exactly(const struct classifier *,
- const flow_t *target,
- uint32_t wildcards,
- unsigned int priority);
+ const flow_t *target);
#endif /* classifier.h */
-/* Extract 'flow' with 'wildcards' into the OpenFlow match structure
- * 'match'. */
+/* Extracts 'flow', including its embedded wildcards, into the OpenFlow match
+ * structure 'match'. */
void
-flow_to_match(const flow_t *flow, uint32_t wildcards, struct ofp_match *match)
+flow_to_match(const flow_t *flow, struct ofp_match *match)
{
- match->wildcards = htonl(wildcards);
+ match->wildcards = htonl(flow->wildcards);
match->in_port = htons(flow->in_port == XFLOWP_LOCAL ? OFPP_LOCAL
: flow->in_port);
match->dl_vlan = flow->dl_vlan;
}
void
-flow_from_match(flow_t *flow, uint32_t *wildcards,
+flow_from_match(flow_t *flow, unsigned int priority,
const struct ofp_match *match)
{
- if (wildcards) {
- *wildcards = ntohl(match->wildcards);
- }
+ flow->wildcards = ntohl(match->wildcards);
+ flow->priority = priority;
flow->nw_src = match->nw_src;
flow->nw_dst = match->nw_dst;
flow->in_port = (match->in_port == htons(OFPP_LOCAL) ? XFLOWP_LOCAL
void
flow_format(struct ds *ds, const flow_t *flow)
{
- ds_put_format(ds, "in_port%04x:vlan%d:pcp%d mac"ETH_ADDR_FMT
+ ds_put_format(ds, "wild%08"PRIx32" pri%"PRIu32" "
+ "in_port%04x:vlan%d:pcp%d mac"ETH_ADDR_FMT
"->"ETH_ADDR_FMT" type%04x proto%"PRId8" tos%"PRIu8
" ip"IP_FMT"->"IP_FMT" port%d->%d",
+ flow->wildcards, flow->priority,
flow->in_port, ntohs(flow->dl_vlan), flow->dl_vlan_pcp,
ETH_ADDR_ARGS(flow->dl_src), ETH_ADDR_ARGS(flow->dl_dst),
ntohs(flow->dl_type), flow->nw_proto, flow->nw_tos,
typedef struct flow flow_t;
struct flow {
+ uint32_t wildcards; /* Wildcards. */
+ uint32_t priority; /* Priority. */
uint32_t nw_src; /* IP source address. */
uint32_t nw_dst; /* IP destination address. */
uint16_t in_port; /* Input switch port. */
/* Assert that there are FLOW_SIG_SIZE bytes of significant data in "struct
* flow", followed by FLOW_PAD_SIZE bytes of padding. */
-#define FLOW_SIG_SIZE 33
+#define FLOW_SIG_SIZE 41
#define FLOW_PAD_SIZE 3
BUILD_ASSERT_DECL(offsetof(struct flow, nw_tos) == FLOW_SIG_SIZE - 1);
BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->nw_tos) == 1);
int flow_extract(struct ofpbuf *, uint16_t in_port, flow_t *);
void flow_extract_stats(const flow_t *flow, struct ofpbuf *packet,
struct xflow_flow_stats *stats);
-void flow_to_match(const flow_t *, uint32_t wildcards, struct ofp_match *);
-void flow_from_match(flow_t *, uint32_t *wildcards, const struct ofp_match *);
+void flow_to_match(const flow_t *, struct ofp_match *);
+void flow_from_match(flow_t *, unsigned int priority,
+ const struct ofp_match *);
char *flow_to_string(const flow_t *);
void flow_format(struct ds *, const flow_t *);
void flow_print(FILE *, const flow_t *);
/* Information on wildcards for a flow, as a supplement to flow_t. */
struct flow_wildcards {
- uint32_t wildcards; /* enum ofp_flow_wildcards (in host order). */
uint32_t nw_src_mask; /* 1-bit in each significant nw_src bit. */
uint32_t nw_dst_mask; /* 1-bit in each significant nw_dst bit. */
};
static inline void
flow_wildcards_init(struct flow_wildcards *wc, uint32_t wildcards)
{
- wc->wildcards = wildcards & OFPFW_ALL;
- wc->nw_src_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_SRC_SHIFT);
- wc->nw_dst_mask = flow_nw_bits_to_mask(wc->wildcards, OFPFW_NW_DST_SHIFT);
+ wildcards &= OFPFW_ALL;
+ wc->nw_src_mask = flow_nw_bits_to_mask(wildcards, OFPFW_NW_SRC_SHIFT);
+ wc->nw_dst_mask = flow_nw_bits_to_mask(wildcards, OFPFW_NW_DST_SHIFT);
}
#endif /* flow.h */
packet.data = (void *) op->data;
packet.size = data_len;
flow_extract(&packet, ntohs(op->in_port), &flow);
- flow_to_match(&flow, 0, &match);
+ flow_to_match(&flow, &match);
ofp_print_match(string, &match, verbosity);
ds_put_char(string, '\n');
}
/*
- * Copyright (c) 2008, 2009 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
ofpbuf_trim(struct ofpbuf *b)
{
    /* XXX These could be supported, but the current client doesn't care. */
-    assert(b->data == b->base);
    assert(b->l2 == NULL && b->l3 == NULL && b->l4 == NULL && b->l7 == NULL);
    if (b->allocated > b->size) {
-        b->base = b->data = xrealloc(b->base, b->size);
+        /* Now that the b->data == b->base assertion is dropped, 'data' may
+         * point into the interior of the 'base' allocation, and realloc()
+         * must only be given a pointer returned by an allocation function.
+         * Slide the payload down to 'base' first, then shrink 'base'. */
+        memmove(b->base, b->data, b->size);
+        b->base = b->data = xrealloc(b->base, b->size);
        b->allocated = b->size;
    }
}
/*
- * Copyright (c) 2008, 2009 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
}
}
+/* Appends each string in 'other' to 'svec', transferring ownership of the
+ * strings themselves to 'svec', and then makes 'other' empty.  'other' is
+ * not destroyed; the caller still owns both svec structures. */
+void
+svec_move(struct svec *svec, struct svec *other)
+{
+    size_t i;
+    for (i = 0; i < other->n; i++) {
+        svec_add_nocopy(svec, other->names[i]);
+    }
+    other->n = 0;
+}
+
void
svec_terminate(struct svec *svec)
{
/*
- * Copyright (c) 2008, 2009 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
void svec_add_nocopy(struct svec *, char *);
void svec_del(struct svec *, const char *);
void svec_append(struct svec *, const struct svec *);
+void svec_move(struct svec *, struct svec *);
void svec_terminate(struct svec *);
void svec_sort(struct svec *);
void svec_sort_unique(struct svec *);
VLOG_MODULE(vswitchd)
VLOG_MODULE(vt)
VLOG_MODULE(wcelim)
+VLOG_MODULE(wdp)
+VLOG_MODULE(wdp_xflow)
VLOG_MODULE(xenserver)
VLOG_MODULE(xfif)
VLOG_MODULE(xfif_linux)
/* Attempts to receive a message from 'xfif'. If successful, stores the
* message into '*packetp'. The message, if one is received, must begin
* with 'struct xflow_msg' as a header. Only messages of the types
- * selected with the set_listen_mask member function should be received.
+ * selected with the recv_set_mask member function should be received.
*
* This function must not block. If no message is ready to be received
* when it is called, it should return EAGAIN without blocking. */
*
* Normally this function only needs to be called from xfif_close().
* However, it may be called by providers due to an error on opening
- * that occurs after initialization. It this case xfif_close() would
+ * that occurs after initialization. In this case xfif_close() would
* never be called. */
void
xfif_uninit(struct xfif *xfif, bool close)
xflow_key_from_flow(struct xflow_key *key, const struct flow *flow)
{
key->nw_src = flow->nw_src;
    key->nw_dst = flow->nw_dst;
-    key->in_port = flow->in_port;
+    key->in_port = ofp_port_to_xflow_port(flow->in_port);
if (flow->dl_vlan == htons(OFP_VLAN_NONE)) {
key->dl_tci = htons(0);
void
xflow_key_to_flow(const struct xflow_key *key, struct flow *flow)
{
+ flow->wildcards = 0;
+ flow->priority = 0xffff;
flow->nw_src = key->nw_src;
flow->nw_dst = key->nw_dst;
- flow->in_port = key->in_port;
+ flow->in_port = xflow_port_to_ofp_port(key->in_port);
if (key->dl_tci) {
flow->dl_vlan = htons(vlan_tci_to_vid(key->dl_tci));
flow->dl_vlan_pcp = vlan_tci_to_pcp(key->dl_tci);
-# Copyright (C) 2009 Nicira Networks, Inc.
+# Copyright (C) 2009, 2010 Nicira Networks, Inc.
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
ofproto/pinsched.c \
ofproto/pinsched.h \
ofproto/status.c \
- ofproto/status.h
+ ofproto/status.h \
+ ofproto/wdp-provider.h \
+ ofproto/wdp-xflow.c \
+ ofproto/wdp.c \
+ ofproto/wdp.h
#include "packets.h"
#include "status.h"
#include "stream-ssl.h"
+#include "wdp.h"
#include "xfif.h"
#define THIS_MODULE VLM_discovery
int
discovery_create(const char *re, bool update_resolv_conf,
- struct xfif *xfif, struct switch_status *ss,
+ struct wdp *wdp, struct switch_status *ss,
struct discovery **discoveryp)
{
struct discovery *d;
- char local_name[IF_NAMESIZE];
+ char *local_name;
int error;
d = xzalloc(sizeof *d);
d->update_resolv_conf = update_resolv_conf;
/* Initialize DHCP client. */
- error = xfif_port_get_name(xfif, XFLOWP_LOCAL,
- local_name, sizeof local_name);
+ error = wdp_port_get_name(wdp, OFPP_LOCAL, &local_name);
if (error) {
VLOG_ERR("failed to query datapath local port: %s", strerror(error));
goto error_regfree;
}
error = dhclient_create(local_name, modify_dhcp_request,
validate_dhcp_offer, d, &d->dhcp);
+ free(local_name);
if (error) {
VLOG_ERR("failed to initialize DHCP client: %s", strerror(error));
goto error_regfree;
#include <stdbool.h>
-struct xfif;
struct discovery;
struct settings;
struct switch_status;
+struct wdp;
int discovery_create(const char *accept_controller_re, bool update_resolv_conf,
- struct xfif *, struct switch_status *,
+ struct wdp *, struct switch_status *,
struct discovery **);
void discovery_destroy(struct discovery *);
void discovery_set_update_resolv_conf(struct discovery *,
#include <stdlib.h>
#include "flow.h"
#include "mac-learning.h"
-#include "xflow-util.h"
#include "ofpbuf.h"
#include "ofproto.h"
#include "pktbuf.h"
fo->next_bogus_packet_in = LLONG_MAX;
memset(&flow, 0, sizeof flow);
- ofproto_delete_flow(fo->ofproto, &flow, OFPFW_ALL, FAIL_OPEN_PRIORITY);
+ flow.wildcards = OFPFW_ALL;
+ flow.priority = FAIL_OPEN_PRIORITY;
+ ofproto_delete_flow(fo->ofproto, &flow);
}
}
action.output.len = htons(sizeof action);
action.output.port = htons(OFPP_NORMAL);
memset(&flow, 0, sizeof flow);
- ofproto_add_flow(fo->ofproto, &flow, OFPFW_ALL, FAIL_OPEN_PRIORITY,
- &action, 1, 0);
+ flow.wildcards = OFPFW_ALL;
+ flow.priority = FAIL_OPEN_PRIORITY;
+ ofproto_add_flow(fo->ofproto, &flow, &action, 1, 0);
}
}
#include "status.h"
#include "timeval.h"
#include "vconn.h"
+#include "wdp.h"
#include "xfif.h"
#define THIS_MODULE VLM_in_band
struct ib_rule {
bool installed;
flow_t flow;
- uint32_t wildcards;
- unsigned int priority;
};
struct in_band {
if (rule->installed) {
rule->installed = false;
- ofproto_delete_flow(in_band->ofproto, &rule->flow, rule->wildcards,
- rule->priority);
+ ofproto_delete_flow(in_band->ofproto, &rule->flow);
}
}
rule->installed = true;
rule->flow = *flow;
- rule->wildcards = OFPFW_ALL & ~fixed_fields;
- rule->priority = IB_BASE_PRIORITY + (N_IB_RULES - rule_idx);
+ rule->flow.wildcards = OFPFW_ALL & ~fixed_fields;
+ rule->flow.priority = IB_BASE_PRIORITY + (N_IB_RULES - rule_idx);
action.type = htons(OFPAT_OUTPUT);
action.output.len = htons(sizeof action);
action.output.port = htons(out_port);
action.output.max_len = htons(0);
- ofproto_add_flow(in_band->ofproto, &rule->flow, rule->wildcards,
- rule->priority, &action, 1, 0);
+ ofproto_add_flow(in_band->ofproto, &rule->flow, &action, 1, 0);
}
}
-/* Returns true if 'packet' should be sent to the local port regardless
- * of the flow table. */
-bool
-in_band_msg_in_hook(struct in_band *in_band, const flow_t *flow,
- const struct ofpbuf *packet)
-{
- if (!in_band) {
- return false;
- }
-
- /* Regardless of how the flow table is configured, we want to be
- * able to see replies to our DHCP requests. */
- if (flow->dl_type == htons(ETH_TYPE_IP)
- && flow->nw_proto == IP_TYPE_UDP
- && flow->tp_src == htons(DHCP_SERVER_PORT)
- && flow->tp_dst == htons(DHCP_CLIENT_PORT)
- && packet->l7) {
- struct dhcp_header *dhcp;
- const uint8_t *local_mac;
-
- dhcp = ofpbuf_at(packet, (char *)packet->l7 - (char *)packet->data,
- sizeof *dhcp);
- if (!dhcp) {
- return false;
- }
-
- local_mac = get_local_mac(in_band);
- if (eth_addr_equals(dhcp->chaddr, local_mac)) {
- return true;
- }
- }
-
- return false;
-}
-
-/* Returns true if the rule that would match 'flow' with 'actions' is
- * allowed to be set up in the datapath. */
-bool
-in_band_rule_check(struct in_band *in_band, const flow_t *flow,
- const struct xflow_actions *actions)
-{
- if (!in_band) {
- return true;
- }
-
- /* Don't allow flows that would prevent DHCP replies from being seen
- * by the local port. */
- if (flow->dl_type == htons(ETH_TYPE_IP)
- && flow->nw_proto == IP_TYPE_UDP
- && flow->tp_src == htons(DHCP_SERVER_PORT)
- && flow->tp_dst == htons(DHCP_CLIENT_PORT)) {
- int i;
-
- for (i=0; i<actions->n_actions; i++) {
- if (actions->actions[i].output.type == XFLOWAT_OUTPUT
- && actions->actions[i].output.port == XFLOWP_LOCAL) {
- return true;
- }
- }
- return false;
- }
-
- return true;
-}
-
void
in_band_run(struct in_band *in_band)
{
if (local_mac) {
/* Allow DHCP requests to be sent from the local port. */
memset(&flow, 0, sizeof flow);
- flow.in_port = XFLOWP_LOCAL;
+ flow.in_port = OFPP_LOCAL;
flow.dl_type = htons(ETH_TYPE_IP);
memcpy(flow.dl_src, local_mac, ETH_ADDR_LEN);
flow.nw_proto = IP_TYPE_UDP;
}
int
-in_band_create(struct ofproto *ofproto, struct xfif *xfif,
+in_band_create(struct ofproto *ofproto, struct wdp *wdp,
struct switch_status *ss, struct rconn *controller,
struct in_band **in_bandp)
{
struct in_band *in_band;
- char local_name[IF_NAMESIZE];
+ char *local_name;
struct netdev *local_netdev;
int error;
- error = xfif_port_get_name(xfif, XFLOWP_LOCAL,
- local_name, sizeof local_name);
+ error = wdp_port_get_name(wdp, OFPP_LOCAL, &local_name);
if (error) {
VLOG_ERR("failed to initialize in-band control: cannot get name "
"of datapath local port (%s)", strerror(error));
if (error) {
VLOG_ERR("failed to initialize in-band control: cannot open "
"datapath local port %s (%s)", local_name, strerror(error));
+ free(local_name);
return error;
}
+ free(local_name);
in_band = xzalloc(sizeof *in_band);
in_band->ofproto = ofproto;
#include "flow.h"
-struct xfif;
struct in_band;
struct xflow_actions;
struct ofproto;
struct rconn;
struct settings;
struct switch_status;
+struct wdp;
-int in_band_create(struct ofproto *, struct xfif *, struct switch_status *,
+int in_band_create(struct ofproto *, struct wdp *, struct switch_status *,
struct rconn *controller, struct in_band **);
void in_band_destroy(struct in_band *);
void in_band_run(struct in_band *);
-bool in_band_msg_in_hook(struct in_band *, const flow_t *,
- const struct ofpbuf *packet);
-bool in_band_rule_check(struct in_band *, const flow_t *,
- const struct xflow_actions *);
void in_band_wait(struct in_band *);
void in_band_flushed(struct in_band *);
#include "sflow_api.h"
#include "socket-util.h"
#include "timeval.h"
+#include "wdp.h"
#include "xfif.h"
#define THIS_MODULE VLM_sflow
struct collectors *collectors;
SFLAgent *sflow_agent;
struct ofproto_sflow_options *options;
- struct xfif *xfif;
+ struct wdp *wdp;
time_t next_tick;
size_t n_flood, n_all;
struct port_array ports; /* Indexed by XFLOW port number. */
port_array_clear(&os->ports);
/* Turn off sampling to save CPU cycles. */
- xfif_set_sflow_probability(os->xfif, 0);
+ wdp_set_sflow_probability(os->wdp, 0);
}
bool
}
struct ofproto_sflow *
-ofproto_sflow_create(struct xfif *xfif)
+ofproto_sflow_create(struct wdp *wdp)
{
struct ofproto_sflow *os;
os = xcalloc(1, sizeof *os);
- os->xfif = xfif;
+ os->wdp = wdp;
os->next_tick = time_now() + 1;
port_array_init(&os->ports);
return os;
sfl_receiver_set_sFlowRcvrTimeout(receiver, 0xffffffff);
/* Set the sampling_rate down in the datapath. */
- xfif_set_sflow_probability(os->xfif,
- MAX(1, UINT32_MAX / options->sampling_rate));
+ wdp_set_sflow_probability(os->wdp,
+ MAX(1, UINT32_MAX / options->sampling_rate));
/* Add samplers and pollers for the currently known ports. */
PORT_ARRAY_FOR_EACH (osp, &os->ports, xflow_port) {
break;
case XFLOWAT_OUTPUT_GROUP:
+#if 0
n_outputs += (a->output_group.group == DP_GROUP_FLOOD ? os->n_flood
: a->output_group.group == DP_GROUP_ALL ? os->n_all
: 0);
+#endif
break;
case XFLOWAT_SET_DL_TCI:
#include <stdint.h>
#include "svec.h"
-struct xfif;
-struct xflow_msg;
struct ofproto_sflow_options;
+struct wdp;
+struct xflow_msg;
-struct ofproto_sflow *ofproto_sflow_create(struct xfif *);
+struct ofproto_sflow *ofproto_sflow_create(struct wdp *);
void ofproto_sflow_destroy(struct ofproto_sflow *);
void ofproto_sflow_set_options(struct ofproto_sflow *,
const struct ofproto_sflow_options *);
#include "mac-learning.h"
#include "netdev.h"
#include "netflow.h"
-#include "xflow-util.h"
#include "ofp-print.h"
#include "ofproto-sflow.h"
#include "ofpbuf.h"
#include "timeval.h"
#include "unixctl.h"
#include "vconn.h"
+#include "wdp.h"
#include "xfif.h"
#include "xtoxll.h"
TABLEID_CLASSIFIER = 1
};
-struct ofport {
- struct netdev *netdev;
- struct ofp_phy_port opp; /* In host byte order. */
-};
-
-static void ofport_free(struct ofport *);
-static void hton_ofp_phy_port(struct ofp_phy_port *);
-
-static int xlate_actions(const union ofp_action *in, size_t n_in,
- const flow_t *flow, struct ofproto *ofproto,
- const struct ofpbuf *packet,
- struct xflow_actions *out, tag_type *tags,
- bool *may_set_up_flow, uint16_t *nf_output_iface);
-
-struct rule {
- struct cls_rule cr;
-
+struct ofproto_rule {
uint64_t flow_cookie; /* Controller-issued identifier.
(Kept in network-byte order.) */
- uint16_t idle_timeout; /* In seconds from time of last use. */
- uint16_t hard_timeout; /* In seconds from time of creation. */
bool send_flow_removed; /* Send a flow removed message? */
- long long int used; /* Last-used time (0 if never used). */
- long long int created; /* Creation time. */
- uint64_t packet_count; /* Number of packets received. */
- uint64_t byte_count; /* Number of bytes received. */
- uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
tag_type tags; /* Tags (set only by hooks). */
- struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
-
- /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
- * exact-match rule (having cr.wc.wildcards of 0) generated from the
- * wildcard rule 'super'. In this case, 'list' is an element of the
- * super-rule's list.
- *
- * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
- * a list of subrules. A super-rule with no wildcards (where
- * cr.wc.wildcards is 0) will never have any subrules. */
- struct rule *super;
- struct list list;
-
- /* OpenFlow actions.
- *
- * A subrule has no actions (it uses the super-rule's actions). */
- int n_actions;
- union ofp_action *actions;
-
- /* Datapath actions.
- *
- * A super-rule with wildcard fields never has XFLOW actions (since the
- * datapath only supports exact-match flows). */
- bool installed; /* Installed in datapath? */
- bool may_install; /* True ordinarily; false if actions must
- * be reassessed for every packet. */
- int n_xflow_actions;
- union xflow_action *xflow_actions;
};
-static inline bool
-rule_is_hidden(const struct rule *rule)
+static struct ofproto_rule *
+ofproto_rule_cast(const struct wdp_rule *wdp_rule)
{
- /* Subrules are merely an implementation detail, so hide them from the
- * controller. */
- if (rule->super != NULL) {
- return true;
- }
+ return wdp_rule->client_data;
+}
+
+static void
+ofproto_rule_init(struct wdp_rule *wdp_rule)
+{
+ wdp_rule->client_data = xzalloc(sizeof(struct ofproto_rule));
+}
+
+static inline bool
+rule_is_hidden(const struct wdp_rule *rule)
+{
/* Rules with priority higher than UINT16_MAX are set up by ofproto itself
* (e.g. by in-band control) and are intentionally hidden from the
* controller. */
- if (rule->cr.priority > UINT16_MAX) {
+ if (rule->cr.flow.priority > UINT16_MAX) {
return true;
}
return false;
}
-static struct rule *rule_create(struct ofproto *, struct rule *super,
- const union ofp_action *, size_t n_actions,
- uint16_t idle_timeout, uint16_t hard_timeout,
- uint64_t flow_cookie, bool send_flow_removed);
-static void rule_free(struct rule *);
-static void rule_destroy(struct ofproto *, struct rule *);
-static struct rule *rule_from_cls_rule(const struct cls_rule *);
-static void rule_insert(struct ofproto *, struct rule *,
- struct ofpbuf *packet, uint16_t in_port);
-static void rule_remove(struct ofproto *, struct rule *);
-static bool rule_make_actions(struct ofproto *, struct rule *,
- const struct ofpbuf *packet);
-static void rule_install(struct ofproto *, struct rule *,
- struct rule *displaced_rule);
-static void rule_uninstall(struct ofproto *, struct rule *);
-static void rule_post_uninstall(struct ofproto *, struct rule *);
-static void send_flow_removed(struct ofproto *p, struct rule *rule,
- uint8_t reason);
+static void delete_flow(struct ofproto *, struct wdp_rule *, uint8_t reason);
struct ofconn {
struct list node;
char *dp_desc; /* Datapath description. */
/* Datapath. */
- struct xfif *xfif;
- struct netdev_monitor *netdev_monitor;
- struct port_array ports; /* Index is XFLOW port nr; ofport->opp.port_no is
- * OFP port nr. */
- struct shash port_by_name;
+ struct wdp *wdp;
uint32_t max_ports;
/* Configuration. */
struct netflow *netflow;
struct ofproto_sflow *sflow;
- /* Flow table. */
- struct classifier cls;
- bool need_revalidate;
- long long int next_expiration;
- struct tag_set revalidate_set;
-
/* OpenFlow connections. */
struct list all_conns;
struct ofconn *controller;
static uint64_t pick_datapath_id(const struct ofproto *);
static uint64_t pick_fallback_dpid(void);
-static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
-static void send_packet_in_action(struct ofpbuf *, void *ofproto);
-static void update_used(struct ofproto *);
-static void update_stats(struct ofproto *, struct rule *,
- const struct xflow_flow_stats *);
-static void expire_rule(struct cls_rule *, void *ofproto);
-static void active_timeout(struct ofproto *ofproto, struct rule *rule);
-static bool revalidate_rule(struct ofproto *p, struct rule *rule);
-static void revalidate_cb(struct cls_rule *rule_, void *p_);
-
-static void handle_xflow_msg(struct ofproto *, struct ofpbuf *);
+static void send_packet_in_miss(struct wdp_packet *, void *ofproto);
+static void send_packet_in_action(struct wdp_packet *, void *ofproto);
+
+static void handle_wdp_packet(struct ofproto *, struct wdp_packet *);
static void handle_openflow(struct ofconn *, struct ofproto *,
struct ofpbuf *);
-static void refresh_port_groups(struct ofproto *);
-
-static void update_port(struct ofproto *, const char *devname);
-static int init_ports(struct ofproto *);
-static void reinit_ports(struct ofproto *);
-
int
ofproto_create(const char *datapath, const char *datapath_type,
const struct ofhooks *ofhooks, void *aux,
struct ofproto **ofprotop)
{
- struct xflow_stats stats;
+ struct wdp_stats stats;
struct ofproto *p;
- struct xfif *xfif;
+ struct wdp *wdp;
int error;
*ofprotop = NULL;
/* Connect to datapath and start listening for messages. */
- error = xfif_open(datapath, datapath_type, &xfif);
+ error = wdp_open(datapath, datapath_type, &wdp);
if (error) {
VLOG_ERR("failed to open datapath %s: %s", datapath, strerror(error));
return error;
}
- error = xfif_get_xf_stats(xfif, &stats);
+ error = wdp_get_wdp_stats(wdp, &stats);
if (error) {
VLOG_ERR("failed to obtain stats for datapath %s: %s",
datapath, strerror(error));
- xfif_close(xfif);
+ wdp_close(wdp);
return error;
}
- error = xfif_recv_set_mask(xfif, XFLOWL_MISS | XFLOWL_ACTION | XFLOWL_SFLOW);
+ error = wdp_recv_set_mask(wdp, ((1 << WDP_CHAN_MISS)
+ | (1 << WDP_CHAN_ACTION)
+ | (1 << WDP_CHAN_SFLOW)));
if (error) {
VLOG_ERR("failed to listen on datapath %s: %s",
datapath, strerror(error));
- xfif_close(xfif);
+ wdp_close(wdp);
return error;
}
- xfif_flow_flush(xfif);
- xfif_recv_purge(xfif);
+ wdp_flow_flush(wdp);
+ wdp_recv_purge(wdp);
/* Initialize settings. */
p = xzalloc(sizeof *p);
p->dp_desc = xstrdup(DEFAULT_DP_DESC);
/* Initialize datapath. */
- p->xfif = xfif;
- p->netdev_monitor = netdev_monitor_create();
- port_array_init(&p->ports);
- shash_init(&p->port_by_name);
+ p->wdp = wdp;
p->max_ports = stats.max_ports;
/* Initialize submodules. */
p->netflow = NULL;
p->sflow = NULL;
- /* Initialize flow table. */
- classifier_init(&p->cls);
- p->need_revalidate = false;
- p->next_expiration = time_msec() + 1000;
- tag_set_init(&p->revalidate_set);
-
/* Initialize OpenFlow connections. */
list_init(&p->all_conns);
p->controller = ofconn_create(p, rconn_create(5, 8));
{
if (in_band != (p->in_band != NULL)) {
if (in_band) {
- return in_band_create(p, p->xfif, p->switch_status,
+ return in_band_create(p, p->wdp, p->switch_status,
p->controller->rconn, &p->in_band);
} else {
ofproto_set_discovery(p, false, NULL, true);
return error;
}
error = discovery_create(re, update_resolv_conf,
- p->xfif, p->switch_status,
+ p->wdp, p->switch_status,
&p->discovery);
if (error) {
return error;
struct ofproto_sflow *os = ofproto->sflow;
if (oso) {
if (!os) {
- struct ofport *ofport;
- unsigned int xflow_port;
-
- os = ofproto->sflow = ofproto_sflow_create(ofproto->xfif);
- refresh_port_groups(ofproto);
- PORT_ARRAY_FOR_EACH (ofport, &ofproto->ports, xflow_port) {
- ofproto_sflow_add_port(os, xflow_port,
- netdev_get_name(ofport->netdev));
- }
+ os = ofproto->sflow = ofproto_sflow_create(ofproto->wdp);
+ /* XXX ofport */
}
ofproto_sflow_set_options(os, oso);
} else {
ofproto_destroy(struct ofproto *p)
{
struct ofconn *ofconn, *next_ofconn;
- struct ofport *ofport;
- unsigned int port_no;
size_t i;
if (!p) {
ofproto_set_failure(p, false);
ofproto_flush_flows(p);
- classifier_destroy(&p->cls);
LIST_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, node,
&p->all_conns) {
ofconn_destroy(ofconn);
}
- xfif_close(p->xfif);
- netdev_monitor_destroy(p->netdev_monitor);
- PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
- ofport_free(ofport);
- }
- shash_destroy(&p->port_by_name);
+ wdp_close(p->wdp);
switch_status_destroy(p->switch_status);
in_band_destroy(p->in_band);
free(p->serial_desc);
free(p->dp_desc);
- port_array_destroy(&p->ports);
-
free(p);
}
return error;
}
-static void
-process_port_change(struct ofproto *ofproto, int error, char *devname)
-{
- if (error == ENOBUFS) {
- reinit_ports(ofproto);
- } else if (!error) {
- update_port(ofproto, devname);
- free(devname);
- }
-}
-
int
ofproto_run1(struct ofproto *p)
{
struct ofconn *ofconn, *next_ofconn;
- char *devname;
- int error;
int i;
- if (shash_is_empty(&p->port_by_name)) {
- init_ports(p);
- }
-
for (i = 0; i < 50; i++) {
- struct ofpbuf *buf;
+ struct wdp_packet packet;
int error;
- error = xfif_recv(p->xfif, &buf);
+ error = wdp_recv(p->wdp, &packet);
if (error) {
if (error == ENODEV) {
/* Someone destroyed the datapath behind our back. The caller
* spin from here on out. */
static struct vlog_rate_limit rl2 = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_ERR_RL(&rl2, "%s: datapath was destroyed externally",
- xfif_name(p->xfif));
+ wdp_name(p->wdp));
return ENODEV;
}
break;
}
- handle_xflow_msg(p, buf);
- }
-
- while ((error = xfif_port_poll(p->xfif, &devname)) != EAGAIN) {
- process_port_change(p, error, devname);
- }
- while ((error = netdev_monitor_poll(p->netdev_monitor,
- &devname)) != EAGAIN) {
- process_port_change(p, error, devname);
+ handle_wdp_packet(p, xmemdup(&packet, sizeof packet));
}
if (p->in_band) {
}
}
- if (time_msec() >= p->next_expiration) {
- COVERAGE_INC(ofproto_expiration);
- p->next_expiration = time_msec() + 1000;
- update_used(p);
-
- classifier_for_each(&p->cls, CLS_INC_ALL, expire_rule, p);
-
- /* Let the hook know that we're at a stable point: all outstanding data
- * in existing flows has been accounted to the account_cb. Thus, the
- * hook can now reasonably do operations that depend on having accurate
- * flow volume accounting (currently, that's just bond rebalancing). */
- if (p->ofhooks->account_checkpoint_cb) {
- p->ofhooks->account_checkpoint_cb(p->aux);
- }
- }
-
if (p->netflow) {
netflow_run(p->netflow);
}
};
int
-ofproto_run2(struct ofproto *p, bool revalidate_all)
+ofproto_run2(struct ofproto *p OVS_UNUSED, bool revalidate_all OVS_UNUSED)
{
- if (p->need_revalidate || revalidate_all
- || !tag_set_is_empty(&p->revalidate_set)) {
- struct revalidate_cbdata cbdata;
- cbdata.ofproto = p;
- cbdata.revalidate_all = revalidate_all;
- cbdata.revalidate_subrules = p->need_revalidate;
- cbdata.revalidate_set = p->revalidate_set;
- tag_set_init(&p->revalidate_set);
- COVERAGE_INC(ofproto_revalidate);
- classifier_for_each(&p->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
- p->need_revalidate = false;
- }
-
return 0;
}
struct ofconn *ofconn;
size_t i;
- xfif_recv_wait(p->xfif);
- xfif_port_poll_wait(p->xfif);
- netdev_monitor_poll_wait(p->netdev_monitor);
+ wdp_recv_wait(p->wdp);
+ wdp_port_poll_wait(p->wdp);
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
ofconn_wait(ofconn);
}
if (p->sflow) {
ofproto_sflow_wait(p->sflow);
}
- if (!tag_set_is_empty(&p->revalidate_set)) {
- poll_immediate_wake();
- }
- if (p->need_revalidate) {
- /* Shouldn't happen, but if it does just go around again. */
- VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
- poll_immediate_wake();
- } else if (p->next_expiration != LLONG_MAX) {
- poll_timer_wait(p->next_expiration - time_msec());
- }
for (i = 0; i < p->n_listeners; i++) {
pvconn_wait(p->listeners[i]);
}
}
void
-ofproto_revalidate(struct ofproto *ofproto, tag_type tag)
-{
- tag_set_add(&ofproto->revalidate_set, tag);
-}
-
-struct tag_set *
-ofproto_get_revalidate_set(struct ofproto *ofproto)
+ofproto_revalidate(struct ofproto *ofproto OVS_UNUSED, tag_type tag OVS_UNUSED)
{
- return &ofproto->revalidate_set;
+ /* XXX tag_set_add(&ofproto->revalidate_set, tag); */
}
bool
const union ofp_action *actions, size_t n_actions,
const struct ofpbuf *packet)
{
- struct xflow_actions xflow_actions;
- int error;
-
- error = xlate_actions(actions, n_actions, flow, p, packet, &xflow_actions,
- NULL, NULL, NULL);
- if (error) {
- return error;
- }
-
- /* XXX Should we translate the xfif_execute() errno value into an OpenFlow
+ /* XXX Should we translate the wdp_execute() errno value into an OpenFlow
* error code? */
- xfif_execute(p->xfif, flow->in_port, xflow_actions.actions,
- xflow_actions.n_actions, packet);
+ wdp_execute(p->wdp, flow->in_port, actions, n_actions, packet);
return 0;
}
void
-ofproto_add_flow(struct ofproto *p,
- const flow_t *flow, uint32_t wildcards, unsigned int priority,
+ofproto_add_flow(struct ofproto *p, const flow_t *flow,
const union ofp_action *actions, size_t n_actions,
int idle_timeout)
{
- struct rule *rule;
- rule = rule_create(p, NULL, actions, n_actions,
- idle_timeout >= 0 ? idle_timeout : 5 /* XXX */,
- 0, 0, false);
- cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
- rule_insert(p, rule, NULL, 0);
-}
+ struct wdp_flow_put put;
+ struct wdp_rule *rule;
-void
-ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow,
- uint32_t wildcards, unsigned int priority)
-{
- struct rule *rule;
+ put.flags = WDP_PUT_CREATE | WDP_PUT_MODIFY | WDP_PUT_ALL;
+ put.flow = flow;
+ put.actions = actions;
+ put.n_actions = n_actions;
+ put.idle_timeout = idle_timeout >= 0 ? idle_timeout : 5; /* XXX */
+ put.hard_timeout = 0;
- rule = rule_from_cls_rule(classifier_find_rule_exactly(&ofproto->cls,
- flow, wildcards,
- priority));
- if (rule) {
- rule_remove(ofproto, rule);
+ if (!wdp_flow_put(p->wdp, &put, NULL, &rule)) {
+ ofproto_rule_init(rule);
}
}
-static void
-destroy_rule(struct cls_rule *rule_, void *ofproto_)
+void
+ofproto_delete_flow(struct ofproto *ofproto, const flow_t *flow)
{
- struct rule *rule = rule_from_cls_rule(rule_);
- struct ofproto *ofproto = ofproto_;
-
- /* Mark the flow as not installed, even though it might really be
- * installed, so that rule_remove() doesn't bother trying to uninstall it.
- * There is no point in uninstalling it individually since we are about to
- * blow away all the flows with xfif_flow_flush(). */
- rule->installed = false;
-
- rule_remove(ofproto, rule);
+ struct wdp_rule *rule = wdp_flow_get(ofproto->wdp, flow);
+ if (rule) {
+ delete_flow(ofproto, rule, OFPRR_DELETE);
+ }
}
void
ofproto_flush_flows(struct ofproto *ofproto)
{
COVERAGE_INC(ofproto_flush);
- classifier_for_each(&ofproto->cls, CLS_INC_ALL, destroy_rule, ofproto);
- xfif_flow_flush(ofproto->xfif);
+ wdp_flow_flush(ofproto->wdp);
if (ofproto->in_band) {
in_band_flushed(ofproto->in_band);
}
}
}
\f
-static void
-reinit_ports(struct ofproto *p)
-{
- struct svec devnames;
- struct ofport *ofport;
- unsigned int port_no;
- struct xflow_port *xflow_ports;
- size_t n_xflow_ports;
- size_t i;
-
- svec_init(&devnames);
- PORT_ARRAY_FOR_EACH (ofport, &p->ports, port_no) {
- svec_add (&devnames, (char *) ofport->opp.name);
- }
- xfif_port_list(p->xfif, &xflow_ports, &n_xflow_ports);
- for (i = 0; i < n_xflow_ports; i++) {
- svec_add (&devnames, xflow_ports[i].devname);
- }
- free(xflow_ports);
-
- svec_sort_unique(&devnames);
- for (i = 0; i < devnames.n; i++) {
- update_port(p, devnames.names[i]);
- }
- svec_destroy(&devnames);
-}
-
-static size_t
-refresh_port_group(struct ofproto *p, unsigned int group)
-{
- uint16_t *ports;
- size_t n_ports;
- struct ofport *port;
- unsigned int port_no;
-
- assert(group == DP_GROUP_ALL || group == DP_GROUP_FLOOD);
-
- ports = xmalloc(port_array_count(&p->ports) * sizeof *ports);
- n_ports = 0;
- PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
- if (group == DP_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
- ports[n_ports++] = port_no;
- }
- }
- xfif_port_group_set(p->xfif, group, ports, n_ports);
- free(ports);
-
- return n_ports;
-}
-
-static void
-refresh_port_groups(struct ofproto *p)
-{
- size_t n_flood = refresh_port_group(p, DP_GROUP_FLOOD);
- size_t n_all = refresh_port_group(p, DP_GROUP_ALL);
- if (p->sflow) {
- ofproto_sflow_set_group_sizes(p->sflow, n_flood, n_all);
- }
-}
-
-static struct ofport *
-make_ofport(const struct xflow_port *xflow_port)
-{
- struct netdev_options netdev_options;
- enum netdev_flags flags;
- struct ofport *ofport;
- struct netdev *netdev;
- bool carrier;
- int error;
-
- memset(&netdev_options, 0, sizeof netdev_options);
- netdev_options.name = xflow_port->devname;
- netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
- netdev_options.may_open = true;
-
- error = netdev_open(&netdev_options, &netdev);
- if (error) {
- VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
- "cannot be opened (%s)",
- xflow_port->devname, xflow_port->port,
- xflow_port->devname, strerror(error));
- return NULL;
- }
-
- ofport = xmalloc(sizeof *ofport);
- ofport->netdev = netdev;
- ofport->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
- netdev_get_etheraddr(netdev, ofport->opp.hw_addr);
- memcpy(ofport->opp.name, xflow_port->devname,
- MIN(sizeof ofport->opp.name, sizeof xflow_port->devname));
- ofport->opp.name[sizeof ofport->opp.name - 1] = '\0';
-
- netdev_get_flags(netdev, &flags);
- ofport->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
-
- netdev_get_carrier(netdev, &carrier);
- ofport->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
-
- netdev_get_features(netdev,
- &ofport->opp.curr, &ofport->opp.advertised,
- &ofport->opp.supported, &ofport->opp.peer);
- return ofport;
-}
-
-static bool
-ofport_conflicts(const struct ofproto *p, const struct xflow_port *xflow_port)
-{
- if (port_array_get(&p->ports, xflow_port->port)) {
- VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
- xflow_port->port);
- return true;
- } else if (shash_find(&p->port_by_name, xflow_port->devname)) {
- VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
- xflow_port->devname);
- return true;
- } else {
- return false;
- }
-}
-
-static int
-ofport_equal(const struct ofport *a_, const struct ofport *b_)
-{
- const struct ofp_phy_port *a = &a_->opp;
- const struct ofp_phy_port *b = &b_->opp;
-
- BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
- return (a->port_no == b->port_no
- && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
- && !strcmp((char *) a->name, (char *) b->name)
- && a->state == b->state
- && a->config == b->config
- && a->curr == b->curr
- && a->advertised == b->advertised
- && a->supported == b->supported
- && a->peer == b->peer);
-}
-
-static void
-send_port_status(struct ofproto *p, const struct ofport *ofport,
- uint8_t reason)
-{
- /* XXX Should limit the number of queued port status change messages. */
- struct ofconn *ofconn;
- LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
- struct ofp_port_status *ops;
- struct ofpbuf *b;
-
- ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &b);
- ops->reason = reason;
- ops->desc = ofport->opp;
- hton_ofp_phy_port(&ops->desc);
- queue_tx(b, ofconn, NULL);
- }
- if (p->ofhooks->port_changed_cb) {
- p->ofhooks->port_changed_cb(reason, &ofport->opp, p->aux);
- }
-}
-
-static void
-ofport_install(struct ofproto *p, struct ofport *ofport)
-{
- uint16_t xflow_port = ofp_port_to_xflow_port(ofport->opp.port_no);
- const char *netdev_name = (const char *) ofport->opp.name;
-
- netdev_monitor_add(p->netdev_monitor, ofport->netdev);
- port_array_set(&p->ports, xflow_port, ofport);
- shash_add(&p->port_by_name, netdev_name, ofport);
- if (p->sflow) {
- ofproto_sflow_add_port(p->sflow, xflow_port, netdev_name);
- }
-}
-
-static void
-ofport_remove(struct ofproto *p, struct ofport *ofport)
-{
- uint16_t xflow_port = ofp_port_to_xflow_port(ofport->opp.port_no);
-
- netdev_monitor_remove(p->netdev_monitor, ofport->netdev);
- port_array_set(&p->ports, xflow_port, NULL);
- shash_delete(&p->port_by_name,
- shash_find(&p->port_by_name, (char *) ofport->opp.name));
- if (p->sflow) {
- ofproto_sflow_del_port(p->sflow, xflow_port);
- }
-}
-
-static void
-ofport_free(struct ofport *ofport)
-{
- if (ofport) {
- netdev_close(ofport->netdev);
- free(ofport);
- }
-}
-
-static void
-update_port(struct ofproto *p, const char *devname)
-{
- struct xflow_port xflow_port;
- struct ofport *old_ofport;
- struct ofport *new_ofport;
- int error;
-
- COVERAGE_INC(ofproto_update_port);
-
- /* Query the datapath for port information. */
- error = xfif_port_query_by_name(p->xfif, devname, &xflow_port);
-
- /* Find the old ofport. */
- old_ofport = shash_find_data(&p->port_by_name, devname);
- if (!error) {
- if (!old_ofport) {
- /* There's no port named 'devname' but there might be a port with
- * the same port number. This could happen if a port is deleted
- * and then a new one added in its place very quickly, or if a port
- * is renamed. In the former case we want to send an OFPPR_DELETE
- * and an OFPPR_ADD, and in the latter case we want to send a
- * single OFPPR_MODIFY. We can distinguish the cases by comparing
- * the old port's ifindex against the new port, or perhaps less
- * reliably but more portably by comparing the old port's MAC
- * against the new port's MAC. However, this code isn't that smart
- * and always sends an OFPPR_MODIFY (XXX). */
- old_ofport = port_array_get(&p->ports, xflow_port.port);
- }
- } else if (error != ENOENT && error != ENODEV) {
- VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
- "%s", strerror(error));
- return;
- }
-
- /* Create a new ofport. */
- new_ofport = !error ? make_ofport(&xflow_port) : NULL;
-
- /* Eliminate a few pathological cases. */
- if (!old_ofport && !new_ofport) {
- return;
- } else if (old_ofport && new_ofport) {
- /* Most of the 'config' bits are OpenFlow soft state, but
- * OFPPC_PORT_DOWN is maintained the kernel. So transfer the OpenFlow
- * bits from old_ofport. (make_ofport() only sets OFPPC_PORT_DOWN and
- * leaves the other bits 0.) */
- new_ofport->opp.config |= old_ofport->opp.config & ~OFPPC_PORT_DOWN;
-
- if (ofport_equal(old_ofport, new_ofport)) {
- /* False alarm--no change. */
- ofport_free(new_ofport);
- return;
- }
- }
-
- /* Now deal with the normal cases. */
- if (old_ofport) {
- ofport_remove(p, old_ofport);
- }
- if (new_ofport) {
- ofport_install(p, new_ofport);
- }
- send_port_status(p, new_ofport ? new_ofport : old_ofport,
- (!old_ofport ? OFPPR_ADD
- : !new_ofport ? OFPPR_DELETE
- : OFPPR_MODIFY));
- ofport_free(old_ofport);
-
- /* Update port groups. */
- refresh_port_groups(p);
-}
-
-static int
-init_ports(struct ofproto *p)
-{
- struct xflow_port *ports;
- size_t n_ports;
- size_t i;
- int error;
-
- error = xfif_port_list(p->xfif, &ports, &n_ports);
- if (error) {
- return error;
- }
-
- for (i = 0; i < n_ports; i++) {
- const struct xflow_port *xflow_port = &ports[i];
- if (!ofport_conflicts(p, xflow_port)) {
- struct ofport *ofport = make_ofport(xflow_port);
- if (ofport) {
- ofport_install(p, ofport);
- }
- }
- }
- free(ports);
- refresh_port_groups(p);
- return 0;
-}
-\f
static struct ofconn *
ofconn_create(struct ofproto *p, struct rconn *rconn)
{
}
}
\f
-/* Caller is responsible for initializing the 'cr' member of the returned
- * rule. */
-static struct rule *
-rule_create(struct ofproto *ofproto, struct rule *super,
- const union ofp_action *actions, size_t n_actions,
- uint16_t idle_timeout, uint16_t hard_timeout,
- uint64_t flow_cookie, bool send_flow_removed)
-{
- struct rule *rule = xzalloc(sizeof *rule);
- rule->idle_timeout = idle_timeout;
- rule->hard_timeout = hard_timeout;
- rule->flow_cookie = flow_cookie;
- rule->used = rule->created = time_msec();
- rule->send_flow_removed = send_flow_removed;
- rule->super = super;
- if (super) {
- list_push_back(&super->list, &rule->list);
- } else {
- list_init(&rule->list);
- }
- rule->n_actions = n_actions;
- rule->actions = xmemdup(actions, n_actions * sizeof *actions);
- netflow_flow_clear(&rule->nf_flow);
- netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
-
- return rule;
-}
-
-static struct rule *
-rule_from_cls_rule(const struct cls_rule *cls_rule)
-{
- return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
-}
-
-static void
-rule_free(struct rule *rule)
-{
- free(rule->actions);
- free(rule->xflow_actions);
- free(rule);
-}
-
-/* Destroys 'rule'. If 'rule' is a subrule, also removes it from its
- * super-rule's list of subrules. If 'rule' is a super-rule, also iterates
- * through all of its subrules and revalidates them, destroying any that no
- * longer has a super-rule (which is probably all of them).
- *
- * Before calling this function, the caller must make have removed 'rule' from
- * the classifier. If 'rule' is an exact-match rule, the caller is also
- * responsible for ensuring that it has been uninstalled from the datapath. */
-static void
-rule_destroy(struct ofproto *ofproto, struct rule *rule)
-{
- if (!rule->super) {
- struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
- revalidate_rule(ofproto, subrule);
- }
- } else {
- list_remove(&rule->list);
- }
- rule_free(rule);
-}
-
static bool
-rule_has_out_port(const struct rule *rule, uint16_t out_port)
+rule_has_out_port(const struct wdp_rule *rule, uint16_t out_port)
{
const union ofp_action *oa;
struct actions_iterator i;
}
return false;
}
-
-/* Executes the actions indicated by 'rule' on 'packet', which is in flow
- * 'flow' and is considered to have arrived on XFLOW port 'in_port'.
- *
- * The flow that 'packet' actually contains does not need to actually match
- * 'rule'; the actions in 'rule' will be applied to it either way. Likewise,
- * the packet and byte counters for 'rule' will be credited for the packet sent
- * out whether or not the packet actually matches 'rule'.
- *
- * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
- * the caller must already have accurately composed XFLOW actions for it given
- * 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
- * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
- * function will compose a set of XFLOW actions based on 'rule''s OpenFlow
- * actions and apply them to 'packet'. */
+\f
static void
-rule_execute(struct ofproto *ofproto, struct rule *rule,
- struct ofpbuf *packet, const flow_t *flow)
+queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
+ struct rconn_packet_counter *counter)
{
- const union xflow_action *actions;
- size_t n_actions;
- struct xflow_actions a;
-
- /* Grab or compose the XFLOW actions.
- *
- * The special case for an exact-match 'rule' where 'flow' is not the
- * rule's flow is important to avoid, e.g., sending a packet out its input
- * port simply because the XFLOW actions were composed for the wrong
- * scenario. */
- if (rule->cr.wc.wildcards || !flow_equal(flow, &rule->cr.flow)) {
- struct rule *super = rule->super ? rule->super : rule;
- if (xlate_actions(super->actions, super->n_actions, flow, ofproto,
- packet, &a, NULL, 0, NULL)) {
- return;
- }
- actions = a.actions;
- n_actions = a.n_actions;
- } else {
- actions = rule->xflow_actions;
- n_actions = rule->n_xflow_actions;
- }
-
- /* Execute the XFLOW actions. */
- if (!xfif_execute(ofproto->xfif, flow->in_port,
- actions, n_actions, packet)) {
- struct xflow_flow_stats stats;
- flow_extract_stats(flow, packet, &stats);
- update_stats(ofproto, rule, &stats);
- rule->used = time_msec();
- netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
+ update_openflow_length(msg);
+ if (rconn_send(ofconn->rconn, msg, counter)) {
+ ofpbuf_delete(msg);
}
}
static void
-rule_insert(struct ofproto *p, struct rule *rule, struct ofpbuf *packet,
- uint16_t in_port)
+send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
+ int error, const void *data, size_t len)
{
- struct rule *displaced_rule;
-
- /* Insert the rule in the classifier. */
- displaced_rule = rule_from_cls_rule(classifier_insert(&p->cls, &rule->cr));
- if (!rule->cr.wc.wildcards) {
- rule_make_actions(p, rule, packet);
- }
-
- /* Send the packet and credit it to the rule. */
- if (packet) {
- flow_t flow;
- flow_extract(packet, in_port, &flow);
- rule_execute(p, rule, packet, &flow);
- }
-
- /* Install the rule in the datapath only after sending the packet, to
- * avoid packet reordering. */
- if (rule->cr.wc.wildcards) {
- COVERAGE_INC(ofproto_add_wc_flow);
- p->need_revalidate = true;
- } else {
- rule_install(p, rule, displaced_rule);
- }
+ struct ofpbuf *buf;
+ struct ofp_error_msg *oem;
- /* Free the rule that was displaced, if any. */
- if (displaced_rule) {
- rule_destroy(p, displaced_rule);
+ if (!(error >> 16)) {
+ VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
+ error);
+ return;
}
-}
-static struct rule *
-rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
- const flow_t *flow)
-{
- struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
- rule->idle_timeout, rule->hard_timeout,
- 0, false);
- COVERAGE_INC(ofproto_subrule_create);
- cls_rule_from_flow(&subrule->cr, flow, 0,
- (rule->cr.priority <= UINT16_MAX ? UINT16_MAX
- : rule->cr.priority));
- classifier_insert_exact(&ofproto->cls, &subrule->cr);
-
- return subrule;
+ COVERAGE_INC(ofproto_error);
+ oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
+ oh ? oh->xid : 0, &buf);
+ oem->type = htons((unsigned int) error >> 16);
+ oem->code = htons(error & 0xffff);
+ memcpy(oem->data, data, len);
+ queue_tx(buf, ofconn, ofconn->reply_counter);
}
static void
-rule_remove(struct ofproto *ofproto, struct rule *rule)
+send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
+ int error)
{
- if (rule->cr.wc.wildcards) {
- COVERAGE_INC(ofproto_del_wc_flow);
- ofproto->need_revalidate = true;
- } else {
- rule_uninstall(ofproto, rule);
- }
- classifier_remove(&ofproto->cls, &rule->cr);
- rule_destroy(ofproto, rule);
-}
-
-/* Returns true if the actions changed, false otherwise. */
-static bool
-rule_make_actions(struct ofproto *p, struct rule *rule,
- const struct ofpbuf *packet)
-{
- const struct rule *super;
- struct xflow_actions a;
- size_t actions_len;
-
- assert(!rule->cr.wc.wildcards);
-
- super = rule->super ? rule->super : rule;
- rule->tags = 0;
- xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
- packet, &a, &rule->tags, &rule->may_install,
- &rule->nf_flow.output_iface);
-
- actions_len = a.n_actions * sizeof *a.actions;
- if (rule->n_xflow_actions != a.n_actions
- || memcmp(rule->xflow_actions, a.actions, actions_len)) {
- COVERAGE_INC(ofproto_xflow_unchanged);
- free(rule->xflow_actions);
- rule->n_xflow_actions = a.n_actions;
- rule->xflow_actions = xmemdup(a.actions, actions_len);
- return true;
- } else {
- return false;
- }
+ size_t oh_length = ntohs(oh->length);
+ send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
}
static int
-do_put_flow(struct ofproto *ofproto, struct rule *rule, int flags,
- struct xflow_flow_put *put)
+handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
{
- memset(&put->flow.stats, 0, sizeof put->flow.stats);
- xflow_key_from_flow(&put->flow.key, &rule->cr.flow);
- put->flow.actions = rule->xflow_actions;
- put->flow.n_actions = rule->n_xflow_actions;
- put->flags = flags;
- return xfif_flow_put(ofproto->xfif, put);
-}
-
-static void
-rule_install(struct ofproto *p, struct rule *rule, struct rule *displaced_rule)
-{
- assert(!rule->cr.wc.wildcards);
-
- if (rule->may_install) {
- struct xflow_flow_put put;
- if (!do_put_flow(p, rule,
- XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS,
- &put)) {
- rule->installed = true;
- if (displaced_rule) {
- update_stats(p, displaced_rule, &put.flow.stats);
- rule_post_uninstall(p, displaced_rule);
- }
- }
- } else if (displaced_rule) {
- rule_uninstall(p, displaced_rule);
- }
-}
-
-static void
-rule_reinstall(struct ofproto *ofproto, struct rule *rule)
-{
- if (rule->installed) {
- struct xflow_flow_put put;
- COVERAGE_INC(ofproto_dp_missed);
- do_put_flow(ofproto, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put);
- } else {
- rule_install(ofproto, rule, NULL);
- }
-}
-
-static void
-rule_update_actions(struct ofproto *ofproto, struct rule *rule)
-{
- bool actions_changed;
- uint16_t new_out_iface, old_out_iface;
-
- old_out_iface = rule->nf_flow.output_iface;
- actions_changed = rule_make_actions(ofproto, rule, NULL);
-
- if (rule->may_install) {
- if (rule->installed) {
- if (actions_changed) {
- struct xflow_flow_put put;
- do_put_flow(ofproto, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY
- | XFLOWPF_ZERO_STATS, &put);
- update_stats(ofproto, rule, &put.flow.stats);
-
- /* Temporarily set the old output iface so that NetFlow
- * messages have the correct output interface for the old
- * stats. */
- new_out_iface = rule->nf_flow.output_iface;
- rule->nf_flow.output_iface = old_out_iface;
- rule_post_uninstall(ofproto, rule);
- rule->nf_flow.output_iface = new_out_iface;
- }
- } else {
- rule_install(ofproto, rule, NULL);
- }
- } else {
- rule_uninstall(ofproto, rule);
- }
-}
-
-static void
-rule_account(struct ofproto *ofproto, struct rule *rule, uint64_t extra_bytes)
-{
- uint64_t total_bytes = rule->byte_count + extra_bytes;
-
- if (ofproto->ofhooks->account_flow_cb
- && total_bytes > rule->accounted_bytes)
- {
- ofproto->ofhooks->account_flow_cb(
- &rule->cr.flow, rule->xflow_actions, rule->n_xflow_actions,
- total_bytes - rule->accounted_bytes, ofproto->aux);
- rule->accounted_bytes = total_bytes;
- }
-}
-
-static void
-rule_uninstall(struct ofproto *p, struct rule *rule)
-{
- assert(!rule->cr.wc.wildcards);
- if (rule->installed) {
- struct xflow_flow xflow_flow;
-
- xflow_key_from_flow(&xflow_flow.key, &rule->cr.flow);
- xflow_flow.actions = NULL;
- xflow_flow.n_actions = 0;
- if (!xfif_flow_del(p->xfif, &xflow_flow)) {
- update_stats(p, rule, &xflow_flow.stats);
- }
- rule->installed = false;
-
- rule_post_uninstall(p, rule);
- }
-}
-
-static bool
-is_controller_rule(struct rule *rule)
-{
- /* If the only action is send to the controller then don't report
- * NetFlow expiration messages since it is just part of the control
- * logic for the network and not real traffic. */
-
- if (rule && rule->super) {
- struct rule *super = rule->super;
-
- return super->n_actions == 1 &&
- super->actions[0].type == htons(OFPAT_OUTPUT) &&
- super->actions[0].output.port == htons(OFPP_CONTROLLER);
- }
-
- return false;
-}
-
-static void
-rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
-{
- struct rule *super = rule->super;
-
- rule_account(ofproto, rule, 0);
-
- if (ofproto->netflow && !is_controller_rule(rule)) {
- struct ofexpired expired;
- expired.flow = rule->cr.flow;
- expired.packet_count = rule->packet_count;
- expired.byte_count = rule->byte_count;
- expired.used = rule->used;
- netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
- }
- if (super) {
- super->packet_count += rule->packet_count;
- super->byte_count += rule->byte_count;
-
- /* Reset counters to prevent double counting if the rule ever gets
- * reinstalled. */
- rule->packet_count = 0;
- rule->byte_count = 0;
- rule->accounted_bytes = 0;
-
- netflow_flow_clear(&rule->nf_flow);
- }
-}
-\f
-static void
-queue_tx(struct ofpbuf *msg, const struct ofconn *ofconn,
- struct rconn_packet_counter *counter)
-{
- update_openflow_length(msg);
- if (rconn_send(ofconn->rconn, msg, counter)) {
- ofpbuf_delete(msg);
- }
-}
-
-static void
-send_error(const struct ofconn *ofconn, const struct ofp_header *oh,
- int error, const void *data, size_t len)
-{
- struct ofpbuf *buf;
- struct ofp_error_msg *oem;
-
- if (!(error >> 16)) {
- VLOG_WARN_RL(&rl, "not sending bad error code %d to controller",
- error);
- return;
- }
-
- COVERAGE_INC(ofproto_error);
- oem = make_openflow_xid(len + sizeof *oem, OFPT_ERROR,
- oh ? oh->xid : 0, &buf);
- oem->type = htons((unsigned int) error >> 16);
- oem->code = htons(error & 0xffff);
- memcpy(oem->data, data, len);
- queue_tx(buf, ofconn, ofconn->reply_counter);
-}
-
-static void
-send_error_oh(const struct ofconn *ofconn, const struct ofp_header *oh,
- int error)
-{
- size_t oh_length = ntohs(oh->length);
- send_error(ofconn, oh, error, oh, MIN(oh_length, 64));
-}
-
-static void
-hton_ofp_phy_port(struct ofp_phy_port *opp)
-{
- opp->port_no = htons(opp->port_no);
- opp->config = htonl(opp->config);
- opp->state = htonl(opp->state);
- opp->curr = htonl(opp->curr);
- opp->advertised = htonl(opp->advertised);
- opp->supported = htonl(opp->supported);
- opp->peer = htonl(opp->peer);
-}
-
-static int
-handle_echo_request(struct ofconn *ofconn, struct ofp_header *oh)
-{
- struct ofp_header *rq = oh;
- queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
- return 0;
+ struct ofp_header *rq = oh;
+ queue_tx(make_echo_reply(rq), ofconn, ofconn->reply_counter);
+ return 0;
}
static int
handle_features_request(struct ofproto *p, struct ofconn *ofconn,
struct ofp_header *oh)
{
- struct ofp_switch_features *osf;
- struct ofpbuf *buf;
- unsigned int port_no;
- struct ofport *port;
-
- osf = make_openflow_xid(sizeof *osf, OFPT_FEATURES_REPLY, oh->xid, &buf);
- osf->datapath_id = htonll(p->datapath_id);
- osf->n_buffers = htonl(pktbuf_capacity());
- osf->n_tables = 2;
- osf->capabilities = htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
- OFPC_PORT_STATS | OFPC_ARP_MATCH_IP);
- osf->actions = htonl((1u << OFPAT_OUTPUT) |
- (1u << OFPAT_SET_VLAN_VID) |
- (1u << OFPAT_SET_VLAN_PCP) |
- (1u << OFPAT_STRIP_VLAN) |
- (1u << OFPAT_SET_DL_SRC) |
- (1u << OFPAT_SET_DL_DST) |
- (1u << OFPAT_SET_NW_SRC) |
- (1u << OFPAT_SET_NW_DST) |
- (1u << OFPAT_SET_NW_TOS) |
- (1u << OFPAT_SET_TP_SRC) |
- (1u << OFPAT_SET_TP_DST));
-
- PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
- hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
- }
+ struct ofpbuf *features;
+ int error;
- queue_tx(buf, ofconn, ofconn->reply_counter);
- return 0;
+ error = wdp_get_features(p->wdp, &features);
+ if (!error) {
+ struct ofp_switch_features *osf = features->data;
+
+ osf->header.xid = oh->xid;
+ osf->datapath_id = htonll(p->datapath_id);
+ osf->n_buffers = htonl(pktbuf_capacity());
+ osf->capabilities |= htonl(OFPC_FLOW_STATS | OFPC_TABLE_STATS |
+ OFPC_PORT_STATS);
+
+ queue_tx(features, ofconn, ofconn->reply_counter);
+ }
+ return error;
}
static int
bool drop_frags;
/* Figure out flags. */
- xfif_get_drop_frags(p->xfif, &drop_frags);
+ wdp_get_drop_frags(p->wdp, &drop_frags);
flags = drop_frags ? OFPC_FRAG_DROP : OFPC_FRAG_NORMAL;
/* Send reply. */
if (ofconn == p->controller) {
switch (flags & OFPC_FRAG_MASK) {
case OFPC_FRAG_NORMAL:
- xfif_set_drop_frags(p->xfif, false);
+ wdp_set_drop_frags(p->wdp, false);
break;
case OFPC_FRAG_DROP:
- xfif_set_drop_frags(p->xfif, true);
+ wdp_set_drop_frags(p->wdp, true);
break;
default:
VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
return 0;
}
-static void
-add_output_group_action(struct xflow_actions *actions, uint16_t group,
- uint16_t *nf_output_iface)
-{
- xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group = group;
-
- if (group == DP_GROUP_ALL || group == DP_GROUP_FLOOD) {
- *nf_output_iface = NF_OUT_FLOOD;
- }
-}
-
-static void
-add_controller_action(struct xflow_actions *actions,
- const struct ofp_action_output *oao)
-{
- union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER);
- a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
-}
-
-struct action_xlate_ctx {
- /* Input. */
- const flow_t *flow; /* Flow to which these actions correspond. */
- int recurse; /* Recursion level, via xlate_table_action. */
- struct ofproto *ofproto;
- const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
- * null pointer if we are revalidating
- * without a packet to refer to. */
-
- /* Output. */
- struct xflow_actions *out; /* Datapath actions. */
- tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
- bool may_set_up_flow; /* True ordinarily; false if the actions must
- * be reassessed for every packet. */
- uint16_t nf_output_iface; /* Output interface index for NetFlow. */
-};
-
-static void do_xlate_actions(const union ofp_action *in, size_t n_in,
- struct action_xlate_ctx *ctx);
-
-static void
-add_output_action(struct action_xlate_ctx *ctx, uint16_t port)
-{
- const struct ofport *ofport = port_array_get(&ctx->ofproto->ports, port);
-
- if (ofport) {
- if (ofport->opp.config & OFPPC_NO_FWD) {
- /* Forwarding disabled on port. */
- return;
- }
- } else {
- /*
- * We don't have an ofport record for this port, but it doesn't hurt to
- * allow forwarding to it anyhow. Maybe such a port will appear later
- * and we're pre-populating the flow table.
- */
- }
-
- xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port;
- ctx->nf_output_iface = port;
-}
-
-static struct rule *
-lookup_valid_rule(struct ofproto *ofproto, const flow_t *flow)
-{
- struct rule *rule;
- rule = rule_from_cls_rule(classifier_lookup(&ofproto->cls, flow));
-
- /* The rule we found might not be valid, since we could be in need of
- * revalidation. If it is not valid, don't return it. */
- if (rule
- && rule->super
- && ofproto->need_revalidate
- && !revalidate_rule(ofproto, rule)) {
- COVERAGE_INC(ofproto_invalidated);
- return NULL;
- }
-
- return rule;
-}
-
-static void
-xlate_table_action(struct action_xlate_ctx *ctx, uint16_t in_port)
-{
- if (!ctx->recurse) {
- struct rule *rule;
- flow_t flow;
-
- flow = *ctx->flow;
- flow.in_port = in_port;
-
- rule = lookup_valid_rule(ctx->ofproto, &flow);
- if (rule) {
- if (rule->super) {
- rule = rule->super;
- }
-
- ctx->recurse++;
- do_xlate_actions(rule->actions, rule->n_actions, ctx);
- ctx->recurse--;
- }
- }
-}
-
-static void
-xlate_output_action(struct action_xlate_ctx *ctx,
- const struct ofp_action_output *oao)
-{
- uint16_t xflow_port;
- uint16_t prev_nf_output_iface = ctx->nf_output_iface;
-
- ctx->nf_output_iface = NF_OUT_DROP;
-
- switch (ntohs(oao->port)) {
- case OFPP_IN_PORT:
- add_output_action(ctx, ctx->flow->in_port);
- break;
- case OFPP_TABLE:
- xlate_table_action(ctx, ctx->flow->in_port);
- break;
- case OFPP_NORMAL:
- if (!ctx->ofproto->ofhooks->normal_cb(ctx->flow, ctx->packet,
- ctx->out, ctx->tags,
- &ctx->nf_output_iface,
- ctx->ofproto->aux)) {
- COVERAGE_INC(ofproto_uninstallable);
- ctx->may_set_up_flow = false;
- }
- break;
- case OFPP_FLOOD:
- add_output_group_action(ctx->out, DP_GROUP_FLOOD,
- &ctx->nf_output_iface);
- break;
- case OFPP_ALL:
- add_output_group_action(ctx->out, DP_GROUP_ALL, &ctx->nf_output_iface);
- break;
- case OFPP_CONTROLLER:
- add_controller_action(ctx->out, oao);
- break;
- case OFPP_LOCAL:
- add_output_action(ctx, XFLOWP_LOCAL);
- break;
- default:
- xflow_port = ofp_port_to_xflow_port(ntohs(oao->port));
- if (xflow_port != ctx->flow->in_port) {
- add_output_action(ctx, xflow_port);
- }
- break;
- }
-
- if (prev_nf_output_iface == NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_FLOOD;
- } else if (ctx->nf_output_iface == NF_OUT_DROP) {
- ctx->nf_output_iface = prev_nf_output_iface;
- } else if (prev_nf_output_iface != NF_OUT_DROP &&
- ctx->nf_output_iface != NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_MULTI;
- }
-}
-
-static void
-xlate_nicira_action(struct action_xlate_ctx *ctx,
- const struct nx_action_header *nah)
-{
- const struct nx_action_resubmit *nar;
- int subtype = ntohs(nah->subtype);
-
- assert(nah->vendor == htonl(NX_VENDOR_ID));
- switch (subtype) {
- case NXAST_RESUBMIT:
- nar = (const struct nx_action_resubmit *) nah;
- xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port)));
- break;
-
- default:
- VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
- break;
- }
-}
-
-static void
-do_xlate_actions(const union ofp_action *in, size_t n_in,
- struct action_xlate_ctx *ctx)
-{
- struct actions_iterator iter;
- const union ofp_action *ia;
- const struct ofport *port;
-
- port = port_array_get(&ctx->ofproto->ports, ctx->flow->in_port);
- if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
- port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr)
- ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
- /* Drop this flow. */
- return;
- }
-
- for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
- uint16_t type = ntohs(ia->type);
- union xflow_action *oa;
-
- switch (type) {
- case OFPAT_OUTPUT:
- xlate_output_action(ctx, &ia->output);
- break;
-
- case OFPAT_SET_VLAN_VID:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
- oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK);
- oa->dl_tci.mask = htons(VLAN_VID_MASK);
- break;
-
- case OFPAT_SET_VLAN_PCP:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
- oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT)
- & VLAN_PCP_MASK);
- oa->dl_tci.mask = htons(VLAN_PCP_MASK);
- break;
-
- case OFPAT_STRIP_VLAN:
- xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN);
- break;
-
- case OFPAT_SET_DL_SRC:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC);
- memcpy(oa->dl_addr.dl_addr,
- ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
- break;
-
- case OFPAT_SET_DL_DST:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST);
- memcpy(oa->dl_addr.dl_addr,
- ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
- break;
-
- case OFPAT_SET_NW_SRC:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC);
- oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
- break;
-
- case OFPAT_SET_NW_DST:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST);
- oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
- break;
-
- case OFPAT_SET_NW_TOS:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS);
- oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
- break;
-
- case OFPAT_SET_TP_SRC:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC);
- oa->tp_port.tp_port = ia->tp_port.tp_port;
- break;
-
- case OFPAT_SET_TP_DST:
- oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST);
- oa->tp_port.tp_port = ia->tp_port.tp_port;
- break;
-
- case OFPAT_VENDOR:
- xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
- break;
-
- default:
- VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
- break;
- }
- }
-}
-
-static int
-xlate_actions(const union ofp_action *in, size_t n_in,
- const flow_t *flow, struct ofproto *ofproto,
- const struct ofpbuf *packet,
- struct xflow_actions *out, tag_type *tags, bool *may_set_up_flow,
- uint16_t *nf_output_iface)
-{
- tag_type no_tags = 0;
- struct action_xlate_ctx ctx;
- COVERAGE_INC(ofproto_ofp2xflow);
- xflow_actions_init(out);
- ctx.flow = flow;
- ctx.recurse = 0;
- ctx.ofproto = ofproto;
- ctx.packet = packet;
- ctx.out = out;
- ctx.tags = tags ? tags : &no_tags;
- ctx.may_set_up_flow = true;
- ctx.nf_output_iface = NF_OUT_DROP;
- do_xlate_actions(in, n_in, &ctx);
-
- /* Check with in-band control to see if we're allowed to set up this
- * flow. */
- if (!in_band_rule_check(ofproto->in_band, flow, out)) {
- ctx.may_set_up_flow = false;
- }
-
- if (may_set_up_flow) {
- *may_set_up_flow = ctx.may_set_up_flow;
- }
- if (nf_output_iface) {
- *nf_output_iface = ctx.nf_output_iface;
- }
- if (xflow_actions_overflow(out)) {
- xflow_actions_init(out);
- return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
- }
- return 0;
-}
-
static int
handle_packet_out(struct ofproto *p, struct ofconn *ofconn,
struct ofp_header *oh)
{
struct ofp_packet_out *opo;
struct ofpbuf payload, *buffer;
- struct xflow_actions actions;
+ struct ofp_action_header *actions;
int n_actions;
uint16_t in_port;
flow_t flow;
return error;
}
opo = (struct ofp_packet_out *) oh;
+ actions = opo->actions;
COVERAGE_INC(ofproto_packet_out);
if (opo->buffer_id != htonl(UINT32_MAX)) {
buffer = NULL;
}
- flow_extract(&payload, ofp_port_to_xflow_port(ntohs(opo->in_port)), &flow);
- error = xlate_actions((const union ofp_action *) opo->actions, n_actions,
- &flow, p, &payload, &actions, NULL, NULL, NULL);
- if (error) {
- return error;
- }
-
- xfif_execute(p->xfif, flow.in_port, actions.actions, actions.n_actions,
- &payload);
+ flow_extract(&payload, ntohs(opo->in_port), &flow);
+ wdp_execute(p->wdp, flow.in_port, (const union ofp_action *) actions,
+ n_actions, &payload);
ofpbuf_delete(buffer);
return 0;
}
-static void
-update_port_config(struct ofproto *p, struct ofport *port,
- uint32_t config, uint32_t mask)
-{
- mask &= config ^ port->opp.config;
- if (mask & OFPPC_PORT_DOWN) {
- if (config & OFPPC_PORT_DOWN) {
- netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
- } else {
- netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
- }
- }
-#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
- if (mask & REVALIDATE_BITS) {
- COVERAGE_INC(ofproto_costly_flags);
- port->opp.config ^= mask & REVALIDATE_BITS;
- p->need_revalidate = true;
- }
-#undef REVALIDATE_BITS
- if (mask & OFPPC_NO_FLOOD) {
- port->opp.config ^= OFPPC_NO_FLOOD;
- refresh_port_groups(p);
- }
- if (mask & OFPPC_NO_PACKET_IN) {
- port->opp.config ^= OFPPC_NO_PACKET_IN;
- }
-}
-
static int
handle_port_mod(struct ofproto *p, struct ofp_header *oh)
{
const struct ofp_port_mod *opm;
- struct ofport *port;
+ struct wdp_port *port;
int error;
error = check_ofp_message(oh, OFPT_PORT_MOD, sizeof *opm);
}
opm = (struct ofp_port_mod *) oh;
- port = port_array_get(&p->ports,
- ofp_port_to_xflow_port(ntohs(opm->port_no)));
+ wdp_port_query_by_number(p->wdp, ntohs(opm->port_no), &port);
if (!port) {
return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_PORT);
} else if (memcmp(port->opp.hw_addr, opm->hw_addr, OFP_ETH_ALEN)) {
return ofp_mkerr(OFPET_PORT_MOD_FAILED, OFPPMFC_BAD_HW_ADDR);
} else {
- update_port_config(p, port, ntohl(opm->config), ntohl(opm->mask));
+ uint32_t mask, new_config;
+
+ mask = ntohl(opm->mask) & (OFPPC_PORT_DOWN | OFPPC_NO_STP
+ | OFPPC_NO_RECV | OFPPC_NO_RECV_STP
+ | OFPPC_NO_FLOOD | OFPPC_NO_FWD
+ | OFPPC_NO_PACKET_IN);
+ new_config = (port->opp.config & ~mask) | (ntohl(opm->config) & mask);
+ if (new_config != port->opp.config) {
+ wdp_port_set_config(p->wdp, ntohs(opm->port_no), new_config);
+ }
if (opm->advertise) {
netdev_set_advertisements(port->netdev, ntohl(opm->advertise));
}
}
+
return 0;
}
return 0;
}
-static void
-count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
-{
- struct rule *rule = rule_from_cls_rule(cls_rule);
- int *n_subrules = n_subrules_;
-
- if (rule->super) {
- (*n_subrules)++;
- }
-}
-
static int
handle_table_stats_request(struct ofproto *p, struct ofconn *ofconn,
struct ofp_stats_request *request)
{
struct ofp_table_stats *ots;
struct ofpbuf *msg;
- struct xflow_stats xfstats;
- int n_exact, n_subrules, n_wild;
+ struct wdp_stats dpstats;
msg = start_stats_reply(request, sizeof *ots * 2);
- /* Count rules of various kinds. */
- n_subrules = 0;
- classifier_for_each(&p->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
- n_exact = classifier_count_exact(&p->cls) - n_subrules;
- n_wild = classifier_count(&p->cls) - classifier_count_exact(&p->cls);
+ wdp_get_wdp_stats(p->wdp, &dpstats);
/* Hash table. */
- xfif_get_xf_stats(p->xfif, &xfstats);
ots = append_stats_reply(sizeof *ots, ofconn, &msg);
memset(ots, 0, sizeof *ots);
ots->table_id = TABLEID_HASH;
strcpy(ots->name, "hash");
ots->wildcards = htonl(0);
- ots->max_entries = htonl(xfstats.max_capacity);
- ots->active_count = htonl(n_exact);
- ots->lookup_count = htonll(xfstats.n_frags + xfstats.n_hit +
- xfstats.n_missed);
- ots->matched_count = htonll(xfstats.n_hit); /* XXX */
+ ots->max_entries = htonl(dpstats.exact.max_capacity);
+ ots->active_count = htonl(dpstats.exact.n_flows);
+ ots->lookup_count = htonll(dpstats.exact.n_hit + dpstats.exact.n_missed);
+ ots->matched_count = htonll(dpstats.exact.n_hit);
/* Classifier table. */
ots = append_stats_reply(sizeof *ots, ofconn, &msg);
ots->table_id = TABLEID_CLASSIFIER;
strcpy(ots->name, "classifier");
ots->wildcards = htonl(OFPFW_ALL);
- ots->max_entries = htonl(65536);
- ots->active_count = htonl(n_wild);
- ots->lookup_count = htonll(0); /* XXX */
- ots->matched_count = htonll(0); /* XXX */
+ ots->max_entries = htonl(dpstats.wild.max_capacity);
+ ots->active_count = htonl(dpstats.wild.n_flows);
+ ots->lookup_count = htonll(dpstats.wild.n_hit + dpstats.wild.n_missed);
+ ots->matched_count = htonll(dpstats.wild.n_hit);
queue_tx(msg, ofconn, ofconn->reply_counter);
return 0;
}
static void
-append_port_stat(struct ofport *port, uint16_t port_no, struct ofconn *ofconn,
+append_port_stat(struct wdp_port *port, struct ofconn *ofconn,
struct ofpbuf *msg)
{
struct netdev_stats stats;
netdev_get_stats(port->netdev, &stats);
ops = append_stats_reply(sizeof *ops, ofconn, &msg);
- ops->port_no = htons(xflow_port_to_ofp_port(port_no));
+ ops->port_no = htons(port->opp.port_no);
memset(ops->pad, 0, sizeof ops->pad);
ops->rx_packets = htonll(stats.rx_packets);
ops->tx_packets = htonll(stats.tx_packets);
struct ofp_port_stats_request *psr;
struct ofp_port_stats *ops;
struct ofpbuf *msg;
- struct ofport *port;
- unsigned int port_no;
if (arg_size != sizeof *psr) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
msg = start_stats_reply(osr, sizeof *ops * 16);
if (psr->port_no != htons(OFPP_NONE)) {
- port = port_array_get(&p->ports,
- ofp_port_to_xflow_port(ntohs(psr->port_no)));
+ struct wdp_port *port;
+
+ wdp_port_query_by_number(p->wdp, ntohs(psr->port_no), &port);
if (port) {
- append_port_stat(port, ntohs(psr->port_no), ofconn, msg);
+ append_port_stat(port, ofconn, msg);
}
} else {
- PORT_ARRAY_FOR_EACH (port, &p->ports, port_no) {
- append_port_stat(port, port_no, ofconn, msg);
+ struct wdp_port **ports;
+ size_t n_ports;
+ size_t i;
+
+ wdp_port_list(p->wdp, &ports, &n_ports);
+ for (i = 0; i < n_ports; i++) {
+ append_port_stat(ports[i], ofconn, msg);
}
+ free(ports);
}
queue_tx(msg, ofconn, ofconn->reply_counter);
* '*packet_countp' and '*byte_countp'. If 'rule' is a wildcarded rule, the
* returned statistic include statistics for all of 'rule''s subrules. */
static void
-query_stats(struct ofproto *p, struct rule *rule,
+query_stats(struct ofproto *p, struct wdp_rule *rule,
uint64_t *packet_countp, uint64_t *byte_countp)
{
- uint64_t packet_count, byte_count;
- struct rule *subrule;
- struct xflow_flow *xflow_flows;
- size_t n_xflow_flows;
-
- /* Start from historical data for 'rule' itself that are no longer tracked
- * by the datapath. This counts, for example, subrules that have
- * expired. */
- packet_count = rule->packet_count;
- byte_count = rule->byte_count;
-
- /* Prepare to ask the datapath for statistics on 'rule', or if it is
- * wildcarded then on all of its subrules.
- *
- * Also, add any statistics that are not tracked by the datapath for each
- * subrule. This includes, for example, statistics for packets that were
- * executed "by hand" by ofproto via xfif_execute() but must be accounted
- * to a flow. */
- n_xflow_flows = rule->cr.wc.wildcards ? list_size(&rule->list) : 1;
- xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
- if (rule->cr.wc.wildcards) {
- size_t i = 0;
- LIST_FOR_EACH (subrule, struct rule, list, &rule->list) {
- xflow_key_from_flow(&xflow_flows[i++].key, &subrule->cr.flow);
- packet_count += subrule->packet_count;
- byte_count += subrule->byte_count;
- }
- } else {
- xflow_key_from_flow(&xflow_flows[0].key, &rule->cr.flow);
- }
+ struct wdp_flow_stats stats;
- /* Fetch up-to-date statistics from the datapath and add them in. */
- if (!xfif_flow_get_multiple(p->xfif, xflow_flows, n_xflow_flows)) {
- size_t i;
- for (i = 0; i < n_xflow_flows; i++) {
- struct xflow_flow *xflow_flow = &xflow_flows[i];
- packet_count += xflow_flow->stats.n_packets;
- byte_count += xflow_flow->stats.n_bytes;
- }
+ if (!wdp_flow_get_stats(p->wdp, rule, &stats)) {
+ *packet_countp = stats.n_packets;
+ *byte_countp = stats.n_bytes;
+ } else {
+ *packet_countp = 0;
+ *byte_countp = 0;
}
- free(xflow_flows);
-
- /* Return the stats to the caller. */
- *packet_countp = packet_count;
- *byte_countp = byte_count;
}
static void
-flow_stats_cb(struct cls_rule *rule_, void *cbdata_)
+flow_stats_cb(struct wdp_rule *rule, void *cbdata_)
{
- struct rule *rule = rule_from_cls_rule(rule_);
struct flow_stats_cbdata *cbdata = cbdata_;
struct ofp_flow_stats *ofs;
uint64_t packet_count, byte_count;
uint32_t sec = tdiff / 1000;
uint32_t msec = tdiff - (sec * 1000);
- if (rule_is_hidden(rule) || !rule_has_out_port(rule, cbdata->out_port)) {
+ if (rule_is_hidden(rule)
+ || !rule_has_out_port(rule, cbdata->out_port)) {
return;
}
ofs = append_stats_reply(len, cbdata->ofconn, &cbdata->msg);
ofs->length = htons(len);
- ofs->table_id = rule->cr.wc.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
+ ofs->table_id = rule->cr.flow.wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
ofs->pad = 0;
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofs->match);
+ flow_to_match(&rule->cr.flow, &ofs->match);
ofs->duration_sec = htonl(sec);
ofs->duration_nsec = htonl(msec * 1000000);
- ofs->cookie = rule->flow_cookie;
- ofs->priority = htons(rule->cr.priority);
+ ofs->cookie = ofproto_rule_cast(rule)->flow_cookie;
+ ofs->priority = htons(rule->cr.flow.priority);
ofs->idle_timeout = htons(rule->idle_timeout);
ofs->hard_timeout = htons(rule->hard_timeout);
memset(ofs->pad2, 0, sizeof ofs->pad2);
{
struct ofp_flow_stats_request *fsr;
struct flow_stats_cbdata cbdata;
- struct cls_rule target;
+ flow_t target;
if (arg_size != sizeof *fsr) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
cbdata.ofconn = ofconn;
cbdata.out_port = fsr->out_port;
cbdata.msg = start_stats_reply(osr, 1024);
- cls_rule_from_match(&target, &fsr->match, 0);
- classifier_for_each_match(&p->cls, &target,
- table_id_to_include(fsr->table_id),
- flow_stats_cb, &cbdata);
+ flow_from_match(&target, 0, &fsr->match);
+ wdp_flow_for_each_match(p->wdp, &target,
+ table_id_to_include(fsr->table_id),
+ flow_stats_cb, &cbdata);
queue_tx(cbdata.msg, ofconn, ofconn->reply_counter);
return 0;
}
};
static void
-flow_stats_ds_cb(struct cls_rule *rule_, void *cbdata_)
+flow_stats_ds_cb(struct wdp_rule *rule, void *cbdata_)
{
- struct rule *rule = rule_from_cls_rule(rule_);
struct flow_stats_ds_cbdata *cbdata = cbdata_;
struct ds *results = cbdata->results;
struct ofp_match match;
uint64_t packet_count, byte_count;
size_t act_len = sizeof *rule->actions * rule->n_actions;
- /* Don't report on subrules. */
- if (rule->super != NULL) {
- return;
- }
-
query_stats(cbdata->ofproto, rule, &packet_count, &byte_count);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &match);
+ flow_to_match(&rule->cr.flow, &match);
ds_put_format(results, "duration=%llds, ",
(time_msec() - rule->created) / 1000);
- ds_put_format(results, "priority=%u, ", rule->cr.priority);
+ ds_put_format(results, "priority=%u, ", rule->cr.flow.priority);
ds_put_format(results, "n_packets=%"PRIu64", ", packet_count);
ds_put_format(results, "n_bytes=%"PRIu64", ", byte_count);
ofp_print_match(results, &match, true);
void
ofproto_get_all_flows(struct ofproto *p, struct ds *results)
{
- struct ofp_match match;
- struct cls_rule target;
struct flow_stats_ds_cbdata cbdata;
+ struct ofp_match match;
+ flow_t target;
memset(&match, 0, sizeof match);
match.wildcards = htonl(OFPFW_ALL);
cbdata.ofproto = p;
cbdata.results = results;
- cls_rule_from_match(&target, &match, 0);
- classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
- flow_stats_ds_cb, &cbdata);
+ flow_from_match(&target, 0, &match);
+ wdp_flow_for_each_match(p->wdp, &target, CLS_INC_ALL,
+ flow_stats_ds_cb, &cbdata);
}
struct aggregate_stats_cbdata {
};
static void
-aggregate_stats_cb(struct cls_rule *rule_, void *cbdata_)
+aggregate_stats_cb(struct wdp_rule *rule, void *cbdata_)
{
- struct rule *rule = rule_from_cls_rule(rule_);
struct aggregate_stats_cbdata *cbdata = cbdata_;
uint64_t packet_count, byte_count;
struct ofp_aggregate_stats_request *asr;
struct ofp_aggregate_stats_reply *reply;
struct aggregate_stats_cbdata cbdata;
- struct cls_rule target;
struct ofpbuf *msg;
+ flow_t target;
if (arg_size != sizeof *asr) {
return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_BAD_LEN);
cbdata.packet_count = 0;
cbdata.byte_count = 0;
cbdata.n_flows = 0;
- cls_rule_from_match(&target, &asr->match, 0);
- classifier_for_each_match(&p->cls, &target,
- table_id_to_include(asr->table_id),
- aggregate_stats_cb, &cbdata);
+ flow_from_match(&target, 0, &asr->match);
+ wdp_flow_for_each_match(p->wdp, &target,
+ table_id_to_include(asr->table_id),
+ aggregate_stats_cb, &cbdata);
msg = start_stats_reply(osr, sizeof *reply);
reply = append_stats_reply(sizeof *reply, ofconn, &msg);
}
}
-static long long int
-msec_from_nsec(uint64_t sec, uint32_t nsec)
-{
- return !sec ? 0 : sec * 1000 + nsec / 1000000;
-}
-
-static void
-update_time(struct ofproto *ofproto, struct rule *rule,
- const struct xflow_flow_stats *stats)
-{
- long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
- if (used > rule->used) {
- rule->used = used;
- if (rule->super && used > rule->super->used) {
- rule->super->used = used;
- }
- netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
- }
-}
-
-static void
-update_stats(struct ofproto *ofproto, struct rule *rule,
- const struct xflow_flow_stats *stats)
-{
- if (stats->n_packets) {
- update_time(ofproto, rule, stats);
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
- stats->tcp_flags);
- }
-}
-
static int
add_flow(struct ofproto *p, struct ofconn *ofconn,
struct ofp_flow_mod *ofm, size_t n_actions)
{
+ struct wdp_rule *rule;
+ struct wdp_flow_put put;
struct ofpbuf *packet;
- struct rule *rule;
uint16_t in_port;
+ flow_t flow;
int error;
- if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)) {
- flow_t flow;
- uint32_t wildcards;
-
- flow_from_match(&flow, &wildcards, &ofm->match);
- if (classifier_rule_overlaps(&p->cls, &flow, wildcards,
- ntohs(ofm->priority))) {
- return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
- }
+ flow_from_match(&flow, ntohs(ofm->priority), &ofm->match);
+ if (ofm->flags & htons(OFPFF_CHECK_OVERLAP)
+ && wdp_flow_overlaps(p->wdp, &flow)) {
+ return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_OVERLAP);
}
- rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
- n_actions, ntohs(ofm->idle_timeout),
- ntohs(ofm->hard_timeout), ofm->cookie,
- ofm->flags & htons(OFPFF_SEND_FLOW_REM));
- cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
+ put.flags = WDP_PUT_CREATE | WDP_PUT_MODIFY | WDP_PUT_ALL;
+ put.flow = &flow;
+ put.actions = (const union ofp_action *) ofm->actions;
+ put.n_actions = n_actions;
+ put.idle_timeout = ntohs(ofm->idle_timeout);
+ put.hard_timeout = ntohs(ofm->hard_timeout);
+ error = wdp_flow_put(p->wdp, &put, NULL, &rule);
+ if (error) {
+ /* XXX wdp_flow_put should return OpenFlow error code. */
+ return error;
+ }
+ ofproto_rule_init(rule);
- error = 0;
if (ofm->buffer_id != htonl(UINT32_MAX)) {
error = pktbuf_retrieve(ofconn->pktbuf, ntohl(ofm->buffer_id),
&packet, &in_port);
- } else {
- packet = NULL;
- in_port = UINT16_MAX;
+ if (!error) {
+ wdp_flow_inject(p->wdp, rule, in_port, packet);
+ ofpbuf_delete(packet);
+ }
}
- rule_insert(p, rule, packet, in_port);
- ofpbuf_delete(packet);
- return error;
+ return error;
}
static int
modify_flow(struct ofproto *p, const struct ofp_flow_mod *ofm,
- size_t n_actions, uint16_t command, struct rule *rule)
+ size_t n_actions, uint16_t command, struct wdp_rule *rule)
{
if (rule_is_hidden(rule)) {
return 0;
}
if (command == OFPFC_DELETE) {
- send_flow_removed(p, rule, OFPRR_DELETE);
- rule_remove(p, rule);
+ delete_flow(p, rule, OFPRR_DELETE);
} else {
- size_t actions_len = n_actions * sizeof *rule->actions;
+ const struct ofp_action_header *actions = ofm->actions;
+ struct wdp_flow_put put;
- rule->flow_cookie = ofm->cookie;
- if (n_actions == rule->n_actions
- && !memcmp(ofm->actions, rule->actions, actions_len))
- {
- return 0;
- }
-
- free(rule->actions);
- rule->actions = xmemdup(ofm->actions, actions_len);
- rule->n_actions = n_actions;
+ ofproto_rule_cast(rule)->flow_cookie = ofm->cookie;
- if (rule->cr.wc.wildcards) {
- COVERAGE_INC(ofproto_mod_wc_flow);
- p->need_revalidate = true;
- } else {
- rule_update_actions(p, rule);
- }
+ put.flags = WDP_PUT_MODIFY | WDP_PUT_ACTIONS;
+ put.flow = &rule->cr.flow;
+ put.actions = (const union ofp_action *) actions;
+ put.n_actions = n_actions;
+ put.idle_timeout = put.hard_timeout = 0;
+ wdp_flow_put(p->wdp, &put, NULL, NULL);
}
return 0;
modify_flows_strict(struct ofproto *p, const struct ofp_flow_mod *ofm,
size_t n_actions, uint16_t command)
{
- struct rule *rule;
- uint32_t wildcards;
+ struct wdp_rule *rule;
flow_t flow;
- flow_from_match(&flow, &wildcards, &ofm->match);
- rule = rule_from_cls_rule(classifier_find_rule_exactly(
- &p->cls, &flow, wildcards,
- ntohs(ofm->priority)));
+ flow_from_match(&flow, ntohs(ofm->priority), &ofm->match);
+ rule = wdp_flow_get(p->wdp, &flow);
if (rule) {
if (command == OFPFC_DELETE
};
static void
-modify_flows_cb(struct cls_rule *rule_, void *cbdata_)
+modify_flows_cb(struct wdp_rule *rule, void *cbdata_)
{
- struct rule *rule = rule_from_cls_rule(rule_);
struct modify_flows_cbdata *cbdata = cbdata_;
if (cbdata->out_port != htons(OFPP_NONE)
size_t n_actions, uint16_t command)
{
struct modify_flows_cbdata cbdata;
- struct cls_rule target;
+ flow_t target;
cbdata.ofproto = p;
cbdata.ofm = ofm;
cbdata.n_actions = n_actions;
cbdata.command = command;
- cls_rule_from_match(&target, &ofm->match, 0);
-
- classifier_for_each_match(&p->cls, &target, CLS_INC_ALL,
- modify_flows_cb, &cbdata);
+ flow_from_match(&target, 0, &ofm->match);
+ wdp_flow_for_each_match(p->wdp, &target, CLS_INC_ALL,
+ modify_flows_cb, &cbdata);
return 0;
}
}
\f
static void
-handle_xflow_miss_msg(struct ofproto *p, struct ofpbuf *packet)
+handle_flow_miss(struct ofproto *p, struct wdp_packet *packet)
{
- struct xflow_msg *msg = packet->data;
- uint16_t in_port = xflow_port_to_ofp_port(msg->port);
- struct rule *rule;
- struct ofpbuf payload;
+ struct wdp_rule *rule;
flow_t flow;
- payload.data = msg + 1;
- payload.size = msg->length - sizeof *msg;
- flow_extract(&payload, msg->port, &flow);
-
- /* Check with in-band control to see if this packet should be sent
- * to the local port regardless of the flow table. */
- if (in_band_msg_in_hook(p->in_band, &flow, &payload)) {
- union xflow_action action;
-
- memset(&action, 0, sizeof(action));
- action.output.type = XFLOWAT_OUTPUT;
- action.output.port = XFLOWP_LOCAL;
- xfif_execute(p->xfif, flow.in_port, &action, 1, &payload);
- }
-
- rule = lookup_valid_rule(p, &flow);
+ flow_extract(packet->payload, packet->in_port, &flow);
+ rule = wdp_flow_match(p->wdp, &flow);
if (!rule) {
/* Don't send a packet-in if OFPPC_NO_PACKET_IN asserted. */
- struct ofport *port = port_array_get(&p->ports, msg->port);
+ struct wdp_port *port;
+
+ wdp_port_query_by_number(p->wdp, packet->in_port, &port);
if (port) {
if (port->opp.config & OFPPC_NO_PACKET_IN) {
COVERAGE_INC(ofproto_no_packet_in);
- /* XXX install 'drop' flow entry */
- ofpbuf_delete(packet);
+ wdp_packet_destroy(packet);
return;
}
} else {
- VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, msg->port);
+ VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16,
+ packet->in_port);
}
COVERAGE_INC(ofproto_packet_in);
- pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
+ pinsched_send(p->miss_sched, packet->in_port, packet,
+ send_packet_in_miss, p);
return;
}
- if (rule->cr.wc.wildcards) {
- rule = rule_create_subrule(p, rule, &flow);
- rule_make_actions(p, rule, packet);
- } else {
- if (!rule->may_install) {
- /* The rule is not installable, that is, we need to process every
- * packet, so process the current packet and set its actions into
- * 'subrule'. */
- rule_make_actions(p, rule, packet);
- } else {
- /* XXX revalidate rule if it needs it */
- }
- }
-
- rule_execute(p, rule, &payload, &flow);
- rule_reinstall(p, rule);
+ wdp_flow_inject(p->wdp, rule, packet->in_port, packet->payload);
- if (rule->super && rule->super->cr.priority == FAIL_OPEN_PRIORITY
+ if (rule->cr.flow.priority == FAIL_OPEN_PRIORITY
&& rconn_is_connected(p->controller->rconn)) {
/*
* Extra-special case for fail-open mode.
*
* See the top-level comment in fail-open.c for more information.
*/
- pinsched_send(p->miss_sched, in_port, packet, send_packet_in_miss, p);
+ pinsched_send(p->miss_sched, packet->in_port, packet,
+ send_packet_in_miss, p);
} else {
- ofpbuf_delete(packet);
+ wdp_packet_destroy(packet);
}
}
static void
-handle_xflow_msg(struct ofproto *p, struct ofpbuf *packet)
+handle_wdp_packet(struct ofproto *p, struct wdp_packet *packet)
{
- struct xflow_msg *msg = packet->data;
-
- switch (msg->type) {
- case _XFLOWL_ACTION_NR:
+ switch (packet->channel) {
+ case WDP_CHAN_ACTION:
COVERAGE_INC(ofproto_ctlr_action);
- pinsched_send(p->action_sched, xflow_port_to_ofp_port(msg->port), packet,
+ pinsched_send(p->action_sched, packet->in_port, packet,
send_packet_in_action, p);
break;
- case _XFLOWL_SFLOW_NR:
- if (p->sflow) {
- ofproto_sflow_received(p->sflow, msg);
- }
- ofpbuf_delete(packet);
+ case WDP_CHAN_SFLOW:
+ /* XXX */
+ wdp_packet_destroy(packet);
break;
- case _XFLOWL_MISS_NR:
- handle_xflow_miss_msg(p, packet);
+ case WDP_CHAN_MISS:
+ handle_flow_miss(p, packet);
break;
+ case WDP_N_CHANS:
default:
- VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
- msg->type);
+ wdp_packet_destroy(packet);
+ VLOG_WARN_RL(&rl, "received message on unexpected channel %d",
+ (int) packet->channel);
break;
}
}
\f
-static void
-revalidate_cb(struct cls_rule *sub_, void *cbdata_)
-{
- struct rule *sub = rule_from_cls_rule(sub_);
- struct revalidate_cbdata *cbdata = cbdata_;
-
- if (cbdata->revalidate_all
- || (cbdata->revalidate_subrules && sub->super)
- || (tag_set_intersects(&cbdata->revalidate_set, sub->tags))) {
- revalidate_rule(cbdata->ofproto, sub);
- }
-}
-
-static bool
-revalidate_rule(struct ofproto *p, struct rule *rule)
-{
- const flow_t *flow = &rule->cr.flow;
-
- COVERAGE_INC(ofproto_revalidate_rule);
- if (rule->super) {
- struct rule *super;
- super = rule_from_cls_rule(classifier_lookup_wild(&p->cls, flow));
- if (!super) {
- rule_remove(p, rule);
- return false;
- } else if (super != rule->super) {
- COVERAGE_INC(ofproto_revalidate_moved);
- list_remove(&rule->list);
- list_push_back(&super->list, &rule->list);
- rule->super = super;
- rule->hard_timeout = super->hard_timeout;
- rule->idle_timeout = super->idle_timeout;
- rule->created = super->created;
- rule->used = 0;
- }
- }
-
- rule_update_actions(p, rule);
- return true;
-}
-
static struct ofpbuf *
-compose_flow_removed(const struct rule *rule, long long int now, uint8_t reason)
+compose_flow_removed(const struct wdp_rule *rule, uint8_t reason)
{
- struct ofp_flow_removed *ofr;
- struct ofpbuf *buf;
- long long int tdiff = now - rule->created;
+ long long int tdiff = time_msec() - rule->created;
uint32_t sec = tdiff / 1000;
uint32_t msec = tdiff - (sec * 1000);
+ struct ofp_flow_removed *ofr;
+ struct ofpbuf *buf;
ofr = make_openflow(sizeof *ofr, OFPT_FLOW_REMOVED, &buf);
- flow_to_match(&rule->cr.flow, rule->cr.wc.wildcards, &ofr->match);
- ofr->cookie = rule->flow_cookie;
- ofr->priority = htons(rule->cr.priority);
+ flow_to_match(&rule->cr.flow, &ofr->match);
+ ofr->cookie = ofproto_rule_cast(rule)->flow_cookie;
+ ofr->priority = htons(rule->cr.flow.priority);
ofr->reason = reason;
ofr->duration_sec = htonl(sec);
ofr->duration_nsec = htonl(msec * 1000000);
ofr->idle_timeout = htons(rule->idle_timeout);
- ofr->packet_count = htonll(rule->packet_count);
- ofr->byte_count = htonll(rule->byte_count);
return buf;
}
static void
-uninstall_idle_flow(struct ofproto *ofproto, struct rule *rule)
-{
- assert(rule->installed);
- assert(!rule->cr.wc.wildcards);
-
- if (rule->super) {
- rule_remove(ofproto, rule);
- } else {
- rule_uninstall(ofproto, rule);
- }
-}
-static void
-send_flow_removed(struct ofproto *p, struct rule *rule, uint8_t reason)
+delete_flow(struct ofproto *p, struct wdp_rule *rule, uint8_t reason)
{
/* We limit the maximum number of queued flow expirations it by accounting
* them under the counter for replies. That works because preventing
* being added (and expiring). (It also prevents processing OpenFlow
* requests that would not add new flows, so it is imperfect.) */
- if (rule->send_flow_removed) {
- long long int now = time_msec();
+ struct ofproto_rule *ofproto_rule = ofproto_rule_cast(rule);
+ struct wdp_flow_stats stats;
+ struct ofpbuf *buf;
+
+ if (ofproto_rule->send_flow_removed) {
+ /* Compose most of the ofp_flow_removed before 'rule' is destroyed. */
+ buf = compose_flow_removed(rule, reason);
+ } else {
+ buf = NULL;
+ }
+
+ if (wdp_flow_delete(p->wdp, rule, &stats)) {
+ return;
+ }
+
+ if (buf) {
+ struct ofp_flow_removed *ofr;
struct ofconn *prev = NULL;
- struct ofpbuf *buf = NULL;
struct ofconn *ofconn;
+ /* Compose the parts of the ofp_flow_removed that require stats. */
+ ofr = buf->data;
+ ofr->packet_count = htonll(stats.n_packets);
+ ofr->byte_count = htonll(stats.n_bytes);
+
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
if (rconn_is_connected(ofconn->rconn)) {
if (prev) {
queue_tx(ofpbuf_clone(buf), prev, prev->reply_counter);
- } else {
- buf = compose_flow_removed(rule, now, reason);
}
prev = ofconn;
}
}
if (prev) {
queue_tx(buf, prev, prev->reply_counter);
+ } else {
+ ofpbuf_delete(buf);
}
}
-}
-
-static void
-expire_rule(struct cls_rule *cls_rule, void *p_)
-{
- struct ofproto *p = p_;
- struct rule *rule = rule_from_cls_rule(cls_rule);
- long long int hard_expire, idle_expire, expire, now;
-
- hard_expire = (rule->hard_timeout
- ? rule->created + rule->hard_timeout * 1000
- : LLONG_MAX);
- idle_expire = (rule->idle_timeout
- && (rule->super || list_is_empty(&rule->list))
- ? rule->used + rule->idle_timeout * 1000
- : LLONG_MAX);
- expire = MIN(hard_expire, idle_expire);
-
- now = time_msec();
- if (now < expire) {
- if (rule->installed && now >= rule->used + 5000) {
- uninstall_idle_flow(p, rule);
- } else if (!rule->cr.wc.wildcards) {
- active_timeout(p, rule);
- }
-
- return;
- }
-
- COVERAGE_INC(ofproto_expired);
-
- /* Update stats. This code will be a no-op if the rule expired
- * due to an idle timeout. */
- if (rule->cr.wc.wildcards) {
- struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
- rule_remove(p, subrule);
- }
- } else {
- rule_uninstall(p, rule);
- }
-
- if (!rule_is_hidden(rule)) {
- send_flow_removed(p, rule,
- (now >= hard_expire
- ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
- }
- rule_remove(p, rule);
-}
-
-static void
-active_timeout(struct ofproto *ofproto, struct rule *rule)
-{
- if (ofproto->netflow && !is_controller_rule(rule) &&
- netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
- struct ofexpired expired;
- struct xflow_flow xflow_flow;
-
- /* Get updated flow stats. */
- memset(&xflow_flow, 0, sizeof xflow_flow);
- if (rule->installed) {
- xflow_key_from_flow(&xflow_flow.key, &rule->cr.flow);
- xflow_flow.flags = XFLOWFF_ZERO_TCP_FLAGS;
- xfif_flow_get(ofproto->xfif, &xflow_flow);
-
- if (xflow_flow.stats.n_packets) {
- update_time(ofproto, rule, &xflow_flow.stats);
- netflow_flow_update_flags(&rule->nf_flow, xflow_flow.stats.ip_tos,
- xflow_flow.stats.tcp_flags);
- }
- }
-
- expired.flow = rule->cr.flow;
- expired.packet_count = rule->packet_count +
- xflow_flow.stats.n_packets;
- expired.byte_count = rule->byte_count + xflow_flow.stats.n_bytes;
- expired.used = rule->used;
-
- netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
-
- /* Schedule us to send the accumulated records once we have
- * collected all of them. */
- poll_immediate_wake();
- }
-}
-
-static void
-update_used(struct ofproto *p)
-{
- struct xflow_flow *flows;
- size_t n_flows;
- size_t i;
- int error;
-
- error = xfif_flow_list_all(p->xfif, &flows, &n_flows);
- if (error) {
- return;
- }
-
- for (i = 0; i < n_flows; i++) {
- struct xflow_flow *f = &flows[i];
- struct rule *rule;
- flow_t flow;
-
- xflow_key_to_flow(&f->key, &flow);
-
- rule = rule_from_cls_rule(
- classifier_find_rule_exactly(&p->cls, &flow, 0, UINT16_MAX));
- if (!rule || !rule->installed) {
- COVERAGE_INC(ofproto_unexpected_rule);
- xfif_flow_del(p->xfif, f);
- continue;
- }
-
- update_time(p, rule, &f->stats);
- rule_account(p, rule, f->stats.n_bytes);
- }
- free(flows);
+ free(ofproto_rule);
}
static void
do_send_packet_in(struct ofconn *ofconn, uint32_t buffer_id,
- const struct ofpbuf *packet, int send_len)
+ const struct wdp_packet *packet, int send_len)
{
- struct xflow_msg *msg = packet->data;
- struct ofpbuf payload;
struct ofpbuf *opi;
uint8_t reason;
- /* Extract packet payload from 'msg'. */
- payload.data = msg + 1;
- payload.size = msg->length - sizeof *msg;
-
- /* Construct ofp_packet_in message. */
- reason = msg->type == _XFLOWL_ACTION_NR ? OFPR_ACTION : OFPR_NO_MATCH;
- opi = make_packet_in(buffer_id, xflow_port_to_ofp_port(msg->port), reason,
- &payload, send_len);
-
- /* Send. */
+ reason = packet->channel == WDP_CHAN_ACTION ? OFPR_ACTION : OFPR_NO_MATCH;
+ opi = make_packet_in(buffer_id, packet->in_port, reason,
+ packet->payload, send_len);
rconn_send_with_limit(ofconn->rconn, opi, ofconn->packet_in_counter, 100);
}
static void
-send_packet_in_action(struct ofpbuf *packet, void *p_)
+send_packet_in_action(struct wdp_packet *packet, void *p_)
{
struct ofproto *p = p_;
struct ofconn *ofconn;
- struct xflow_msg *msg;
- msg = packet->data;
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
if (ofconn == p->controller || ofconn->miss_send_len) {
- do_send_packet_in(ofconn, UINT32_MAX, packet, msg->arg);
+ do_send_packet_in(ofconn, UINT32_MAX, packet, packet->send_len);
}
}
- ofpbuf_delete(packet);
+ wdp_packet_destroy(packet);
}
static void
-send_packet_in_miss(struct ofpbuf *packet, void *p_)
+send_packet_in_miss(struct wdp_packet *packet, void *p_)
{
struct ofproto *p = p_;
bool in_fail_open = p->fail_open && fail_open_is_active(p->fail_open);
struct ofconn *ofconn;
- struct ofpbuf payload;
- struct xflow_msg *msg;
- msg = packet->data;
- payload.data = msg + 1;
- payload.size = msg->length - sizeof *msg;
LIST_FOR_EACH (ofconn, struct ofconn, node, &p->all_conns) {
if (ofconn->miss_send_len) {
struct pktbuf *pb = ofconn->pktbuf;
uint32_t buffer_id = (in_fail_open
? pktbuf_get_null()
- : pktbuf_save(pb, &payload, msg->port));
+ : pktbuf_save(pb, packet->payload,
+ packet->in_port));
int send_len = (buffer_id != UINT32_MAX ? ofconn->miss_send_len
: UINT32_MAX);
do_send_packet_in(ofconn, buffer_id, packet, send_len);
}
}
- ofpbuf_delete(packet);
+ wdp_packet_destroy(packet);
}
static uint64_t
pick_datapath_id(const struct ofproto *ofproto)
{
- const struct ofport *port;
+ struct wdp_port *port;
- port = port_array_get(&ofproto->ports, XFLOWP_LOCAL);
+ wdp_port_query_by_number(ofproto->wdp, OFPP_LOCAL, &port);
if (port) {
uint8_t ea[ETH_ADDR_LEN];
int error;
VLOG_WARN("could not get MAC address for %s (%s)",
netdev_get_name(port->netdev), strerror(error));
}
+
return ofproto->fallback_dpid;
}
eth_addr_nicira_random(ea);
return eth_addr_to_uint64(ea);
}
-\f
-static bool
-default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
- struct xflow_actions *actions, tag_type *tags,
- uint16_t *nf_output_iface, void *ofproto_)
-{
- struct ofproto *ofproto = ofproto_;
- int out_port;
-
- /* Drop frames for reserved multicast addresses. */
- if (eth_addr_is_reserved(flow->dl_dst)) {
- return true;
- }
-
- /* Learn source MAC (but don't try to learn from revalidation). */
- if (packet != NULL) {
- tag_type rev_tag = mac_learning_learn(ofproto->ml, flow->dl_src,
- 0, flow->in_port);
- if (rev_tag) {
- /* The log messages here could actually be useful in debugging,
- * so keep the rate limit relatively high. */
- static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
- VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
- ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
- ofproto_revalidate(ofproto, rev_tag);
- }
- }
-
- /* Determine output port. */
- out_port = mac_learning_lookup_tag(ofproto->ml, flow->dl_dst, 0, tags);
- if (out_port < 0) {
- add_output_group_action(actions, DP_GROUP_FLOOD, nf_output_iface);
- } else if (out_port != flow->in_port) {
- xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = out_port;
- *nf_output_iface = out_port;
- } else {
- /* Drop. */
- }
-
- return true;
-}
-
-static const struct ofhooks default_ofhooks = {
- NULL,
- default_normal_ofhook_cb,
- NULL,
- NULL
-};
struct ofproto;
struct svec;
-enum {
- DP_GROUP_FLOOD = 0,
- DP_GROUP_ALL = 1
-};
-
struct ofexpired {
flow_t flow;
uint64_t packet_count; /* Packets from subrules. */
int ofproto_send_packet(struct ofproto *, const flow_t *,
const union ofp_action *, size_t n_actions,
const struct ofpbuf *);
-void ofproto_add_flow(struct ofproto *, const flow_t *, uint32_t wildcards,
- unsigned int priority,
+void ofproto_add_flow(struct ofproto *, const flow_t *,
const union ofp_action *, size_t n_actions,
int idle_timeout);
-void ofproto_delete_flow(struct ofproto *, const flow_t *, uint32_t wildcards,
- unsigned int priority);
+void ofproto_delete_flow(struct ofproto *, const flow_t *);
void ofproto_flush_flows(struct ofproto *);
/* Hooks for ovs-vswitchd. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdlib.h>
+#include "list.h"
#include "ofpbuf.h"
#include "openflow/openflow.h"
#include "poll-loop.h"
#include "port-array.h"
-#include "queue.h"
#include "random.h"
#include "rconn.h"
#include "status.h"
#include "timeval.h"
#include "vconn.h"
+#include "wdp.h"
+
+struct wdp_packet_queue {
+ struct list list;
+ int n;
+};
struct pinsched {
/* Client-supplied parameters. */
int burst_limit; /* Maximum token bucket size, in packets. */
/* One queue per physical port. */
- struct port_array queues; /* Array of "struct ovs_queue *". */
+ struct port_array queues; /* Array of "struct wdp_packet_queue". */
int n_queued; /* Sum over queues[*].n. */
unsigned int last_tx_port; /* Last port checked in round-robin. */
struct status_category *ss_cat;
};
-static struct ofpbuf *
-dequeue_packet(struct pinsched *ps, struct ovs_queue *q,
+static struct wdp_packet *
+dequeue_packet(struct pinsched *ps, struct wdp_packet_queue *q,
unsigned int port_no)
{
- struct ofpbuf *packet = queue_pop_head(q);
- if (!q->n) {
+ struct wdp_packet *packet;
+
+ packet = CONTAINER_OF(list_pop_front(&q->list), struct wdp_packet, list);
+ q->n--;
+ if (list_is_empty(&q->list)) {
free(q);
port_array_set(&ps->queues, port_no, NULL);
}
static void
drop_packet(struct pinsched *ps)
{
- struct ovs_queue *longest; /* Queue currently selected as longest. */
- int n_longest; /* # of queues of same length as 'longest'. */
+ struct wdp_packet_queue *longest;
+ int n_longest;
unsigned int longest_port_no;
unsigned int port_no;
- struct ovs_queue *q;
+ struct wdp_packet_queue *q;
ps->n_queue_dropped++;
}
/* FIXME: do we want to pop the tail instead? */
- ofpbuf_delete(dequeue_packet(ps, longest, longest_port_no));
+ wdp_packet_destroy(dequeue_packet(ps, longest, longest_port_no));
}
/* Remove and return the next packet to transmit (in round-robin order). */
-static struct ofpbuf *
+static struct wdp_packet *
get_tx_packet(struct pinsched *ps)
{
- struct ovs_queue *q = port_array_next(&ps->queues, &ps->last_tx_port);
+ struct wdp_packet_queue *q;
+
+ q = port_array_next(&ps->queues, &ps->last_tx_port);
if (!q) {
q = port_array_first(&ps->queues, &ps->last_tx_port);
}
void
pinsched_send(struct pinsched *ps, uint16_t port_no,
- struct ofpbuf *packet, pinsched_tx_cb *cb, void *aux)
+ struct wdp_packet *packet, pinsched_tx_cb *cb, void *aux)
{
if (!ps) {
cb(packet, aux);
cb(packet, aux);
} else {
/* Otherwise queue it up for the periodic callback to drain out. */
- struct ovs_queue *q;
+ struct wdp_packet_queue *q;
/* We are called with a buffer obtained from xfif_recv() that has much
* more allocated space than actual content most of the time. Since
* we're going to store the packet for some time, free up that
* otherwise wasted space. */
- ofpbuf_trim(packet);
+ ofpbuf_trim(packet->payload);
if (ps->n_queued >= ps->burst_limit) {
drop_packet(ps);
q = port_array_get(&ps->queues, port_no);
if (!q) {
q = xmalloc(sizeof *q);
- queue_init(q);
+ list_init(&q->list);
+ q->n = 0;
port_array_set(&ps->queues, port_no, q);
}
- queue_push_tail(q, packet);
+ list_push_back(&q->list, &packet->list);
+ q->n++;
ps->n_queued++;
ps->n_limited++;
}
/*
- * Copyright (c) 2008, 2009 Nicira Networks.
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
struct ofpbuf;
struct switch_status;
+struct wdp_packet;
-typedef void pinsched_tx_cb(struct ofpbuf *, void *aux);
+typedef void pinsched_tx_cb(struct wdp_packet *, void *aux);
struct pinsched *pinsched_create(int rate_limit, int burst_limit,
struct switch_status *);
void pinsched_set_limits(struct pinsched *, int rate_limit, int burst_limit);
void pinsched_destroy(struct pinsched *);
-void pinsched_send(struct pinsched *, uint16_t port_no, struct ofpbuf *,
+void pinsched_send(struct pinsched *, uint16_t port_no, struct wdp_packet *,
pinsched_tx_cb *, void *aux);
void pinsched_run(struct pinsched *, pinsched_tx_cb *, void *aux);
void pinsched_wait(struct pinsched *);
--- /dev/null
+/*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WDP_PROVIDER_H
+#define WDP_PROVIDER_H 1
+
+/* Provider interface to wdps, which provide an interface to an Open vSwitch
+ * datapath. */
+
+#include <assert.h>
+#include "util.h"
+#include "wdp.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Returns the "struct wdp_rule" in which 'cls_rule' is embedded, or a null
+ * pointer if 'cls_rule' is null. */
+static inline struct wdp_rule *
+wdp_rule_cast(const struct cls_rule *cls_rule)
+{
+    return cls_rule ? CONTAINER_OF(cls_rule, struct wdp_rule, cr) : NULL;
+}
+
+/* Open vSwitch datapath interface.
+ *
+ * This structure should be treated as opaque by wdp implementations. */
+struct wdp {
+    const struct wdp_class *wdp_class;  /* Provider functions (see below). */
+    char *base_name;            /* Datapath name; see wdp_init(). */
+    char *full_name;            /* Datapath name; how it differs from
+                                 * 'base_name' is not visible here --
+                                 * presumably set by wdp_init(), confirm. */
+    uint8_t netflow_engine_type;  /* NetFlow engine type; see wdp_init(). */
+    uint8_t netflow_engine_id;    /* NetFlow engine ID; see wdp_init(). */
+};
+
+void wdp_init(struct wdp *, const struct wdp_class *, const char *name,
+ uint8_t netflow_engine_type, uint8_t netflow_engine_id);
+void wdp_uninit(struct wdp *wdp, bool close);
+
+/* Asserts that 'wdp' is an instance of 'wdp_class'. */
+static inline void wdp_assert_class(const struct wdp *wdp,
+                                    const struct wdp_class *wdp_class)
+{
+    assert(wdp->wdp_class == wdp_class);
+}
+
+/* Datapath interface class structure, to be defined by each implementation of
+ * a datapath interface.
+ *
+ * These functions return 0 if successful or a positive errno value on failure,
+ * except where otherwise noted.
+ *
+ * These functions are expected to execute synchronously, that is, to block as
+ * necessary to obtain a result. Thus, they may not return EAGAIN or
+ * EWOULDBLOCK or EINPROGRESS. We may relax this requirement in the future if
+ * and when we encounter performance problems. */
+struct wdp_class {
+ /* Type of wdp in this class, e.g. "system", "netdev", etc.
+ *
+ * One of the providers should supply a "system" type, since this is
+ * the type assumed if no type is specified when opening a wdp. */
+ const char *type;
+
+ /* Performs periodic work needed by wdps of this class, if any is
+ * necessary. */
+ void (*run)(void);
+
+ /* Arranges for poll_block() to wake up if the "run" member function needs
+ * to be called. */
+ void (*wait)(void);
+
+ /* Enumerates the names of all known created datapaths for 'wdp_class',
+ * if possible, into 'all_wdps'. The caller has already initialized
+ * 'all_wdps' and other wdp classes might already have added names to it.
+ *
+ * This is used by the vswitch at startup, so that it can delete any
+ * datapaths that are not configured.
+ *
+ * Some kinds of datapaths might not be practically enumerable, in which
+ * case this function may be a null pointer. */
+ int (*enumerate)(const struct wdp_class *wdp_class,
+ struct svec *all_wdps);
+
+ /* Attempts to open an existing wdp of class 'wdp_class' called 'name',
+ * if 'create' is false, or to open an existing wdp or create a new one,
+ * if 'create' is true.
+ *
+ * If successful, stores a pointer to the new wdp in '*wdpp'. On
+ * failure there are no requirements on what is stored in '*wdpp'. */
+ int (*open)(const struct wdp_class *wdp_class, const char *name,
+ bool create, struct wdp **wdpp);
+
+ /* Closes 'wdp' and frees associated memory. */
+ void (*close)(struct wdp *wdp);
+
+ /* Enumerates all names that may be used to open 'wdp' into 'all_names'.
+ * The Linux datapath, for example, supports opening a datapath both by
+ * number, e.g. "wdp0", and by the name of the datapath's local port. For
+ * some datapaths, this might be an infinite set (e.g. in a file name,
+ * slashes may be duplicated any number of times), in which case only the
+ * names most likely to be used should be enumerated.
+ *
+ * The caller has already initialized 'all_names' and might already have
+ * added some names to it. This function should not disturb any existing
+ * names in 'all_names'.
+ *
+ * If a datapath class does not support multiple names for a datapath, this
+ * function may be a null pointer.
+ *
+     * This is used by the vswitch at startup. */
+ int (*get_all_names)(const struct wdp *wdp, struct svec *all_names);
+
+ /* Attempts to destroy the wdp underlying 'wdp'.
+ *
+ * If successful, 'wdp' will not be used again except as an argument for
+ * the 'close' member function. */
+ int (*destroy)(struct wdp *wdp);
+
+ /* Creates a "struct ofp_switch_features" for 'wdp' and stores it in
+ * '*featuresp'. The caller is responsible for freeing '*featuresp' (with
+ * ofpbuf_delete()) when it is no longer needed. */
+ int (*get_features)(const struct wdp *wdp, struct ofpbuf **featuresp);
+
+ /* Retrieves statistics for 'wdp' into 'stats'. */
+ int (*get_stats)(const struct wdp *wdp, struct wdp_stats *stats);
+
+ /* Retrieves 'wdp''s current treatment of IP fragments into '*drop_frags':
+ * true indicates that fragments are dropped, false indicates that
+ * fragments are treated in the same way as other IP packets (except that
+ * the L4 header cannot be read). */
+ int (*get_drop_frags)(const struct wdp *wdp, bool *drop_frags);
+
+ /* Changes 'wdp''s treatment of IP fragments to 'drop_frags', whose
+ * meaning is the same as for the get_drop_frags member function. */
+ int (*set_drop_frags)(struct wdp *wdp, bool drop_frags);
+
+ /* Creates a new port in 'wdp' connected to network device 'devname'. If
+ * 'internal' is true, creates the port as an internal port. If
+ * successful, sets '*port_no' to the new port's port number. */
+ int (*port_add)(struct wdp *wdp, const char *devname,
+ bool internal, uint16_t *port_no);
+
+ /* Removes port numbered 'port_no' from 'wdp'. */
+ int (*port_del)(struct wdp *wdp, uint16_t port_no);
+
+ /* Looks up a port in 'wdp' by name or number. On success, returns 0 and
+ * points '*portp' to a wdp_port representing the specified port. On
+ * failure, returns a positive errno value and sets '*portp' to NULL.
+ *
+ * The caller is not allowed to modify or free the returned wdp_port. The
+ * wdp_port must remain accessible until the next call to the 'run' member
+     * function for this class or wdp_port_poll() for 'wdp'. */
+ int (*port_query_by_number)(const struct wdp *wdp, uint16_t port_no,
+ struct wdp_port **portp);
+ int (*port_query_by_name)(const struct wdp *wdp, const char *devname,
+ struct wdp_port **portp);
+
+ /* Obtains a list of all the ports in 'wdp'. Sets '*portsp' to point to
+ * an array of pointers to port structures and '*n_portsp' to the number of
+ * pointers in the array.
+ *
+ * The caller is responsible for freeing '*portsp' by calling free(). The
+     * caller is not allowed to modify or free the individual wdp_port
+ * structures. The wdp_ports must remain accessible until the next call to
+     * the 'run' member function for this class or wdp_port_poll() for
+ * 'wdp'. */
+ int (*port_list)(const struct wdp *wdp, struct wdp_port ***portsp,
+ size_t *n_portsp);
+
+    /* Sets the configuration of port 'port_no' in 'wdp' to 'config'.
+     * (NOTE(review): presumably 'config' uses OpenFlow OFPPC_* bits, as
+     * read elsewhere via 'port->opp.config' -- confirm against callers.) */
+    int (*port_set_config)(struct wdp *wdp, uint16_t port_no,
+                           uint32_t config);
+
+ /* Polls for changes in the set of ports in 'wdp'. If the set of ports
+ * in 'wdp' has changed, then this function should do one of the
+ * following:
+ *
+ * - Preferably: store the name of the device that was added to or deleted
+ * from 'wdp' in '*devnamep' and return 0. The caller is responsible
+ * for freeing '*devnamep' (with free()) when it no longer needs it.
+ *
+ * - Alternatively: return ENOBUFS, without indicating the device that was
+ * added or deleted.
+ *
+ * Occasional 'false positives', in which the function returns 0 while
+ * indicating a device that was not actually added or deleted or returns
+ * ENOBUFS without any change, are acceptable.
+ *
+ * If the set of ports in 'wdp' has not changed, returns EAGAIN. May
+ * also return other positive errno values to indicate that something has
+ * gone wrong. */
+ int (*port_poll)(const struct wdp *wdp, char **devnamep);
+
+ /* Arranges for the poll loop to wake up when 'port_poll' will return a
+ * value other than EAGAIN. */
+ void (*port_poll_wait)(const struct wdp *wdp);
+
+ /* If 'wdp' contains a flow exactly equal to 'flow', returns that flow.
+ * Otherwise returns null. */
+ struct wdp_rule *(*flow_get)(const struct wdp *wdp,
+ const flow_t *flow);
+
+ /* If 'wdp' contains one or more flows that match 'flow', returns the
+ * highest-priority matching flow. If there is more than one
+ * highest-priority match, picks one of them in an arbitrary fashion.
+ * Otherwise returns null.
+ *
+ * Ignores 'flow->priority' and 'flow->wildcards'. */
+ struct wdp_rule *(*flow_match)(const struct wdp *wdp,
+ const flow_t *flow);
+
+ /* Iterates through all of the flows in 'wdp''s flow table, passing each
+ * flow that matches the specified search criteria to 'callback' along with
+ * 'aux'.
+ *
+ * Flows are filtered out in two ways. First, based on 'include':
+ * Exact-match flows are excluded unless CLS_INC_EXACT is in 'include'.
+ * Wildcarded flows are excluded unless CLS_INC_WILD is in 'include'.
+ *
+ * Flows are also filtered out based on 'target': on a field-by-field
+ * basis, a flow is included if 'target' wildcards that field or if the
+ * flow and 'target' both have the same exact value for the field. A flow
+ * is excluded if any field does not match based on these criteria.
+ *
+ * Ignores 'target->priority'.
+ *
+ * 'callback' is allowed to delete the rule that is passed as its argument.
+ * It may modify any flow in 'wdp', e.g. changing their actions.
+ * 'callback' must not delete flows from 'wdp' other than its argument
+ * flow, nor may it insert new flows into 'wdp'. */
+ void (*flow_for_each_match)(const struct wdp *wdp, const flow_t *flow,
+ int include,
+ wdp_flow_cb_func *callback, void *aux);
+
+ /* Retrieves flow statistics for 'rule', which must be in 'wdp''s flow
+ * table, and stores them into '*stats'. Returns 0 if successful,
+ * otherwise a positive errno value. */
+ int (*flow_get_stats)(const struct wdp *wdp,
+ const struct wdp_rule *rule,
+ struct wdp_flow_stats *stats);
+
+ /* Searches 'wdp''s flow table for a flow that overlaps 'flow'. Two flow
+ * entries overlap if they have the same priority and a single packet may
+ * match both.
+ *
+ * This is intended for implementing OpenFlow's OFPFF_CHECK_OVERLAP
+ * feature. */
+ bool (*flow_overlaps)(const struct wdp *wdp, const flow_t *flow);
+
+ /* Adds or modifies a flow in 'wdp' as specified in 'put':
+ *
+ * - If a rule with the same priority, wildcards, and values for fields
+ * that are not wildcarded specified in 'put->flow' does not already
+ * exist in 'wdp', then behavior depends on whether WDP_PUT_CREATE is
+ * specified in 'put->flags': if it is, the flow will be added,
+ * otherwise the operation will fail with ENOENT.
+ *
+ * The new flow's actions and timeouts are set from the values in
+ * 'put'.
+ *
+ * - Otherwise, the flow specified in 'put->flow' does exist in 'wdp'.
+ * Behavior in this case depends on whether WDP_PUT_MODIFY is specified
+ * in 'put->flags': if it is, the flow will be updated, otherwise the
+ * operation will fail with EEXIST. The exact updates depend on the
+ * remaining flags in 'put->flags':
+ *
+ * . If WDP_PUT_COUNTERS is set, packet counters, byte counters, TCP
+ * flags, and IP TOS values are set to 0.
+ *
+ * . If WDP_PUT_ACTIONS is set, the actions are replaced by the
+ * 'put->n_actions' actions in 'put->actions'.
+ *
+ * . If WDP_PUT_INSERTED is set, the flow's insertion time is updated
+ * to the current time. (Timeouts are relative to a flow's
+ * insertion time so this affects their interpretation.)
+ *
+ * . If WDP_PUT_TIMEOUTS is set, the flow's idle and hard timeouts
+ * are updated from 'put->idle_timeout' and 'put->hard_timeout',
+ * respectively.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. If
+ * successful:
+ *
+ * - If 'old_stats' is nonnull, then 'old_stats' is filled with the
+ * flow's stats as they existed just before the update, or it is zeroed
+ * if the flow is newly created.
+ *
+ * - If 'rulep' is nonnull, then it is set to the newly created rule.
+ *
+ * Some error return values have specific meanings:
+ *
+ * - ENOENT: Flow does not exist and WDP_PUT_CREATE not specified.
+ *
+ * - EEXIST: Flow exists and WDP_PUT_MODIFY not specified.
+ *
+ * - ENOBUFS: Flow table full.
+ *
+ * - EINVAL: Flow table cannot accept flow of this form.
+ */
+ int (*flow_put)(struct wdp *wdp, const struct wdp_flow_put *put,
+ struct wdp_flow_stats *old_stats,
+ struct wdp_rule **rulep);
+
+ /* Deletes 'rule' from 'wdp'. Returns 0 if successful, otherwise a
+ * positive errno value.
+ *
+ * If successful and 'final_stats' is non-null, stores the flow's
+ * statistics just before it is deleted into '*final_stats'. */
+ int (*flow_delete)(struct wdp *wdp, struct wdp_rule *rule,
+ struct wdp_flow_stats *final_stats);
+
+ /* Deletes all flows from 'wdp' and clears all of its queues of received
+ * packets. */
+ int (*flow_flush)(struct wdp *wdp);
+
+ /* Performs the actions for 'rule' on the Ethernet frame specified in
+ * 'packet'. Pretends that the frame was originally received on the port
+ * numbered 'in_port'. Packets and bytes sent should be credited to
+ * 'rule'. */
+ int (*flow_inject)(struct wdp *wdp, struct wdp_rule *rule,
+ uint16_t in_port, const struct ofpbuf *packet);
+
+ /* Performs the 'n_actions' actions in 'actions' on the Ethernet frame
+ * specified in 'packet'. Pretends that the frame was originally received
+ * on the port numbered 'in_port'. */
+ int (*execute)(struct wdp *wdp, uint16_t in_port,
+ const union ofp_action actions[], int n_actions,
+ const struct ofpbuf *packet);
+
+ /* Retrieves 'wdp''s "listen mask" into '*listen_mask'. Each bit set in
+ * '*listen_mask' indicates the 'wdp' will receive messages of the
+ * corresponding WDP_CHAN_* when it calls the recv member function. */
+ int (*recv_get_mask)(const struct wdp *wdp, int *listen_mask);
+
+ /* Sets 'wdp''s "listen mask" to 'listen_mask'. Each bit set in
+ * 'listen_mask' indicates the 'wdp' will receive messages of the
+ * corresponding WDP_CHAN_* type when it calls the recv member function. */
+ int (*recv_set_mask)(struct wdp *wdp, int listen_mask);
+
+ /* Retrieves 'wdp''s sFlow sampling probability into '*probability'.
+ * Return value is 0 or a positive errno value. EOPNOTSUPP indicates that
+ * the datapath does not support sFlow, as does a null pointer.
+ *
+ * '*probability' is expressed as the number of packets out of UINT_MAX to
+ * sample, e.g. probability/UINT_MAX is the probability of sampling a given
+ * packet. */
+ int (*get_sflow_probability)(const struct wdp *wdp,
+ uint32_t *probability);
+
+ /* Sets 'wdp''s sFlow sampling probability to 'probability'. Return value
+ * is 0 or a positive errno value. EOPNOTSUPP indicates that the datapath
+ * does not support sFlow, as does a null pointer.
+ *
+ * 'probability' is expressed as the number of packets out of UINT_MAX to
+ * sample, e.g. probability/UINT_MAX is the probability of sampling a given
+ * packet. */
+ int (*set_sflow_probability)(struct wdp *wdp, uint32_t probability);
+
+ /* Attempts to receive a message from 'wdp'. If successful, stores the
+ * message into '*packet'. Only messages of the types selected with the
+ * recv_set_mask member function should be received.
+ *
+ * This function must not block. If no message is ready to be received
+ * when it is called, it should return EAGAIN without blocking. */
+ int (*recv)(struct wdp *wdp, struct wdp_packet *packet);
+
+ /* Arranges for the poll loop to wake up when 'wdp' has a message queued
+ * to be received with the recv member function. */
+ void (*recv_wait)(struct wdp *wdp);
+};
+
+extern const struct wdp_class wdp_linux_class;
+extern const struct wdp_class wdp_netdev_class;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* wdp-provider.h */
--- /dev/null
+/*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "wdp-xflow.h"
+
+#include <errno.h>
+#include <inttypes.h>
+
+#include "coverage.h"
+#include "dhcp.h"
+#include "netdev.h"
+#include "netflow.h"
+#include "ofpbuf.h"
+#include "openflow/nicira-ext.h"
+#include "openflow/openflow.h"
+#include "packets.h"
+#include "poll-loop.h"
+#include "port-array.h"
+#include "shash.h"
+#include "stp.h"
+#include "svec.h"
+#include "timeval.h"
+#include "util.h"
+#include "vconn.h"
+#include "wdp-provider.h"
+#include "xfif.h"
+#include "xflow-util.h"
+#include "xtoxll.h"
+
+#define THIS_MODULE VLM_wdp_xflow
+#include "vlog.h"
+
+static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+\f
+/* Maximum numbers of rules. */
+#define WX_MAX_WILD 65536 /* Wildcarded rules. */
+#define WX_MAX_EXACT 1048576 /* Exact-match rules. */
+
+/* A wdp ("wire datapath") implementation layered on top of an xfif
+ * datapath interface. */
+struct wx {
+    struct list list_node;      /* Element in the global 'all_wx' list. */
+    struct wdp wdp;             /* Base object; wx_cast() maps back to wx. */
+    struct xfif *xfif;          /* Underlying datapath. */
+    struct classifier cls;      /* Contains "struct wx_rule"s. */
+    struct netdev_monitor *netdev_monitor;
+    struct port_array ports;    /* Index is ODP port nr; wdp_port->opp.port_no
+                                 * is OFP port nr. */
+    struct shash port_by_name;  /* Port lookup by name (see wx_port_init). */
+    bool need_revalidate;       /* Revalidate exact-match subrules soon? */
+    long long int next_expiration; /* Time (ms) of next expiration sweep. */
+};
+
+static struct list all_wx = LIST_INITIALIZER(&all_wx);
+
+static int wx_port_init(struct wx *);
+static void wx_port_run(struct wx *);
+static void wx_port_refresh_groups(struct wx *);
+
+/* Datapath port-group numbers used by this implementation. */
+enum {
+    WX_GROUP_FLOOD = 0,         /* Ports used when flooding. */
+    WX_GROUP_ALL = 1            /* All ports. */
+};
+
+/* Returns the struct wx that embeds 'wdp'. */
+static struct wx *
+wx_cast(const struct wdp *wdp)
+{
+    return CONTAINER_OF(wdp, struct wx, wdp);
+}
+
+static int
+wx_xlate_actions(struct wx *, const union ofp_action *, size_t n,
+ const flow_t *flow, const struct ofpbuf *packet,
+ struct xflow_actions *out, bool *may_set_up_flow);
+\f
+/* An OpenFlow flow within a wx, possibly expanded into exact-match
+ * "subrules" that are actually installed in the datapath. */
+struct wx_rule {
+    struct wdp_rule wr;         /* Embedded wdp rule; wr.cr is the
+                                 * classifier rule. */
+
+    uint64_t packet_count;      /* Number of packets received. */
+    uint64_t byte_count;        /* Number of bytes received. */
+    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
+    long long int used;         /* Last-used time (0 if never used). */
+
+    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
+     * exact-match rule (having cr.wc.wildcards of 0) generated from the
+     * wildcard rule 'super'.  In this case, 'list' is an element of the
+     * super-rule's list.
+     *
+     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
+     * a list of subrules.  A super-rule with no wildcards (where
+     * cr.wc.wildcards is 0) will never have any subrules. */
+    struct wx_rule *super;
+    struct list list;
+
+    /* Datapath actions.
+     *
+     * A super-rule with wildcard fields never has XFLOW actions (since the
+     * datapath only supports exact-match flows). */
+    bool installed;             /* Installed in datapath? */
+    bool may_install;           /* True ordinarily; false if actions must
+                                 * be reassessed for every packet. */
+    int n_xflow_actions;        /* Number of elements in 'xflow_actions'. */
+    union xflow_action *xflow_actions; /* Malloc'd; owned by this rule. */
+};
+
+static void wx_rule_destroy(struct wx *, struct wx_rule *);
+static void wx_rule_update_actions(struct wx *, struct wx_rule *);
+static void wx_rule_execute(struct wx *, struct wx_rule *,
+ struct ofpbuf *packet, const flow_t *);
+static bool wx_rule_make_actions(struct wx *, struct wx_rule *,
+ const struct ofpbuf *packet);
+static void wx_rule_install(struct wx *, struct wx_rule *,
+ struct wx_rule *displaced_rule);
+
+/* Returns the wx_rule that embeds 'cls_rule', or a null pointer if
+ * 'cls_rule' is null. */
+static struct wx_rule *
+wx_rule_cast(const struct cls_rule *cls_rule)
+{
+    return cls_rule ? CONTAINER_OF(cls_rule, struct wx_rule, wr.cr) : NULL;
+}
+
+/* Returns true if 'rule' is merely an implementation detail that should be
+ * hidden from the client. */
+static inline bool
+wx_rule_is_hidden(const struct wx_rule *rule)
+{
+    return rule->super != NULL; /* Only subrules are hidden. */
+}
+
+/* Frees 'rule' and the XFLOW actions it owns.  The caller must already have
+ * removed 'rule' from the classifier (see wx_rule_destroy()). */
+static void
+wx_rule_free(struct wx_rule *rule)
+{
+    wdp_rule_uninit(&rule->wr);
+    free(rule->xflow_actions);
+    free(rule);
+}
+
+/* Accounting hook for 'rule'.  Currently a placeholder: the account_cb
+ * callback from ofproto has not been wired up yet. */
+static void
+wx_rule_account(struct wx *wx OVS_UNUSED, struct wx_rule *rule OVS_UNUSED,
+                uint64_t extra_bytes OVS_UNUSED)
+{
+    /* XXX call account_cb hook */
+}
+
+/* Bookkeeping after 'rule' has been removed from the datapath: runs
+ * accounting and, for a subrule, folds its packet and byte counters into its
+ * super-rule. */
+static void
+wx_rule_post_uninstall(struct wx *wx, struct wx_rule *rule)
+{
+    struct wx_rule *super = rule->super;
+
+    wx_rule_account(wx, rule, 0);
+
+    /* XXX netflow expiration */
+
+    if (super) {
+        super->packet_count += rule->packet_count;
+        super->byte_count += rule->byte_count;
+
+        /* Reset counters to prevent double counting if the rule ever gets
+         * reinstalled. */
+        rule->packet_count = 0;
+        rule->byte_count = 0;
+        rule->accounted_bytes = 0;
+
+        //XXX netflow_flow_clear(&rule->nf_flow);
+    }
+}
+
+/* Converts the last-used timestamp in 'stats' to milliseconds.  Returns 0 if
+ * the flow has never been used (used_sec of 0). */
+static long long int
+xflow_flow_stats_to_msec(const struct xflow_flow_stats *stats)
+{
+    return (stats->used_sec
+            ? stats->used_sec * 1000 + stats->used_nsec / 1000000
+            : 0);
+}
+
+/* Advances 'rule''s last-used time (and its super-rule's, if any) from
+ * datapath 'stats'.  Times only move forward; stale stats are ignored. */
+static void
+wx_rule_update_time(struct wx *wx OVS_UNUSED, struct wx_rule *rule,
+                    const struct xflow_flow_stats *stats)
+{
+    long long int used = xflow_flow_stats_to_msec(stats);
+    if (used > rule->used) {
+        rule->used = used;
+        if (rule->super && used > rule->super->used) {
+            rule->super->used = used;
+        }
+        //XXX netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
+    }
+}
+
+/* Credits the packet and byte counts in 'stats' to 'rule' and refreshes its
+ * last-used time.  A stats block with no packets is a no-op. */
+static void
+wx_rule_update_stats(struct wx *wx, struct wx_rule *rule,
+                     const struct xflow_flow_stats *stats)
+{
+    if (stats->n_packets) {
+        wx_rule_update_time(wx, rule, stats);
+        rule->packet_count += stats->n_packets;
+        rule->byte_count += stats->n_bytes;
+        /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
+           stats->tcp_flags); */
+    }
+}
+
+/* Removes 'rule' (which must be exact-match) from the datapath, if it is
+ * installed, crediting any final stats reported by the delete back to the
+ * rule before running post-uninstall bookkeeping. */
+static void
+wx_rule_uninstall(struct wx *wx, struct wx_rule *rule)
+{
+    assert(!rule->wr.cr.flow.wildcards);
+    if (rule->installed) {
+        struct xflow_flow xflow_flow;
+
+        xflow_key_from_flow(&xflow_flow.key, &rule->wr.cr.flow);
+        xflow_flow.actions = NULL;
+        xflow_flow.n_actions = 0;
+        xflow_flow.flags = 0;
+        if (!xfif_flow_del(wx->xfif, &xflow_flow)) {
+            wx_rule_update_stats(wx, rule, &xflow_flow.stats);
+        }
+        rule->installed = false;
+
+        wx_rule_post_uninstall(wx, rule);
+    }
+}
+
+#if 0
+/* Disabled pending NetFlow support: detects subrules whose super-rule's only
+ * action is output-to-controller, so NetFlow expirations can skip them. */
+static bool
+is_controller_rule(struct wx_rule *rule)
+{
+    /* If the only action is send to the controller then don't report
+     * NetFlow expiration messages since it is just part of the control
+     * logic for the network and not real traffic. */
+
+    if (rule && rule->super) {
+        struct wdp_rule *super = &rule->super->wr;
+
+        return super->n_actions == 1 &&
+            super->actions[0].type == htons(OFPAT_OUTPUT) &&
+            super->actions[0].output.port == htons(OFPP_CONTROLLER);
+    }
+
+    return false;
+}
+#endif
+
+/* Removes 'rule' from 'wx''s classifier and destroys it.  A wildcarded rule
+ * just schedules revalidation (its subrules are cleaned up during
+ * destruction); an exact-match rule is uninstalled from the datapath
+ * first. */
+static void
+wx_rule_remove(struct wx *wx, struct wx_rule *rule)
+{
+    if (rule->wr.cr.flow.wildcards) {
+        COVERAGE_INC(wx_del_wc_flow);
+        wx->need_revalidate = true;
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+    classifier_remove(&wx->cls, &rule->wr.cr);
+    wx_rule_destroy(wx, rule);
+}
+
+/* Re-checks that subrule 'rule' still corresponds to the wildcarded rule the
+ * classifier would choose for its flow, re-parenting or removing it as
+ * needed, then refreshes its datapath actions.  Returns false if 'rule' was
+ * removed, true otherwise.
+ *
+ * NOTE(review): this is the same logic as revalidate_rule() below; the two
+ * look like candidates for merging. */
+static bool
+wx_rule_revalidate(struct wx *wx, struct wx_rule *rule)
+{
+    const flow_t *flow = &rule->wr.cr.flow;
+
+    COVERAGE_INC(wx_rule_revalidate);
+    if (rule->super) {
+        struct wx_rule *super;
+        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+        if (!super) {
+            wx_rule_remove(wx, rule);
+            return false;
+        } else if (super != rule->super) {
+            COVERAGE_INC(wx_revalidate_moved);
+            list_remove(&rule->list);
+            list_push_back(&super->list, &rule->list);
+            rule->super = super;
+            rule->wr.hard_timeout = super->wr.hard_timeout;
+            rule->wr.idle_timeout = super->wr.idle_timeout;
+            rule->wr.created = super->wr.created;
+            rule->used = 0;
+        }
+    }
+
+    wx_rule_update_actions(wx, rule);
+    return true;
+}
+
+/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
+ * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
+ * through all of its subrules and revalidates them, destroying any that no
+ * longer has a super-rule (which is probably all of them).
+ *
+ * Before calling this function, the caller must make have removed 'rule' from
+ * the classifier.  If 'rule' is an exact-match rule, the caller is also
+ * responsible for ensuring that it has been uninstalled from the datapath. */
+static void
+wx_rule_destroy(struct wx *wx, struct wx_rule *rule)
+{
+    if (!rule->super) {
+        struct wx_rule *subrule, *next;
+        /* Revalidation removes orphaned subrules (see comment above). */
+        LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) {
+            wx_rule_revalidate(wx, subrule);
+        }
+    } else {
+        list_remove(&rule->list);
+    }
+    wx_rule_free(rule);
+}
+
+#if 0
+/* Disabled until flow-stats filtering needs it: returns true if any of
+ * 'rule''s OpenFlow actions output to 'out_port' (network byte order), or if
+ * 'out_port' is OFPP_NONE. */
+static bool
+wx_rule_has_out_port(const struct wx_rule *rule, uint16_t out_port)
+{
+    const union ofp_action *oa;
+    struct actions_iterator i;
+
+    if (out_port == htons(OFPP_NONE)) {
+        return true;
+    }
+    for (oa = actions_first(&i, rule->wr.actions,
+                            rule->wr.n_actions);
+         oa;
+         oa = actions_next(&i)) {
+        if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
+            return true;
+        }
+    }
+    return false;
+}
+#endif
+
+/* Allocates and returns a new wx_rule with the given OpenFlow 'actions' and
+ * timeouts.  If 'super' is nonnull the new rule is registered as one of its
+ * subrules.
+ *
+ * Caller is responsible for initializing the 'cr' member of the returned
+ * rule. */
+static struct wx_rule *
+wx_rule_create(struct wx_rule *super,
+               const union ofp_action *actions, size_t n_actions,
+               uint16_t idle_timeout, uint16_t hard_timeout)
+{
+    struct wx_rule *rule = xzalloc(sizeof *rule);
+    wdp_rule_init(&rule->wr, actions, n_actions);
+    rule->wr.idle_timeout = idle_timeout;
+    rule->wr.hard_timeout = hard_timeout;
+    rule->used = rule->wr.created; /* Treat creation as first use. */
+    rule->super = super;
+    if (super) {
+        list_push_back(&super->list, &rule->list);
+    } else {
+        list_init(&rule->list);
+    }
+#if 0
+    netflow_flow_clear(&rule->nf_flow);
+    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
+#endif
+
+    return rule;
+}
+
+/* Executes the actions indicated by 'rule' on 'packet', which is in flow
+ * 'flow' and is considered to have arrived on XFLOW port 'in_port'.
+ *
+ * The flow that 'packet' actually contains does not need to actually match
+ * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
+ * the packet and byte counters for 'rule' will be credited for the packet sent
+ * out whether or not the packet actually matches 'rule'.
+ *
+ * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
+ * the caller must already have accurately composed XFLOW actions for it given
+ * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
+ * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
+ * function will compose a set of XFLOW actions based on 'rule''s OpenFlow
+ * actions and apply them to 'packet'. */
+static void
+wx_rule_execute(struct wx *wx, struct wx_rule *rule,
+                struct ofpbuf *packet, const flow_t *flow)
+{
+    const union xflow_action *actions;
+    size_t n_actions;
+    struct xflow_actions a;
+
+    /* Grab or compose the XFLOW actions.
+     *
+     * The special case for an exact-match 'rule' where 'flow' is not the
+     * rule's flow is important to avoid, e.g., sending a packet out its input
+     * port simply because the XFLOW actions were composed for the wrong
+     * scenario. */
+    if (rule->wr.cr.flow.wildcards
+        || !flow_equal(flow, &rule->wr.cr.flow))
+    {
+        /* Translate from the OpenFlow actions (always the super-rule's for a
+         * subrule).  On translation error, silently drop the packet. */
+        struct wx_rule *super = rule->super ? rule->super : rule;
+        if (wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, flow,
+                             packet, &a, NULL)) {
+            return;
+        }
+        actions = a.actions;
+        n_actions = a.n_actions;
+    } else {
+        actions = rule->xflow_actions;
+        n_actions = rule->n_xflow_actions;
+    }
+
+    /* Execute the XFLOW actions. */
+    if (!xfif_execute(wx->xfif, flow->in_port,
+                      actions, n_actions, packet)) {
+        struct xflow_flow_stats stats;
+        flow_extract_stats(flow, packet, &stats);
+        wx_rule_update_stats(wx, rule, &stats);
+        rule->used = time_msec();
+        //XXX netflow_flow_update_time(wx->netflow, &rule->nf_flow, rule->used);
+    }
+}
+
+/* Inserts 'rule' into 'wx''s classifier, executes it on 'packet' (if
+ * nonnull, considered received on 'in_port'), installs exact-match rules in
+ * the datapath, and frees any rule the insertion displaced. */
+static void
+wx_rule_insert(struct wx *wx, struct wx_rule *rule, struct ofpbuf *packet,
+               uint16_t in_port)
+{
+    struct wx_rule *displaced_rule;
+
+    /* Insert the rule in the classifier. */
+    displaced_rule = wx_rule_cast(classifier_insert(&wx->cls, &rule->wr.cr));
+    if (!rule->wr.cr.flow.wildcards) {
+        wx_rule_make_actions(wx, rule, packet);
+    }
+
+    /* Send the packet and credit it to the rule. */
+    if (packet) {
+        flow_t flow;
+        flow_extract(packet, in_port, &flow);
+        wx_rule_execute(wx, rule, packet, &flow);
+    }
+
+    /* Install the rule in the datapath only after sending the packet, to
+     * avoid packet reordering. */
+    if (rule->wr.cr.flow.wildcards) {
+        COVERAGE_INC(wx_add_wc_flow);
+        wx->need_revalidate = true;
+    } else {
+        wx_rule_install(wx, rule, displaced_rule);
+    }
+
+    /* Free the rule that was displaced, if any. */
+    if (displaced_rule) {
+        wx_rule_destroy(wx, displaced_rule);
+    }
+}
+
+/* Creates and returns an exact-match subrule of 'rule' for 'flow',
+ * inheriting 'rule''s timeouts, and inserts it into 'wx''s classifier.  The
+ * subrule has no OpenFlow actions of its own (it uses its super-rule's). */
+static struct wx_rule *
+wx_rule_create_subrule(struct wx *wx, struct wx_rule *rule, const flow_t *flow)
+{
+    struct wx_rule *subrule;
+
+    subrule = wx_rule_create(rule, NULL, 0,
+                             rule->wr.idle_timeout,
+                             rule->wr.hard_timeout);
+    COVERAGE_INC(wx_subrule_create);
+    cls_rule_from_flow(&subrule->wr.cr, flow);
+    classifier_insert_exact(&wx->cls, &subrule->wr.cr);
+
+    return subrule;
+}
+
+/* Recomposes the XFLOW actions for exact-match rule 'rule' from its OpenFlow
+ * actions (or its super-rule's, for a subrule), updating rule->may_install
+ * as a side effect.  'packet' is the packet being processed, or a null
+ * pointer when revalidating without one.
+ *
+ * Returns true if the actions changed, false otherwise. */
+static bool
+wx_rule_make_actions(struct wx *wx, struct wx_rule *rule,
+                     const struct ofpbuf *packet)
+{
+    const struct wx_rule *super;
+    struct xflow_actions a;
+    size_t actions_len;
+
+    assert(!rule->wr.cr.flow.wildcards);
+
+    super = rule->super ? rule->super : rule;
+    wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions,
+                     &rule->wr.cr.flow, packet, &a, &rule->may_install);
+
+    actions_len = a.n_actions * sizeof *a.actions;
+    if (rule->n_xflow_actions != a.n_actions
+        || memcmp(rule->xflow_actions, a.actions, actions_len)) {
+        free(rule->xflow_actions);
+        rule->n_xflow_actions = a.n_actions;
+        rule->xflow_actions = xmemdup(a.actions, actions_len);
+        return true;
+    } else {
+        /* Bump the counter in the branch its name describes: the freshly
+         * composed actions are identical to the stored ones.  (It was
+         * previously incremented in the "changed" branch above.) */
+        COVERAGE_INC(wx_xflow_unchanged);
+        return false;
+    }
+}
+
+static int
+do_put_flow(struct wx *wx, struct wx_rule *rule, int flags,
+ struct xflow_flow_put *put)
+{
+ memset(&put->flow.stats, 0, sizeof put->flow.stats);
+ xflow_key_from_flow(&put->flow.key, &rule->wr.cr.flow);
+ put->flow.actions = rule->xflow_actions;
+ put->flow.n_actions = rule->n_xflow_actions;
+ put->flow.flags = 0;
+ put->flags = flags;
+ return xfif_flow_put(wx->xfif, put);
+}
+
+/* Installs exact-match 'rule' in the datapath (if its actions permit
+ * installation).  'displaced_rule', if nonnull, is a rule for the same flow
+ * being replaced: its final stats are harvested from the put reply, or it is
+ * explicitly uninstalled when 'rule' cannot be installed. */
+static void
+wx_rule_install(struct wx *wx, struct wx_rule *rule, struct wx_rule *displaced_rule)
+{
+    assert(!rule->wr.cr.flow.wildcards);
+
+    if (rule->may_install) {
+        struct xflow_flow_put put;
+        if (!do_put_flow(wx, rule,
+                         XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS,
+                         &put)) {
+            rule->installed = true;
+            if (displaced_rule) {
+                wx_rule_update_stats(wx, displaced_rule, &put.flow.stats);
+                wx_rule_post_uninstall(wx, displaced_rule);
+            }
+        }
+    } else if (displaced_rule) {
+        wx_rule_uninstall(wx, displaced_rule);
+    }
+}
+
+/* Re-sends 'rule' to the datapath.  Used when the datapath reported a miss
+ * for a flow we believe is installed; stats are preserved (no ZERO_STATS). */
+static void
+wx_rule_reinstall(struct wx *wx, struct wx_rule *rule)
+{
+    if (rule->installed) {
+        struct xflow_flow_put put;
+        COVERAGE_INC(wx_dp_missed);
+        do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put);
+    } else {
+        wx_rule_install(wx, rule, NULL);
+    }
+}
+
+/* Recomposes 'rule''s XFLOW actions and brings the datapath into sync:
+ * re-puts the flow if the actions changed, installs it if it became
+ * installable, or uninstalls it if it no longer may be installed. */
+static void
+wx_rule_update_actions(struct wx *wx, struct wx_rule *rule)
+{
+    bool actions_changed;
+#if 0
+    uint16_t new_out_iface, old_out_iface;
+
+    old_out_iface = rule->nf_flow.output_iface;
+#endif
+    actions_changed = wx_rule_make_actions(wx, rule, NULL);
+
+    if (rule->may_install) {
+        if (rule->installed) {
+            if (actions_changed) {
+                struct xflow_flow_put put;
+                /* Replace in place; harvest the old flow's stats. */
+                do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY
+                            | XFLOWPF_ZERO_STATS, &put);
+                wx_rule_update_stats(wx, rule, &put.flow.stats);
+#if 0
+                /* Temporarily set the old output iface so that NetFlow
+                 * messages have the correct output interface for the old
+                 * stats. */
+                new_out_iface = rule->nf_flow.output_iface;
+                rule->nf_flow.output_iface = old_out_iface;
+#endif
+                wx_rule_post_uninstall(wx, rule);
+                //rule->nf_flow.output_iface = new_out_iface;
+            }
+        } else {
+            wx_rule_install(wx, rule, NULL);
+        }
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+}
+\f
+/* Appends an output-to-group action for 'group' to 'actions'.  For the flood
+ * and all groups, records NF_OUT_FLOOD as the NetFlow output interface. */
+static void
+add_output_group_action(struct xflow_actions *actions, uint16_t group,
+                        uint16_t *nf_output_iface)
+{
+    xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group = group;
+
+    if (group == WX_GROUP_ALL || group == WX_GROUP_FLOOD) {
+        *nf_output_iface = NF_OUT_FLOOD;
+    }
+}
+
+/* Appends a send-to-controller action to 'actions'.  The controller action
+ * argument is the OpenFlow max_len; a max_len of 0 becomes UINT32_MAX
+ * (presumably "send the whole packet" -- confirm against xfif). */
+static void
+add_controller_action(struct xflow_actions *actions,
+                      const struct ofp_action_output *oao)
+{
+    union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER);
+    a->controller.arg = oao->max_len ? ntohs(oao->max_len) : UINT32_MAX;
+}
+
+/* Context carried through OpenFlow-to-XFLOW action translation. */
+struct wx_xlate_ctx {
+    /* Input. */
+    const flow_t *flow;         /* Flow to which these actions correspond. */
+    int recurse;                /* Recursion level, via xlate_table_action. */
+    struct wx *wx;              /* Datapath being translated for. */
+    const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
+                                  * null pointer if we are revalidating
+                                  * without a packet to refer to. */
+
+    /* Output. */
+    struct xflow_actions *out;  /* Datapath actions. */
+    //tag_type *tags;             /* Tags associated with OFPP_NORMAL actions. */
+    bool may_set_up_flow;       /* True ordinarily; false if the actions must
+                                 * be reassessed for every packet. */
+    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
+};
+
+static void do_xlate_actions(const union ofp_action *in, size_t n_in,
+ struct wx_xlate_ctx *ctx);
+
+/* Appends an output action to XFLOW port 'port' to the translation output,
+ * unless that port has forwarding disabled (OFPPC_NO_FWD). */
+static void
+add_output_action(struct wx_xlate_ctx *ctx, uint16_t port)
+{
+    const struct wdp_port *wdp_port = port_array_get(&ctx->wx->ports, port);
+
+    if (wdp_port) {
+        if (wdp_port->opp.config & OFPPC_NO_FWD) {
+            /* Forwarding disabled on port. */
+            return;
+        }
+    } else {
+        /*
+         * We don't have an ofport record for this port, but it doesn't hurt to
+         * allow forwarding to it anyhow.  Maybe such a port will appear later
+         * and we're pre-populating the flow table.
+         */
+    }
+
+    xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port;
+    //ctx->nf_output_iface = port;
+}
+
+/* Looks up the rule matching 'flow' in 'wx''s classifier.  If the match is a
+ * subrule while revalidation is pending, the subrule is revalidated first;
+ * returns a null pointer if revalidation removed it. */
+static struct wx_rule *
+wx_rule_lookup_valid(struct wx *wx, const flow_t *flow)
+{
+    struct wx_rule *rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
+
+    /* The rule we found might not be valid, since we could be in need of
+     * revalidation.  If it is not valid, don't return it. */
+    if (rule
+        && rule->super
+        && wx->need_revalidate
+        && !wx_rule_revalidate(wx, rule)) {
+        COVERAGE_INC(wx_invalidated);
+        return NULL;
+    }
+
+    return rule;
+}
+
+/* Implements OFPP_TABLE and NXAST_RESUBMIT: re-looks up 'ctx->flow' (with
+ * 'in_port' substituted) in the flow table and translates the matching
+ * rule's actions.  A single level of recursion is allowed; deeper resubmits
+ * are silently ignored. */
+static void
+xlate_table_action(struct wx_xlate_ctx *ctx, uint16_t in_port)
+{
+    if (!ctx->recurse) {
+        struct wx_rule *rule;
+        flow_t flow;
+
+        flow = *ctx->flow;
+        flow.in_port = in_port;
+
+        rule = wx_rule_lookup_valid(ctx->wx, &flow);
+        if (rule) {
+            if (rule->super) {
+                rule = rule->super; /* Subrules borrow the super's actions. */
+            }
+
+            ctx->recurse++;
+            do_xlate_actions(rule->wr.actions, rule->wr.n_actions, ctx);
+            ctx->recurse--;
+        }
+    }
+}
+
+/* Translates a single OFPAT_OUTPUT action, handling the reserved OpenFlow
+ * port numbers (IN_PORT, TABLE, NORMAL, FLOOD, ALL, CONTROLLER, LOCAL) and
+ * mapping ordinary ports to XFLOW ports.  Output back to the input port is
+ * suppressed unless OFPP_IN_PORT was used explicitly.  The trailing logic
+ * merges this action's NetFlow output interface with any previous one
+ * (multiple outputs become NF_OUT_MULTI). */
+static void
+xlate_output_action(struct wx_xlate_ctx *ctx,
+                    const struct ofp_action_output *oao)
+{
+    uint16_t xflow_port;
+    uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+
+    ctx->nf_output_iface = NF_OUT_DROP;
+
+    switch (ntohs(oao->port)) {
+    case OFPP_IN_PORT:
+        add_output_action(ctx, ctx->flow->in_port);
+        break;
+    case OFPP_TABLE:
+        xlate_table_action(ctx, ctx->flow->in_port);
+        break;
+    case OFPP_NORMAL:
+#if 0
+        if (!ctx->wx->ofhooks->normal_cb(ctx->flow, ctx->packet,
+                                         ctx->out, ctx->tags,
+                                         &ctx->nf_output_iface,
+                                         ctx->wx->aux)) {
+            COVERAGE_INC(wx_uninstallable);
+            ctx->may_set_up_flow = false;
+        }
+        break;
+#else
+        /* fall through to flood for now */
+#endif
+    case OFPP_FLOOD:
+        add_output_group_action(ctx->out, WX_GROUP_FLOOD,
+                                &ctx->nf_output_iface);
+        break;
+    case OFPP_ALL:
+        add_output_group_action(ctx->out, WX_GROUP_ALL, &ctx->nf_output_iface);
+        break;
+    case OFPP_CONTROLLER:
+        add_controller_action(ctx->out, oao);
+        break;
+    case OFPP_LOCAL:
+        add_output_action(ctx, XFLOWP_LOCAL);
+        break;
+    default:
+        xflow_port = ofp_port_to_xflow_port(ntohs(oao->port));
+        if (xflow_port != ctx->flow->in_port) {
+            add_output_action(ctx, xflow_port);
+        }
+        break;
+    }
+
+    if (prev_nf_output_iface == NF_OUT_FLOOD) {
+        ctx->nf_output_iface = NF_OUT_FLOOD;
+    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+        ctx->nf_output_iface = prev_nf_output_iface;
+    } else if (prev_nf_output_iface != NF_OUT_DROP &&
+               ctx->nf_output_iface != NF_OUT_FLOOD) {
+        ctx->nf_output_iface = NF_OUT_MULTI;
+    }
+}
+
+/* Translates a Nicira vendor extension action.  Currently only
+ * NXAST_RESUBMIT is implemented; unknown subtypes are logged and ignored. */
+static void
+xlate_nicira_action(struct wx_xlate_ctx *ctx,
+                    const struct nx_action_header *nah)
+{
+    const struct nx_action_resubmit *nar;
+    int subtype = ntohs(nah->subtype);
+
+    assert(nah->vendor == htonl(NX_VENDOR_ID));
+    switch (subtype) {
+    case NXAST_RESUBMIT:
+        nar = (const struct nx_action_resubmit *) nah;
+        xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port)));
+        break;
+
+    default:
+        VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
+        break;
+    }
+}
+
+/* Translates the 'n_in' OpenFlow actions in 'in' into XFLOW actions appended
+ * to 'ctx->out'.  Produces no actions at all (drop) when the input port's
+ * configuration forbids receiving this flow (OFPPC_NO_RECV, or
+ * OFPPC_NO_RECV_STP for frames addressed to the STP multicast address).
+ * Unknown action types are logged and skipped. */
+static void
+do_xlate_actions(const union ofp_action *in, size_t n_in,
+                 struct wx_xlate_ctx *ctx)
+{
+    struct actions_iterator iter;
+    const union ofp_action *ia;
+    const struct wdp_port *port;
+
+    port = port_array_get(&ctx->wx->ports, ctx->flow->in_port);
+    if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
+        port->opp.config & (eth_addr_equals(ctx->flow->dl_dst, stp_eth_addr)
+                            ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
+        /* Drop this flow. */
+        return;
+    }
+
+    for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
+        uint16_t type = ntohs(ia->type);
+        union xflow_action *oa;
+
+        switch (type) {
+        case OFPAT_OUTPUT:
+            xlate_output_action(ctx, &ia->output);
+            break;
+
+        case OFPAT_SET_VLAN_VID:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
+            oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK);
+            oa->dl_tci.mask = htons(VLAN_VID_MASK);
+            break;
+
+        case OFPAT_SET_VLAN_PCP:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
+            oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT)
+                                   & VLAN_PCP_MASK);
+            oa->dl_tci.mask = htons(VLAN_PCP_MASK);
+            break;
+
+        case OFPAT_STRIP_VLAN:
+            xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN);
+            break;
+
+        case OFPAT_SET_DL_SRC:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC);
+            memcpy(oa->dl_addr.dl_addr,
+                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+            break;
+
+        case OFPAT_SET_DL_DST:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST);
+            memcpy(oa->dl_addr.dl_addr,
+                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+            break;
+
+        case OFPAT_SET_NW_SRC:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC);
+            oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+            break;
+
+        case OFPAT_SET_NW_DST:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST);
+            oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+            break;
+
+        case OFPAT_SET_NW_TOS:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS);
+            oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
+            break;
+
+        case OFPAT_SET_TP_SRC:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC);
+            oa->tp_port.tp_port = ia->tp_port.tp_port;
+            break;
+
+        case OFPAT_SET_TP_DST:
+            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST);
+            oa->tp_port.tp_port = ia->tp_port.tp_port;
+            break;
+
+        case OFPAT_VENDOR:
+            xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
+            break;
+
+        default:
+            VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
+            break;
+        }
+    }
+}
+
+/* Returns true if 'flow' and 'actions' may be set up as a flow in the kernel.
+ * This is true most of the time, but we don't allow flows that would prevent
+ * DHCP replies from being seen by the local port to be set up in the
+ * kernel.
+ *
+ * We only need this, strictly speaking, when in-band control is turned on. */
+static bool
+wx_may_set_up(const flow_t *flow, const struct xflow_actions *actions)
+{
+    if (flow->dl_type == htons(ETH_TYPE_IP)
+        && flow->nw_proto == IP_TYPE_UDP
+        && flow->tp_src == htons(DHCP_SERVER_PORT)
+        && flow->tp_dst == htons(DHCP_CLIENT_PORT)) {
+        int i;
+
+        /* A DHCP server->client flow is only installable if it already
+         * outputs to the local port. */
+        for (i = 0; i < actions->n_actions; i++) {
+            const struct xflow_action_output *oao = &actions->actions[i].output;
+            if (oao->type == XFLOWAT_OUTPUT && oao->port == XFLOWP_LOCAL) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    return true;
+}
+
+/* Translates the 'n_in' OpenFlow actions in 'in', in the context of 'flow'
+ * (and 'packet', which may be null), into XFLOW actions in 'out'.  If
+ * 'may_set_up_flow' is nonnull, sets '*may_set_up_flow' to whether the
+ * result may be installed as a kernel flow.  Returns 0 on success or an
+ * OpenFlow error code if the translated actions overflow 'out'. */
+static int
+wx_xlate_actions(struct wx *wx, const union ofp_action *in, size_t n_in,
+                 const flow_t *flow, const struct ofpbuf *packet,
+                 struct xflow_actions *out, bool *may_set_up_flow)
+{
+    //tag_type no_tags = 0;
+    struct wx_xlate_ctx ctx;
+    COVERAGE_INC(wx_ofp2xflow);
+    xflow_actions_init(out);
+    ctx.flow = flow;
+    ctx.recurse = 0;
+    ctx.wx = wx;
+    ctx.packet = packet;
+    ctx.out = out;
+    //ctx.tags = tags ? tags : &no_tags;
+    ctx.may_set_up_flow = true;
+    ctx.nf_output_iface = NF_OUT_DROP;
+    do_xlate_actions(in, n_in, &ctx);
+
+    if (may_set_up_flow) {
+        *may_set_up_flow = ctx.may_set_up_flow && wx_may_set_up(flow, out);
+    }
+#if 0
+    if (nf_output_iface) {
+        *nf_output_iface = ctx.nf_output_iface;
+    }
+#endif
+    if (xflow_actions_overflow(out)) {
+        xflow_actions_init(out);
+        return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
+    }
+    return 0;
+}
+\f
+/* Polls the datapath for all installed flows, credits their statistics to
+ * the corresponding classifier rules, and deletes any datapath flow that has
+ * no matching installed rule.  Errors from the flow dump are silently
+ * ignored (the next sweep will retry). */
+static void
+update_used(struct wx *wx)
+{
+    struct xflow_flow *flows;
+    size_t n_flows;
+    size_t i;
+    int error;
+
+    error = xfif_flow_list_all(wx->xfif, &flows, &n_flows);
+    if (error) {
+        return;
+    }
+
+    for (i = 0; i < n_flows; i++) {
+        struct xflow_flow *f = &flows[i];
+        struct wx_rule *rule;
+        flow_t flow;
+
+        xflow_key_to_flow(&f->key, &flow);
+        rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow));
+        if (!rule || !rule->installed) {
+            COVERAGE_INC(wx_unexpected_rule);
+            xfif_flow_del(wx->xfif, f);
+            continue;
+        }
+
+        wx_rule_update_time(wx, rule, &f->stats);
+        wx_rule_account(wx, rule, f->stats.n_bytes);
+    }
+    free(flows);
+}
+
+/* Evicts an idle installed flow from the datapath.  A subrule is removed
+ * entirely (its super-rule can regenerate it); a standalone exact-match rule
+ * is merely uninstalled and stays in the classifier. */
+static void
+uninstall_idle_flow(struct wx *wx, struct wx_rule *rule)
+{
+    assert(rule->installed);
+    assert(!rule->wr.cr.flow.wildcards);
+
+    if (rule->super) {
+        wx_rule_remove(wx, rule);
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+}
+
+/* classifier_for_each() callback for the periodic expiration sweep: expires
+ * 'cls_rule' if its hard or idle timeout has passed, removing it (and any
+ * subrules); otherwise, evicts it from the datapath if it has not been used
+ * for at least 5 seconds. */
+static void
+expire_rule(struct cls_rule *cls_rule, void *wx_)
+{
+    struct wx *wx = wx_;
+    struct wx_rule *rule = wx_rule_cast(cls_rule);
+    long long int hard_expire, idle_expire, expire, now;
+
+    /* A timeout of 0 means "never expire".  Idle expiration only applies to
+     * subrules and to rules with no subrules. */
+    hard_expire = (rule->wr.hard_timeout
+                   ? rule->wr.created + rule->wr.hard_timeout * 1000
+                   : LLONG_MAX);
+    idle_expire = (rule->wr.idle_timeout
+                   && (rule->super || list_is_empty(&rule->list))
+                   ? rule->used + rule->wr.idle_timeout * 1000
+                   : LLONG_MAX);
+    expire = MIN(hard_expire, idle_expire);
+
+    now = time_msec();
+    if (now < expire) {
+        /* Not expired; just evict from the datapath if unused for 5 s. */
+        if (rule->installed && now >= rule->used + 5000) {
+            uninstall_idle_flow(wx, rule);
+        } else if (!rule->wr.cr.flow.wildcards) {
+            //XXX active_timeout(wx, rule);
+        }
+
+        return;
+    }
+
+    COVERAGE_INC(wx_expired);
+
+    /* Update stats.  This code will be a no-op if the rule expired
+     * due to an idle timeout. */
+    if (rule->wr.cr.flow.wildcards) {
+        struct wx_rule *subrule, *next;
+        LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) {
+            wx_rule_remove(wx, subrule);
+        }
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+
+#if 0 /* XXX */
+    if (!wx_rule_is_hidden(rule)) {
+        send_flow_removed(wx, rule, now,
+                          (now >= hard_expire
+                           ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
+    }
+#endif
+    wx_rule_remove(wx, rule);
+}
+
+/* Arguments passed through classifier_for_each() to revalidate_cb(). */
+struct revalidate_cbdata {
+    struct wx *wx;              /* Datapath being revalidated. */
+    bool revalidate_all;        /* Revalidate all exact-match rules? */
+    bool revalidate_subrules;   /* Revalidate all exact-match subrules? */
+    //struct tag_set revalidate_set; /* Set of tags to revalidate. */
+};
+
+/* Revalidates exact-match rule 'rule': re-parents or removes a subrule whose
+ * wildcarded super-rule changed, then refreshes its datapath actions.
+ * Returns false if 'rule' was removed, true otherwise.
+ *
+ * NOTE(review): duplicate of wx_rule_revalidate() above -- candidates for
+ * merging. */
+static bool
+revalidate_rule(struct wx *wx, struct wx_rule *rule)
+{
+    const flow_t *flow = &rule->wr.cr.flow;
+
+    COVERAGE_INC(wx_revalidate_rule);
+    if (rule->super) {
+        struct wx_rule *super;
+        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+        if (!super) {
+            wx_rule_remove(wx, rule);
+            return false;
+        } else if (super != rule->super) {
+            COVERAGE_INC(wx_revalidate_moved);
+            list_remove(&rule->list);
+            list_push_back(&super->list, &rule->list);
+            rule->super = super;
+            rule->wr.hard_timeout = super->wr.hard_timeout;
+            rule->wr.idle_timeout = super->wr.idle_timeout;
+            rule->wr.created = super->wr.created;
+            rule->used = 0;
+        }
+    }
+
+    wx_rule_update_actions(wx, rule);
+    return true;
+}
+
+/* classifier_for_each() callback: revalidates 'sub_' if the cbdata asks for
+ * all exact-match rules or (for subrules) all subrules. */
+static void
+revalidate_cb(struct cls_rule *sub_, void *cbdata_)
+{
+    struct wx_rule *sub = wx_rule_cast(sub_);
+    struct revalidate_cbdata *cbdata = cbdata_;
+
+    if (cbdata->revalidate_all
+        || (cbdata->revalidate_subrules && sub->super)
+        /*|| (tag_set_intersects(&cbdata->revalidate_set, sub->tags))*/) {
+        revalidate_rule(cbdata->wx, sub);
+    }
+}
+
+/* Performs periodic work for one wx: processes port changes, runs the
+ * once-per-second expiration sweep, and revalidates exact-match subrules
+ * when 'need_revalidate' is set. */
+static void
+wx_run_one(struct wx *wx)
+{
+    wx_port_run(wx);
+
+    if (time_msec() >= wx->next_expiration) {
+        COVERAGE_INC(wx_expiration);
+        wx->next_expiration = time_msec() + 1000;
+        update_used(wx);
+
+        classifier_for_each(&wx->cls, CLS_INC_ALL, expire_rule, wx);
+
+        /* XXX account_checkpoint_cb */
+    }
+
+    if (wx->need_revalidate /*|| !tag_set_is_empty(&p->revalidate_set)*/) {
+        struct revalidate_cbdata cbdata;
+        cbdata.wx = wx;
+        cbdata.revalidate_all = false;
+        cbdata.revalidate_subrules = wx->need_revalidate;
+        //cbdata.revalidate_set = wx->revalidate_set;
+        //tag_set_init(&wx->revalidate_set);
+        COVERAGE_INC(wx_revalidate);
+        classifier_for_each(&wx->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
+        wx->need_revalidate = false;
+    }
+}
+
+/* wdp class 'run' hook: runs periodic work for every open wx. */
+static void
+wx_run(void)
+{
+    struct wx *wx;
+
+    LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+        wx_run_one(wx);
+    }
+    xf_run();
+}
+
+/* Arranges for the poll loop to wake when 'wx' has work for wx_run_one():
+ * port changes, pending revalidation (immediate wake), or the next
+ * expiration deadline.  A past deadline yields a nonpositive timeout --
+ * presumably poll_timer_wait() then wakes immediately; confirm. */
+static void
+wx_wait_one(struct wx *wx)
+{
+    xfif_port_poll_wait(wx->xfif);
+    netdev_monitor_poll_wait(wx->netdev_monitor);
+    if (wx->need_revalidate /*|| !tag_set_is_empty(&p->revalidate_set)*/) {
+        poll_immediate_wake();
+    } else if (wx->next_expiration != LLONG_MAX) {
+        poll_timer_wait(wx->next_expiration - time_msec());
+    }
+}
+
+/* wdp class 'wait' hook: registers poll-loop wakeups for every open wx. */
+static void
+wx_wait(void)
+{
+    struct wx *wx;
+
+    LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+        wx_wait_one(wx);
+    }
+    xf_wait();
+}
+\f
+static int wx_flow_flush(struct wdp *);
+
+/* wdp class 'enumerate' hook: lists the names of all datapaths of this
+ * class into 'all_wdps'.  Returns xf_enumerate_names()'s result; names
+ * gathered before an error are still returned. */
+static int
+wx_enumerate(const struct wdp_class *wdp_class, struct svec *all_wdps)
+{
+    struct svec names = SVEC_EMPTY_INITIALIZER;
+    int error = xf_enumerate_names(wdp_class->type, &names);
+    svec_move(all_wdps, &names);
+    return error;
+}
+
+/* wdp class 'open' hook: opens (or, if 'create' is true, creates and opens)
+ * the xfif datapath named 'name' and wraps it in a new wx.  On success
+ * stores the new wdp in '*wdpp' and returns 0; on failure returns a positive
+ * errno value. */
+static int
+wx_open(const struct wdp_class *wdp_class, const char *name, bool create,
+        struct wdp **wdpp)
+{
+    struct xfif *xfif;
+    int error;
+
+    error = (create
+             ? xfif_create_and_open(name, wdp_class->type, &xfif)
+             : xfif_open(name, wdp_class->type, &xfif));
+    if (!error) {
+        struct wx *wx;
+
+        /* Use xzalloc, not xmalloc: members without an explicit assignment
+         * below (in particular 'need_revalidate', read by wx_run_one() and
+         * wx_wait_one()) would otherwise be indeterminate. */
+        wx = xzalloc(sizeof *wx);
+        list_push_back(&all_wx, &wx->list_node);
+        wdp_init(&wx->wdp, wdp_class, name, 0, 0);
+        wx->xfif = xfif;
+        classifier_init(&wx->cls);
+        wx->netdev_monitor = netdev_monitor_create();
+        port_array_init(&wx->ports);
+        shash_init(&wx->port_by_name);
+        wx->next_expiration = time_msec() + 1000;
+
+        wx_port_init(wx);
+
+        *wdpp = &wx->wdp;
+    }
+
+    return error;
+}
+
+/* wdp class 'close' callback: releases the resources owned by 'wdp' without
+ * destroying the underlying datapath. */
+static void
+wx_close(struct wdp *wdp)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ wx_flow_flush(wdp);
+ xfif_close(wx->xfif);
+ classifier_destroy(&wx->cls);
+ netdev_monitor_destroy(wx->netdev_monitor);
+ list_remove(&wx->list_node);
+ /* NOTE(review): the wdp_port structures in wx->ports and the
+ * wx->port_by_name shash do not appear to be freed here -- possible
+ * leak; confirm ownership. */
+ free(wx);
+}
+
+/* wdp class 'get_all_names' callback: collects every name that refers to
+ * wx's datapath into 'all_names'. */
+static int
+wx_get_all_names(const struct wdp *wdp, struct svec *all_names)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_get_all_names(wx->xfif, all_names);
+}
+
+/* wdp class 'destroy' callback: deletes the underlying xflow datapath. */
+static int
+wx_destroy(struct wdp *wdp)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_delete(wx->xfif);
+}
+
+/* Converts the fields of '*opp' from host to network byte order, in place. */
+static void
+hton_ofp_phy_port(struct ofp_phy_port *opp)
+{
+ opp->port_no = htons(opp->port_no);
+ opp->config = htonl(opp->config);
+ opp->state = htonl(opp->state);
+ opp->curr = htonl(opp->curr);
+ opp->advertised = htonl(opp->advertised);
+ opp->supported = htonl(opp->supported);
+ opp->peer = htonl(opp->peer);
+}
+
+/* wdp class 'get_features' callback: composes an OFPT_FEATURES_REPLY for
+ * this datapath into a new ofpbuf returned through '*featuresp'.  The
+ * caller owns the returned buffer. */
+static int
+wx_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct ofp_switch_features *osf;
+ struct ofpbuf *buf;
+ unsigned int port_no;
+ struct wdp_port *port;
+
+ osf = make_openflow(sizeof *osf, OFPT_FEATURES_REPLY, &buf);
+ /* Two tables: one exact-match, one wildcarded. */
+ osf->n_tables = 2;
+ osf->capabilities = htonl(OFPC_ARP_MATCH_IP);
+ osf->actions = htonl((1u << OFPAT_OUTPUT) |
+ (1u << OFPAT_SET_VLAN_VID) |
+ (1u << OFPAT_SET_VLAN_PCP) |
+ (1u << OFPAT_STRIP_VLAN) |
+ (1u << OFPAT_SET_DL_SRC) |
+ (1u << OFPAT_SET_DL_DST) |
+ (1u << OFPAT_SET_NW_SRC) |
+ (1u << OFPAT_SET_NW_DST) |
+ (1u << OFPAT_SET_NW_TOS) |
+ (1u << OFPAT_SET_TP_SRC) |
+ (1u << OFPAT_SET_TP_DST));
+
+ /* Append one ofp_phy_port, in network byte order, per datapath port. */
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
+ }
+
+ *featuresp = buf;
+ return 0;
+}
+
+/* Classifier callback: increments '*n_subrules_' for each rule that has a
+ * 'super', i.e. each exact-match subrule instantiated from a wildcarded
+ * rule. */
+static void
+count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
+{
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+ int *n_subrules = n_subrules_;
+
+ if (rule->super) {
+ (*n_subrules)++;
+ }
+}
+
+/* wdp class 'get_stats' callback: fills '*stats' from datapath statistics
+ * and classifier counts.  Returns 0 on success, otherwise a positive errno
+ * value, in which case '*stats' is not filled in. */
+static int
+wx_get_stats(const struct wdp *wdp, struct wdp_stats *stats)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_stats xflow_stats;
+ int n_subrules;
+ int error;
+
+ error = xfif_get_xf_stats(wx->xfif, &xflow_stats);
+ if (error) {
+ /* Fix: the original fell through and filled '*stats' from an
+ * uninitialized 'xflow_stats' on failure. */
+ return error;
+ }
+
+ /* Subrules are an implementation detail, so exclude them from the
+ * exact-match flow count reported to the client. */
+ n_subrules = 0;
+ classifier_for_each(&wx->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
+
+ stats->exact.n_flows = classifier_count_exact(&wx->cls) - n_subrules;
+ stats->exact.cur_capacity = xflow_stats.cur_capacity;
+ stats->exact.max_capacity = MIN(WX_MAX_EXACT, xflow_stats.max_capacity);
+ stats->exact.n_hit = xflow_stats.n_hit;
+ stats->exact.n_missed = xflow_stats.n_missed;
+ stats->exact.n_lost = xflow_stats.n_lost;
+
+ stats->wild.n_flows = classifier_count_wild(&wx->cls);
+ stats->wild.cur_capacity = WX_MAX_WILD;
+ stats->wild.max_capacity = WX_MAX_WILD;
+ stats->wild.n_hit = 0; /* XXX */
+ stats->wild.n_missed = 0; /* XXX */
+ stats->wild.n_lost = 0; /* XXX */
+
+ stats->n_ports = xflow_stats.n_ports;
+ stats->max_ports = xflow_stats.max_ports;
+
+ stats->n_frags = xflow_stats.n_frags;
+
+ stats->max_miss_queue = xflow_stats.max_miss_queue;
+ stats->max_action_queue = xflow_stats.max_action_queue;
+ stats->max_sflow_queue = xflow_stats.max_sflow_queue;
+
+ return 0;
+}
+
+/* wdp class 'get_drop_frags' callback: reports the datapath's IP-fragment
+ * drop policy through '*drop_frags'. */
+static int
+wx_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
+{
+ return xfif_get_drop_frags(wx_cast(wdp)->xfif, drop_frags);
+}
+
+/* wdp class 'set_drop_frags' callback: sets the datapath's IP-fragment drop
+ * policy. */
+static int
+wx_set_drop_frags(struct wdp *wdp, bool drop_frags)
+{
+ return xfif_set_drop_frags(wx_cast(wdp)->xfif, drop_frags);
+}
+
+/* wdp class 'port_add' callback: adds 'devname' to the datapath, marking it
+ * internal if requested, and reports the assigned number via '*port_no'. */
+static int
+wx_port_add(struct wdp *wdp, const char *devname,
+ bool internal, uint16_t *port_no)
+{
+ uint16_t xflow_flags = internal ? XFLOW_PORT_INTERNAL : 0;
+
+ return xfif_port_add(wx_cast(wdp)->xfif, devname, xflow_flags, port_no);
+}
+
+/* wdp class 'port_del' callback: removes port 'port_no' from the datapath. */
+static int
+wx_port_del(struct wdp *wdp, uint16_t port_no)
+{
+ return xfif_port_del(wx_cast(wdp)->xfif, port_no);
+}
+
+/* wdp class 'port_query_by_number' callback: looks up OpenFlow port number
+ * 'port_no'.  Returns 0 and sets '*portp' on success, ENOENT otherwise. */
+static int
+wx_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
+ struct wdp_port **portp)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wdp_port *port;
+
+ port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+ *portp = port;
+ return port ? 0 : ENOENT;
+}
+
+/* wdp class 'port_query_by_name' callback: looks up the port whose network
+ * device is named 'devname'.  Returns 0 and sets '*portp' on success,
+ * ENOENT otherwise. */
+static int
+wx_port_query_by_name(const struct wdp *wdp, const char *devname,
+ struct wdp_port **portp)
+{
+ struct wdp_port *port;
+
+ port = shash_find_data(&wx_cast(wdp)->port_by_name, devname);
+ *portp = port;
+ return port ? 0 : ENOENT;
+}
+
+/* wdp class 'port_set_config' callback: applies OpenFlow port config bits
+ * 'config' to port 'port_no', acting only on the bits that changed.
+ * Returns 0 on success (even if a netdev flag change failed -- see note),
+ * ENOENT if the port does not exist. */
+static int
+wx_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wdp_port *port;
+ uint32_t changes;
+
+ port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+ if (!port) {
+ return ENOENT;
+ }
+ changes = config ^ port->opp.config;
+
+ /* OFPPC_PORT_DOWN is backed by the real device state, so toggle the
+ * netdev and only record the bit if the device accepted the change.
+ * NOTE(review): a netdev failure here is silently dropped. */
+ if (changes & OFPPC_PORT_DOWN) {
+ int error;
+ if (config & OFPPC_PORT_DOWN) {
+ error = netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
+ } else {
+ error = netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
+ }
+ if (!error) {
+ port->opp.config ^= OFPPC_PORT_DOWN;
+ }
+ }
+
+ /* These bits affect how installed flows must behave, so flipping any of
+ * them forces a revalidation pass over the datapath flows. */
+#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
+ if (changes & REVALIDATE_BITS) {
+ COVERAGE_INC(wx_costly_flags);
+ port->opp.config ^= changes & REVALIDATE_BITS;
+ wx->need_revalidate = true;
+ }
+#undef REVALIDATE_BITS
+
+ /* Flood-group membership depends on OFPPC_NO_FLOOD. */
+ if (changes & OFPPC_NO_FLOOD) {
+ port->opp.config ^= OFPPC_NO_FLOOD;
+ wx_port_refresh_groups(wx);
+ }
+
+ if (changes & OFPPC_NO_PACKET_IN) {
+ port->opp.config ^= OFPPC_NO_PACKET_IN;
+ }
+
+ return 0;
+}
+
+/* wdp class 'port_list' callback: stores in '*portsp' a freshly allocated
+ * array of pointers to this datapath's ports and its length in '*n_portsp'.
+ * The caller owns the array (but not the pointed-to ports). */
+static int
+wx_port_list(const struct wdp *wdp, struct wdp_port ***portsp,
+ size_t *n_portsp)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wdp_port **ports;
+ struct wdp_port *port;
+ unsigned int port_no;
+ size_t n_ports, i;
+
+ *n_portsp = n_ports = port_array_count(&wx->ports);
+ *portsp = ports = xmalloc(n_ports * sizeof *ports);
+ i = 0;
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ ports[i++] = port;
+ }
+ /* The count and the iteration must agree, or the array is mis-sized. */
+ assert(i == n_ports);
+
+ return 0;
+}
+
+/* wdp class 'port_poll' callback: reports datapath port additions and
+ * removals through '*devnamep'. */
+static int
+wx_port_poll(const struct wdp *wdp, char **devnamep)
+{
+ return xfif_port_poll(wx_cast(wdp)->xfif, devnamep);
+}
+
+/* wdp class 'port_poll_wait' callback: wakes poll_block() when
+ * wx_port_poll() has something to report. */
+static void
+wx_port_poll_wait(const struct wdp *wdp)
+{
+ xfif_port_poll_wait(wx_cast(wdp)->xfif);
+}
+
+/* wdp class 'flow_get' callback: returns the rule that exactly matches
+ * 'flow' (same fields, wildcards, and priority), or a null pointer if there
+ * is none or it is an internal (hidden) rule. */
+static struct wdp_rule *
+wx_flow_get(const struct wdp *wdp, const flow_t *flow)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+
+ rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, flow));
+ return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;
+}
+
+/* wdp class 'flow_match' callback: returns the highest-priority rule that
+ * matches 'flow', or a null pointer if there is none.  A hidden subrule is
+ * reported as its wildcarded parent ('super') instead. */
+static struct wdp_rule *
+wx_flow_match(const struct wdp *wdp, const flow_t *flow)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+
+ rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
+ if (rule) {
+ if (wx_rule_is_hidden(rule)) {
+ rule = rule->super;
+ }
+ return &rule->wr;
+ } else {
+ return NULL;
+ }
+}
+
+/* Bundles the client's callback and auxiliary data so that the classifier
+ * iteration below can filter out hidden rules before invoking it. */
+struct wx_for_each_thunk_aux {
+ wdp_flow_cb_func *client_callback;
+ void *client_aux;
+};
+
+/* Classifier callback: forwards 'cls_rule' to the client callback unless it
+ * is an internal (hidden) rule. */
+static void
+wx_for_each_thunk(struct cls_rule *cls_rule, void *aux_)
+{
+ struct wx_for_each_thunk_aux *aux = aux_;
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+
+ if (!wx_rule_is_hidden(rule)) {
+ aux->client_callback(&rule->wr, aux->client_aux);
+ }
+}
+
+/* wdp class 'flow_for_each_match' callback: invokes 'client_callback' with
+ * 'client_aux' for every non-hidden rule matching 'target', restricted to
+ * the classifier sections selected by 'include'. */
+static void
+wx_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
+ int include,
+ wdp_flow_cb_func *client_callback, void *client_aux)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_for_each_thunk_aux aux;
+
+ aux.client_callback = client_callback;
+ aux.client_aux = client_aux;
+ classifier_for_each_match(&wx->cls, target, include,
+ wx_for_each_thunk, &aux);
+}
+
+/* Obtains statistic counters for 'rule' within 'wx' and stores them into
+ * '*stats'. If 'rule' is a wildcarded rule, the returned statistic include
+ * statistics for all of 'rule''s subrules. */
+static void
+query_stats(struct wx *wx, struct wx_rule *rule, struct wdp_flow_stats *stats)
+{
+ struct wx_rule *subrule;
+ struct xflow_flow *xflow_flows;
+ size_t n_xflow_flows;
+
+ /* Start from historical data for 'rule' itself that are no longer tracked
+ * by the datapath. This counts, for example, subrules that have
+ * expired. */
+ stats->n_packets = rule->packet_count;
+ stats->n_bytes = rule->byte_count;
+ stats->inserted = rule->wr.created;
+ /* LLONG_MIN means "never used"; raised below if the datapath has data. */
+ stats->used = LLONG_MIN;
+ stats->tcp_flags = 0;
+ stats->ip_tos = 0;
+
+ /* Prepare to ask the datapath for statistics on 'rule', or if it is
+ * wildcarded then on all of its subrules.
+ *
+ * Also, add any statistics that are not tracked by the datapath for each
+ * subrule. This includes, for example, statistics for packets that were
+ * executed "by hand" by ofproto via xfif_execute() but must be accounted
+ * to a flow. */
+ n_xflow_flows = rule->wr.cr.flow.wildcards ? list_size(&rule->list) : 1;
+ xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
+ if (rule->wr.cr.flow.wildcards) {
+ size_t i = 0;
+ LIST_FOR_EACH (subrule, struct wx_rule, list, &rule->list) {
+ xflow_key_from_flow(&xflow_flows[i++].key, &subrule->wr.cr.flow);
+ stats->n_packets += subrule->packet_count;
+ stats->n_bytes += subrule->byte_count;
+ }
+ } else {
+ xflow_key_from_flow(&xflow_flows[0].key, &rule->wr.cr.flow);
+ }
+
+ /* Fetch up-to-date statistics from the datapath and add them in. */
+ if (!xfif_flow_get_multiple(wx->xfif, xflow_flows, n_xflow_flows)) {
+ size_t i;
+ for (i = 0; i < n_xflow_flows; i++) {
+ struct xflow_flow *xflow_flow = &xflow_flows[i];
+ long long int used;
+
+ stats->n_packets += xflow_flow->stats.n_packets;
+ stats->n_bytes += xflow_flow->stats.n_bytes;
+ used = xflow_flow_stats_to_msec(&xflow_flow->stats);
+ /* Take tcp_flags from every flow, but ip_tos only from the most
+ * recently used one. */
+ if (used > stats->used) {
+ stats->used = used;
+ if (xflow_flow->key.dl_type == htons(ETH_TYPE_IP)
+ && xflow_flow->key.nw_proto == IP_TYPE_TCP) {
+ stats->ip_tos = xflow_flow->stats.ip_tos;
+ }
+ }
+ stats->tcp_flags |= xflow_flow->stats.tcp_flags;
+ }
+ }
+ free(xflow_flows);
+}
+
+/* wdp class 'flow_get_stats' callback: fills '*stats' with the counters for
+ * 'wdp_rule' (including its subrules, if wildcarded).  Always returns 0. */
+static int
+wx_flow_get_stats(const struct wdp *wdp,
+ const struct wdp_rule *wdp_rule,
+ struct wdp_flow_stats *stats)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+ query_stats(wx, rule, stats);
+ return 0;
+}
+
+/* wdp class 'flow_overlaps' callback: reports whether 'flow' overlaps any
+ * rule already in the classifier. */
+static bool
+wx_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ /* XXX overlap with a subrule? */
+ return classifier_rule_overlaps(&wx->cls, flow);
+}
+
+/* wdp class 'flow_put' callback: creates or modifies the flow described by
+ * 'put', honoring the WDP_PUT_CREATE/WDP_PUT_MODIFY flags.  On success,
+ * zeroes '*old_stats' (XXX: real pre-modification stats are not reported
+ * yet) and stores the resulting rule in '*rulep' if those pointers are
+ * nonnull. */
+static int
+wx_flow_put(struct wdp *wdp, const struct wdp_flow_put *put,
+ struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+
+ rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, put->flow));
+ if (rule && wx_rule_is_hidden(rule)) {
+ /* Hidden rules are not visible to clients, so treat as absent. */
+ rule = NULL;
+ }
+
+ if (rule) {
+ if (!(put->flags & WDP_PUT_MODIFY)) {
+ return EEXIST;
+ }
+ } else {
+ if (!(put->flags & WDP_PUT_CREATE)) {
+ return EINVAL;
+ }
+ if ((put->flow->wildcards
+ ? classifier_count_wild(&wx->cls) >= WX_MAX_WILD
+ : classifier_count_exact(&wx->cls) >= WX_MAX_EXACT)) {
+ /* XXX subrules should not count against exact-match limit */
+ return ENOBUFS;
+ }
+ }
+
+ /* NOTE(review): on MODIFY the pre-existing rule is not explicitly
+ * removed here -- presumably wx_rule_insert() displaces it in the
+ * classifier; confirm that the displaced rule is freed. */
+ rule = wx_rule_create(NULL, put->actions, put->n_actions,
+ put->idle_timeout, put->hard_timeout);
+ cls_rule_from_flow(&rule->wr.cr, put->flow);
+ wx_rule_insert(wx, rule, NULL, 0);
+
+ if (old_stats) {
+ /* XXX */
+ memset(old_stats, 0, sizeof *old_stats);
+ }
+ if (rulep) {
+ *rulep = &rule->wr;
+ }
+
+ return 0;
+}
+
+/* wdp class 'flow_delete' callback: removes 'wdp_rule' from the datapath.
+ * '*final_stats', if nonnull, is zeroed (XXX: real final statistics are not
+ * reported yet).  Always returns 0. */
+static int
+wx_flow_delete(struct wdp *wdp, struct wdp_rule *wdp_rule,
+ struct wdp_flow_stats *final_stats)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+ wx_rule_remove(wx, rule);
+ if (final_stats) {
+ memset(final_stats, 0, sizeof *final_stats); /* XXX */
+ }
+ return 0;
+}
+
+/* Classifier callback for wx_flow_flush(): removes 'cls_rule' without
+ * issuing a per-flow datapath delete. */
+static void
+wx_flush_rule(struct cls_rule *cls_rule, void *wx_)
+{
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+ struct wx *wx = wx_;
+
+ /* Mark the flow as not installed, even though it might really be
+ * installed, so that wx_rule_remove() doesn't bother trying to uninstall
+ * it. There is no point in uninstalling it individually since we are
+ * about to blow away all the flows with xfif_flow_flush(). */
+ rule->installed = false;
+
+ wx_rule_remove(wx, rule);
+}
+
+/* wdp class 'flow_flush' callback: deletes every flow from the classifier
+ * and the datapath in one pass.  Always returns 0. */
+static int
+wx_flow_flush(struct wdp *wdp)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ COVERAGE_INC(wx_flow_flush);
+ classifier_for_each(&wx->cls, CLS_INC_ALL, wx_flush_rule, wx);
+ xfif_flow_flush(wx->xfif);
+ return 0;
+}
+
+/* wdp class 'execute' callback: translates 'actions' for 'packet', treated
+ * as arriving on OpenFlow port 'in_port', and executes the result in the
+ * datapath.  Returns 0 on success, otherwise a positive errno value. */
+static int
+wx_execute(struct wdp *wdp, uint16_t in_port,
+ const union ofp_action actions[], int n_actions,
+ const struct ofpbuf *packet)
+{
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_actions xflow_actions;
+ flow_t flow;
+ int error;
+
+ /* Cast away const for flow_extract(), which takes a non-const buffer. */
+ flow_extract((struct ofpbuf *) packet, in_port, &flow);
+ error = wx_xlate_actions(wx, actions, n_actions, &flow, packet,
+ &xflow_actions, NULL);
+ if (error) {
+ return error;
+ }
+ /* Fix: report datapath execution failure instead of discarding it,
+ * matching this function's errno-style return convention. */
+ return xfif_execute(wx->xfif, ofp_port_to_xflow_port(in_port),
+ xflow_actions.actions, xflow_actions.n_actions,
+ packet);
+}
+
+/* wdp class 'flow_inject' callback: executes 'wdp_rule''s actions on
+ * 'packet' as if it arrived on 'in_port', crediting the packet to the
+ * rule's statistics on success.  Returns 0 on success, otherwise a positive
+ * errno value. */
+static int
+wx_flow_inject(struct wdp *wdp, struct wdp_rule *wdp_rule,
+ uint16_t in_port, const struct ofpbuf *packet)
+{
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+ int error;
+
+ error = wx_execute(wdp, in_port, rule->wr.actions, rule->wr.n_actions,
+ packet);
+ if (!error) {
+ rule->packet_count++;
+ rule->byte_count += packet->size;
+ rule->used = time_msec();
+ }
+ return error;
+}
+
+/* wdp class 'recv_get_mask' callback: translates the xfif listen mask into
+ * the corresponding WDP_CHAN_* bitmap in '*listen_mask'. */
+static int
+wx_recv_get_mask(const struct wdp *wdp, int *listen_mask)
+{
+ struct wx *wx = wx_cast(wdp);
+ int xflow_listen_mask;
+ int error;
+
+ error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
+ if (!error) {
+ *listen_mask = 0;
+ if (xflow_listen_mask & XFLOWL_MISS) {
+ *listen_mask |= 1 << WDP_CHAN_MISS;
+ }
+ if (xflow_listen_mask & XFLOWL_ACTION) {
+ *listen_mask |= 1 << WDP_CHAN_ACTION;
+ }
+ if (xflow_listen_mask & XFLOWL_SFLOW) {
+ *listen_mask |= 1 << WDP_CHAN_SFLOW;
+ }
+ }
+ return error;
+}
+
+/* wdp class 'recv_set_mask' callback: translates the WDP_CHAN_* bitmap
+ * 'listen_mask' into the xfif equivalent and applies it. */
+static int
+wx_recv_set_mask(struct wdp *wdp, int listen_mask)
+{
+ struct wx *wx = wx_cast(wdp);
+ int xflow_listen_mask;
+
+ xflow_listen_mask = 0;
+ if (listen_mask & (1 << WDP_CHAN_MISS)) {
+ xflow_listen_mask |= XFLOWL_MISS;
+ }
+ if (listen_mask & (1 << WDP_CHAN_ACTION)) {
+ xflow_listen_mask |= XFLOWL_ACTION;
+ }
+ if (listen_mask & (1 << WDP_CHAN_SFLOW)) {
+ xflow_listen_mask |= XFLOWL_SFLOW;
+ }
+
+ return xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+}
+
+/* wdp class 'get_sflow_probability' callback: reports the datapath's sFlow
+ * sampling probability through '*probability'. */
+static int
+wx_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
+{
+ return xfif_get_sflow_probability(wx_cast(wdp)->xfif, probability);
+}
+
+/* wdp class 'set_sflow_probability' callback: sets the datapath's sFlow
+ * sampling probability. */
+static int
+wx_set_sflow_probability(struct wdp *wdp, uint32_t probability)
+{
+ return xfif_set_sflow_probability(wx_cast(wdp)->xfif, probability);
+}
+
+/* Converts the xflow upcall 'msg', whose packet data is in 'payload', into
+ * the client-visible 'packet'.  On success (return 0), ownership of
+ * 'payload' passes to 'packet'; on failure 'payload' is freed here. */
+static int
+wx_translate_xflow_msg(struct xflow_msg *msg, struct ofpbuf *payload,
+ struct wdp_packet *packet)
+{
+ packet->in_port = xflow_port_to_ofp_port(msg->port);
+ packet->send_len = 0;
+
+ switch (msg->type) {
+ case _XFLOWL_MISS_NR:
+ packet->channel = WDP_CHAN_MISS;
+ packet->payload = payload;
+ return 0;
+
+ case _XFLOWL_ACTION_NR:
+ packet->channel = WDP_CHAN_ACTION;
+ packet->payload = payload;
+ /* For userspace actions, 'arg' carries the requested send length. */
+ packet->send_len = msg->arg;
+ return 0;
+
+ case _XFLOWL_SFLOW_NR:
+ /* XXX */
+ ofpbuf_delete(payload);
+ return ENOSYS;
+
+ default:
+ VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
+ msg->type);
+ ofpbuf_delete(payload);
+ return ENOSYS;
+ }
+}
+
+/* Returns the Ethernet address of the datapath's local port, or a null
+ * pointer if there is no local port. */
+static const uint8_t *
+get_local_mac(const struct wx *wx)
+{
+ const struct wdp_port *port = port_array_get(&wx->ports, XFLOWP_LOCAL);
+ return port ? port->opp.hw_addr : NULL;
+}
+
+/* Returns true if 'packet' is a DHCP reply to the local port. Such a reply
+ * should be sent to the local port regardless of the flow table.
+ *
+ * We only need this, strictly speaking, when in-band control is turned on. */
+static bool
+wx_is_local_dhcp_reply(const struct wx *wx,
+ const flow_t *flow, const struct ofpbuf *packet)
+{
+ if (flow->dl_type == htons(ETH_TYPE_IP)
+ && flow->nw_proto == IP_TYPE_UDP
+ && flow->tp_src == htons(DHCP_SERVER_PORT)
+ && flow->tp_dst == htons(DHCP_CLIENT_PORT)
+ && packet->l7)
+ {
+ const uint8_t *local_mac = get_local_mac(wx);
+ /* Bounds-checked view of the DHCP header at the start of the UDP
+ * payload (packet->l7). */
+ struct dhcp_header *dhcp = ofpbuf_at(
+ packet, (char *)packet->l7 - (char *)packet->data, sizeof *dhcp);
+ return dhcp && local_mac && eth_addr_equals(dhcp->chaddr, local_mac);
+ }
+
+ return false;
+}
+
+/* Handles the flow-miss upcall 'msg' whose packet is 'payload': looks up a
+ * matching rule, instantiates an exact-match subrule if the match is
+ * wildcarded, executes the packet, and (re)installs the flow.  Returns true
+ * if the packet was fully handled here (the caller then frees 'payload'),
+ * false if no rule matched and the miss should be passed to the client. */
+static bool
+wx_explode_rule(struct wx *wx, struct xflow_msg *msg, struct ofpbuf *payload)
+{
+ struct wx_rule *rule;
+ flow_t flow;
+
+ flow_extract(payload, xflow_port_to_ofp_port(msg->port), &flow);
+
+ /* DHCP replies to the local port bypass the flow table so that in-band
+ * control keeps working; the packet is still processed below. */
+ if (wx_is_local_dhcp_reply(wx, &flow, payload)) {
+ union xflow_action action;
+
+ memset(&action, 0, sizeof(action));
+ action.output.type = XFLOWAT_OUTPUT;
+ action.output.port = XFLOWP_LOCAL;
+ xfif_execute(wx->xfif, msg->port, &action, 1, payload);
+ }
+
+ rule = wx_rule_lookup_valid(wx, &flow);
+ if (!rule) {
+ return false;
+ }
+
+ if (rule->wr.cr.flow.wildcards) {
+ /* Wildcarded match: work with an exact-match subrule from here on. */
+ rule = wx_rule_create_subrule(wx, rule, &flow);
+ wx_rule_make_actions(wx, rule, payload);
+ } else {
+ if (!rule->may_install) {
+ /* The rule is not installable, that is, we need to process every
+ * packet, so process the current packet and set its actions into
+ * 'subrule'. */
+ wx_rule_make_actions(wx, rule, payload);
+ } else {
+ /* XXX revalidate rule if it needs it */
+ }
+ }
+
+ wx_rule_execute(wx, rule, payload, &flow);
+ wx_rule_reinstall(wx, rule);
+
+ return true;
+}
+
+/* wdp class 'recv' callback: fetches upcalls from the datapath.  Flow
+ * misses are handled internally via wx_explode_rule() when possible; the
+ * first message that must reach the client is returned through 'packet'.
+ * Returns 0 when 'packet' was filled, EAGAIN after 50 internally handled
+ * messages, or another positive errno value from xfif_recv(). */
+static int
+wx_recv(struct wdp *wdp, struct wdp_packet *packet)
+{
+ struct wx *wx = wx_cast(wdp);
+ int i;
+
+ /* XXX need to avoid 50*50 potential cost for caller. */
+ for (i = 0; i < 50; i++) {
+ struct xflow_msg *msg;
+ struct ofpbuf *buf;
+ int error;
+
+ error = xfif_recv(wx->xfif, &buf);
+ if (error) {
+ return error;
+ }
+
+ /* NOTE(review): assumes 'buf' always holds at least a struct
+ * xflow_msg header -- presumably guaranteed by xfif_recv(); confirm. */
+ msg = ofpbuf_pull(buf, sizeof *msg);
+ if (msg->type != _XFLOWL_MISS_NR || !wx_explode_rule(wx, msg, buf)) {
+ /* wx_translate_xflow_msg() takes ownership of 'buf'. */
+ return wx_translate_xflow_msg(msg, buf, packet);
+ }
+ ofpbuf_delete(buf);
+ }
+ return EAGAIN;
+}
+
+/* wdp class 'recv_wait' callback: wakes poll_block() when wx_recv() has a
+ * message to process. */
+static void
+wx_recv_wait(struct wdp *wdp)
+{
+ struct wx *wx = wx_cast(wdp);
+
+ xfif_recv_wait(wx->xfif);
+}
+\f
+static void wx_port_update(struct wx *, const char *devname);
+static void wx_port_reinit(struct wx *);
+
+/* Handles one result from a port-change poll: ENOBUFS means the change
+ * queue overflowed and the whole port set must be rescanned; 0 means
+ * 'devname' changed and should be refreshed (and freed here). */
+static void
+wx_port_process_change(struct wx *wx, int error, char *devname)
+{
+ if (error == ENOBUFS) {
+ wx_port_reinit(wx);
+ } else if (!error) {
+ wx_port_update(wx, devname);
+ free(devname);
+ }
+}
+
+/* Drains all pending port-change notifications from both the datapath and
+ * the netdev monitor, updating wx's port tables to match. */
+static void
+wx_port_run(struct wx *wx)
+{
+ char *devname;
+ int error;
+
+ while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
+ wx_port_process_change(wx, error, devname);
+ }
+ while ((error = netdev_monitor_poll(wx->netdev_monitor,
+ &devname)) != EAGAIN) {
+ wx_port_process_change(wx, error, devname);
+ }
+}
+
+/* Rebuilds the datapath port group 'group' (WX_GROUP_ALL contains every
+ * port; WX_GROUP_FLOOD excludes ports with OFPPC_NO_FLOOD set).  Returns
+ * the number of ports placed in the group. */
+static size_t
+wx_port_refresh_group(struct wx *wx, unsigned int group)
+{
+ uint16_t *ports;
+ size_t n_ports;
+ struct wdp_port *port;
+ unsigned int port_no;
+
+ assert(group == WX_GROUP_ALL || group == WX_GROUP_FLOOD);
+
+ ports = xmalloc(port_array_count(&wx->ports) * sizeof *ports);
+ n_ports = 0;
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ if (group == WX_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
+ ports[n_ports++] = port_no;
+ }
+ }
+ xfif_port_group_set(wx->xfif, group, ports, n_ports);
+ free(ports);
+
+ return n_ports;
+}
+
+/* Rebuilds both datapath port groups after a port or config change. */
+static void
+wx_port_refresh_groups(struct wx *wx)
+{
+ wx_port_refresh_group(wx, WX_GROUP_FLOOD);
+ wx_port_refresh_group(wx, WX_GROUP_ALL);
+}
+
+/* Rescans the full port set after a change-queue overflow: re-checks every
+ * device we already know about plus every device the datapath currently
+ * reports, then refreshes the port groups. */
+static void
+wx_port_reinit(struct wx *wx)
+{
+ struct svec devnames;
+ struct wdp_port *wdp_port;
+ unsigned int port_no;
+ struct xflow_port *xflow_ports;
+ size_t n_xflow_ports;
+ size_t i;
+
+ svec_init(&devnames);
+ PORT_ARRAY_FOR_EACH (wdp_port, &wx->ports, port_no) {
+ svec_add (&devnames, (char *) wdp_port->opp.name);
+ }
+ /* NOTE(review): the return value of xfif_port_list() is ignored here;
+ * on failure xflow_ports/n_xflow_ports presumably stay unset -- confirm
+ * the xfif contract. */
+ xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);
+ for (i = 0; i < n_xflow_ports; i++) {
+ svec_add(&devnames, xflow_ports[i].devname);
+ }
+ free(xflow_ports);
+
+ /* Update each unique device once. */
+ svec_sort_unique(&devnames);
+ for (i = 0; i < devnames.n; i++) {
+ wx_port_update(wx, devnames.names[i]);
+ }
+ svec_destroy(&devnames);
+
+ wx_port_refresh_groups(wx);
+}
+
+/* Creates and returns a new wdp_port describing 'xflow_port', opening its
+ * network device and querying its state.  Returns a null pointer (after
+ * logging) if the netdev cannot be opened.  The caller owns the result and
+ * frees it with wx_port_free(). */
+static struct wdp_port *
+make_wdp_port(const struct xflow_port *xflow_port)
+{
+ struct netdev_options netdev_options;
+ enum netdev_flags flags;
+ struct wdp_port *wdp_port;
+ struct netdev *netdev;
+ bool carrier;
+ int error;
+
+ memset(&netdev_options, 0, sizeof netdev_options);
+ netdev_options.name = xflow_port->devname;
+ netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
+ netdev_options.may_create = true;
+ netdev_options.may_open = true;
+
+ error = netdev_open(&netdev_options, &netdev);
+ if (error) {
+ VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
+ "cannot be opened (%s)",
+ xflow_port->devname, xflow_port->port,
+ xflow_port->devname, strerror(error));
+ return NULL;
+ }
+
+ wdp_port = xmalloc(sizeof *wdp_port);
+ wdp_port->netdev = netdev;
+ wdp_port->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
+ netdev_get_etheraddr(netdev, wdp_port->opp.hw_addr);
+ /* strncpy() may leave the name unterminated, so terminate explicitly. */
+ strncpy((char *) wdp_port->opp.name, xflow_port->devname,
+ sizeof wdp_port->opp.name);
+ wdp_port->opp.name[sizeof wdp_port->opp.name - 1] = '\0';
+
+ netdev_get_flags(netdev, &flags);
+ wdp_port->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
+
+ netdev_get_carrier(netdev, &carrier);
+ wdp_port->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
+
+ netdev_get_features(netdev,
+ &wdp_port->opp.curr, &wdp_port->opp.advertised,
+ &wdp_port->opp.supported, &wdp_port->opp.peer);
+
+ wdp_port->devname = xstrdup(xflow_port->devname);
+ wdp_port->internal = (xflow_port->flags & XFLOW_PORT_INTERNAL) != 0;
+ return wdp_port;
+}
+
+/* Returns true (after logging) if 'xflow_port' clashes with a port or
+ * device name that wx already tracks, false if it is safe to add. */
+static bool
+wx_port_conflicts(const struct wx *wx, const struct xflow_port *xflow_port)
+{
+ if (port_array_get(&wx->ports, xflow_port->port)) {
+ VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
+ xflow_port->port);
+ return true;
+ } else if (shash_find(&wx->port_by_name, xflow_port->devname)) {
+ VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
+ xflow_port->devname);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/* Returns nonzero if the OpenFlow descriptions of 'a_' and 'b_' are
+ * field-for-field identical. */
+static int
+wdp_port_equal(const struct wdp_port *a_, const struct wdp_port *b_)
+{
+ const struct ofp_phy_port *a = &a_->opp;
+ const struct ofp_phy_port *b = &b_->opp;
+
+ BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
+ return (a->port_no == b->port_no
+ && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
+ && !strcmp((char *) a->name, (char *) b->name)
+ && a->state == b->state
+ && a->config == b->config
+ && a->curr == b->curr
+ && a->advertised == b->advertised
+ && a->supported == b->supported
+ && a->peer == b->peer);
+}
+
+/* Adds 'wdp_port' to wx's port tables (by number and by name) and starts
+ * monitoring its netdev.  Ownership of 'wdp_port' passes to wx. */
+static void
+wx_port_install(struct wx *wx, struct wdp_port *wdp_port)
+{
+ uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+ const char *netdev_name = (const char *) wdp_port->opp.name;
+
+ netdev_monitor_add(wx->netdev_monitor, wdp_port->netdev);
+ port_array_set(&wx->ports, xflow_port, wdp_port);
+ shash_add(&wx->port_by_name, netdev_name, wdp_port);
+}
+
+/* Removes 'wdp_port' from wx's port tables and monitoring.  Does not free
+ * it; the caller keeps ownership. */
+static void
+wx_port_remove(struct wx *wx, struct wdp_port *wdp_port)
+{
+ uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+
+ netdev_monitor_remove(wx->netdev_monitor, wdp_port->netdev);
+ port_array_set(&wx->ports, xflow_port, NULL);
+ shash_delete(&wx->port_by_name,
+ shash_find(&wx->port_by_name, (char *) wdp_port->opp.name));
+}
+
+/* Frees 'wdp_port' and closes its netdev.  A null 'wdp_port' is a no-op. */
+static void
+wx_port_free(struct wdp_port *wdp_port)
+{
+ if (wdp_port) {
+ netdev_close(wdp_port->netdev);
+ free(wdp_port);
+ }
+}
+
+/* Reconciles wx's record of the port named 'devname' with the datapath:
+ * adds, removes, or replaces the wdp_port as needed.  Called for each
+ * device reported changed by wx_port_run()/wx_port_reinit(). */
+static void
+wx_port_update(struct wx *wx, const char *devname)
+{
+ struct xflow_port xflow_port;
+ struct wdp_port *old_wdp_port;
+ struct wdp_port *new_wdp_port;
+ int error;
+
+ COVERAGE_INC(wx_update_port);
+
+ /* Query the datapath for port information. */
+ error = xfif_port_query_by_name(wx->xfif, devname, &xflow_port);
+
+ /* Find the old wdp_port. */
+ old_wdp_port = shash_find_data(&wx->port_by_name, devname);
+ if (!error) {
+ if (!old_wdp_port) {
+ /* There's no port named 'devname' but there might be a port with
+ * the same port number. This could happen if a port is deleted
+ * and then a new one added in its place very quickly, or if a port
+ * is renamed. In the former case we want to send an OFPPR_DELETE
+ * and an OFPPR_ADD, and in the latter case we want to send a
+ * single OFPPR_MODIFY. We can distinguish the cases by comparing
+ * the old port's ifindex against the new port, or perhaps less
+ * reliably but more portably by comparing the old port's MAC
+ * against the new port's MAC. However, this code isn't that smart
+ * and always sends an OFPPR_MODIFY (XXX). */
+ old_wdp_port = port_array_get(&wx->ports, xflow_port.port);
+ }
+ } else if (error != ENOENT && error != ENODEV) {
+ VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
+ "%s", strerror(error));
+ return;
+ }
+
+ /* Create a new wdp_port. */
+ new_wdp_port = !error ? make_wdp_port(&xflow_port) : NULL;
+
+ /* Eliminate a few pathological cases. */
+ if (!old_wdp_port && !new_wdp_port) {
+ return;
+ } else if (old_wdp_port && new_wdp_port) {
+ /* Most of the 'config' bits are OpenFlow soft state, but
+ * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the
+ * OpenFlow bits from old_wdp_port. (make_wdp_port() only sets
+ * OFPPC_PORT_DOWN and leaves the other bits 0.) */
+ new_wdp_port->opp.config |= old_wdp_port->opp.config & ~OFPPC_PORT_DOWN;
+
+ if (wdp_port_equal(old_wdp_port, new_wdp_port)) {
+ /* False alarm--no change. */
+ wx_port_free(new_wdp_port);
+ return;
+ }
+ }
+
+ /* Now deal with the normal cases. */
+ if (old_wdp_port) {
+ wx_port_remove(wx, old_wdp_port);
+ }
+ if (new_wdp_port) {
+ wx_port_install(wx, new_wdp_port);
+ }
+ wx_port_free(old_wdp_port);
+}
+
+/* Populates wx's port tables from the datapath's current port list and
+ * builds the initial port groups.  Returns 0 on success, otherwise the
+ * positive errno value from xfif_port_list(). */
+static int
+wx_port_init(struct wx *wx)
+{
+ struct xflow_port *ports;
+ size_t n_ports;
+ size_t i;
+ int error;
+
+ error = xfif_port_list(wx->xfif, &ports, &n_ports);
+ if (error) {
+ return error;
+ }
+
+ for (i = 0; i < n_ports; i++) {
+ const struct xflow_port *xflow_port = &ports[i];
+ /* Skip duplicates (logged by wx_port_conflicts()). */
+ if (!wx_port_conflicts(wx, xflow_port)) {
+ struct wdp_port *wdp_port = make_wdp_port(xflow_port);
+ if (wdp_port) {
+ wx_port_install(wx, wdp_port);
+ }
+ }
+ }
+ free(ports);
+ wx_port_refresh_groups(wx);
+ return 0;
+}
+\f
+/* Registers one wdp provider, based on the template class below, for each
+ * xflow datapath type.  Only the first successfully registered class keeps
+ * the run/wait callbacks, since they operate on the global wx list and must
+ * not be invoked once per type.  Safe to call more than once. */
+void
+wdp_xflow_register(void)
+{
+ static const struct wdp_class wdp_xflow_class = {
+ NULL, /* name */
+ wx_run,
+ wx_wait,
+ wx_enumerate,
+ wx_open,
+ wx_close,
+ wx_get_all_names,
+ wx_destroy,
+ wx_get_features,
+ wx_get_stats,
+ wx_get_drop_frags,
+ wx_set_drop_frags,
+ wx_port_add,
+ wx_port_del,
+ wx_port_query_by_number,
+ wx_port_query_by_name,
+ wx_port_list,
+ wx_port_set_config,
+ wx_port_poll,
+ wx_port_poll_wait,
+ wx_flow_get,
+ wx_flow_match,
+ wx_flow_for_each_match,
+ wx_flow_get_stats,
+ wx_flow_overlaps,
+ wx_flow_put,
+ wx_flow_delete,
+ wx_flow_flush,
+ wx_flow_inject,
+ wx_execute,
+ wx_recv_get_mask,
+ wx_recv_set_mask,
+ wx_get_sflow_probability,
+ wx_set_sflow_probability,
+ wx_recv,
+ wx_recv_wait,
+ };
+
+ static bool inited = false;
+
+ struct svec types;
+ const char *type;
+ bool registered;
+ int i;
+
+ if (inited) {
+ return;
+ }
+ inited = true;
+
+ svec_init(&types);
+ xf_enumerate_types(&types);
+
+ registered = false;
+ SVEC_FOR_EACH (i, type, &types) {
+ struct wdp_class *class;
+
+ /* The per-type classes are never freed; they live for the lifetime
+ * of the process.  NOTE(review): if wdp_register_provider() fails,
+ * 'class' and its xstrdup'd type leak -- confirm acceptable. */
+ class = xmalloc(sizeof *class);
+ *class = wdp_xflow_class;
+ class->type = xstrdup(type);
+ if (registered) {
+ class->run = NULL;
+ class->wait = NULL;
+ }
+ if (!wdp_register_provider(class)) {
+ registered = true;
+ }
+ }
+
+ svec_destroy(&types);
+}
--- /dev/null
+/*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WDP_XFLOW_H
+#define WDP_XFLOW_H 1
+
+void wdp_xflow_register(void);
+
+#endif /* ofproto/wdp-xflow.h */
--- /dev/null
+/*
+ * Copyright (c) 2008, 2009, 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+#include "wdp-provider.h"
+
+#include <assert.h>
+#include <ctype.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "coverage.h"
+#include "dynamic-string.h"
+#include "flow.h"
+#include "netlink.h"
+#include "ofp-print.h"
+#include "ofpbuf.h"
+#include "packets.h"
+#include "poll-loop.h"
+#include "shash.h"
+#include "svec.h"
+#include "timeval.h"
+#include "util.h"
+#include "valgrind.h"
+#include "wdp-xflow.h"
+
+#include "vlog.h"
+#define THIS_MODULE VLM_wdp
+\f
+/* wdp_rule */
+
+/* Initializes 'rule' with a copy of the 'n_actions' actions in 'actions'
+ * and default timeouts.  The caller is responsible for initializing
+ * 'rule->cr'. */
+void
+wdp_rule_init(struct wdp_rule *rule, const union ofp_action *actions,
+ size_t n_actions)
+{
+ rule->actions = xmemdup(actions, n_actions * sizeof *actions);
+ rule->n_actions = n_actions;
+ rule->created = time_msec();
+ /* Zero timeouts mean "never expire". */
+ rule->idle_timeout = 0;
+ rule->hard_timeout = 0;
+ rule->client_data = NULL;
+}
+
+/* Frees the resources held by 'rule' (but not 'rule' itself). */
+void
+wdp_rule_uninit(struct wdp_rule *rule)
+{
+ free(rule->actions);
+}
+\f
+/* wdp */
+
+/* Statically linked wdp providers registered at startup (in addition to
+ * the dynamically enumerated xflow providers). */
+static const struct wdp_class *base_wdp_classes[] = {
+ /* XXX none yet */
+};
+
+/* One registry entry per provider; 'refcount' counts open wdps using it so
+ * that unregistration can be refused while in use. */
+struct registered_wdp_class {
+ const struct wdp_class *wdp_class;
+ int refcount;
+};
+
+/* All registered providers, keyed by type name. */
+static struct shash wdp_classes = SHASH_INITIALIZER(&wdp_classes);
+
+/* Rate limit for individual messages going to or from the datapath, output at
+ * DBG level. This is very high because, if these are enabled, it is because
+ * we really need to see them. */
+static struct vlog_rate_limit wdpmsg_rl = VLOG_RATE_LIMIT_INIT(600, 600);
+
+/* Not really much point in logging many wdp errors. */
+static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5);
+
+static void log_operation(const struct wdp *, const char *operation,
+ int error);
+
+/* One-time initialization: registers the statically linked providers and
+ * the xflow providers.  Subsequent calls are no-ops. */
+static void
+wdp_initialize(void)
+{
+ static int status = -1;
+
+ if (status < 0) {
+ int i;
+
+ status = 0;
+ for (i = 0; i < ARRAY_SIZE(base_wdp_classes); i++) {
+ wdp_register_provider(base_wdp_classes[i]);
+ }
+ wdp_xflow_register();
+ }
+}
+
+/* Performs periodic work needed by all the various kinds of wdps.
+ *
+ * If your program opens any wdps, it must call both this function and
+ * netdev_run() within its main poll loop. */
+void
+wdp_run(void)
+{
+ struct shash_node *node;
+
+ SHASH_FOR_EACH (node, &wdp_classes) {
+ const struct registered_wdp_class *rc = node->data;
+
+ if (rc->wdp_class->run) {
+ rc->wdp_class->run();
+ }
+ }
+}
+
+/* Arranges for poll_block() to wake up when wdp_run() needs to be called.
+ *
+ * If your program opens any wdps, it must call both this function and
+ * netdev_wait() within its main poll loop. */
+void
+wdp_wait(void)
+{
+ struct shash_node *node;
+
+ SHASH_FOR_EACH (node, &wdp_classes) {
+ const struct registered_wdp_class *rc = node->data;
+
+ if (rc->wdp_class->wait) {
+ rc->wdp_class->wait();
+ }
+ }
+}
+
+/* Registers a new datapath provider. After successful registration, new
+ * datapaths of that type can be opened using wdp_open(). */
+/* Registers a new datapath provider.  After successful registration, new
+ * datapaths of that type can be opened using wdp_open().
+ *
+ * Returns 0 if successful, or EEXIST if a provider with the same type is
+ * already registered. */
+int
+wdp_register_provider(const struct wdp_class *new_class)
+{
+    struct registered_wdp_class *registered_class;
+
+    if (shash_find(&wdp_classes, new_class->type)) {
+        VLOG_WARN("attempted to register duplicate datapath provider: %s",
+                  new_class->type);
+        return EEXIST;
+    }
+
+    registered_class = xmalloc(sizeof *registered_class);
+    registered_class->wdp_class = new_class;
+    registered_class->refcount = 0;
+
+    shash_add(&wdp_classes, new_class->type, registered_class);
+
+    return 0;
+}
+
+/* Unregisters a datapath provider. 'type' must have been previously
+ * registered and not currently be in use by any wdps. After unregistration
+ * new datapaths of that type cannot be opened using wdp_open(). */
+/* Unregisters a datapath provider.  'type' must have been previously
+ * registered and not currently be in use by any wdps.  After unregistration
+ * new datapaths of that type cannot be opened using wdp_open().
+ *
+ * Returns 0 if successful, EAFNOSUPPORT if no provider of that type is
+ * registered, or EBUSY if the provider still has open wdps. */
+int
+wdp_unregister_provider(const char *type)
+{
+    struct shash_node *node;
+    struct registered_wdp_class *registered_class;
+
+    node = shash_find(&wdp_classes, type);
+    if (!node) {
+        VLOG_WARN("attempted to unregister a datapath provider that is not "
+                  "registered: %s", type);
+        return EAFNOSUPPORT;
+    }
+
+    registered_class = node->data;
+    if (registered_class->refcount) {
+        VLOG_WARN("attempted to unregister in use datapath provider: %s", type);
+        return EBUSY;
+    }
+
+    shash_delete(&wdp_classes, node);
+    free(registered_class);
+
+    return 0;
+}
+
+/* Clears 'types' and enumerates the types of all currently registered datapath
+ * providers into it. The caller must first initialize the svec. */
+/* Clears 'types' and enumerates the types of all currently registered
+ * datapath providers into it.  The caller must first initialize the svec. */
+void
+wdp_enumerate_types(struct svec *types)
+{
+    struct shash_node *node;
+
+    wdp_initialize();
+    svec_clear(types);
+
+    SHASH_FOR_EACH(node, &wdp_classes) {
+        const struct registered_wdp_class *registered_class = node->data;
+        svec_add(types, registered_class->wdp_class->type);
+    }
+}
+
+/* Clears 'names' and enumerates the names of all known created datapaths with
+ * the given 'type'. The caller must first initialize the svec. Returns 0 if
+ * successful, otherwise a positive errno value.
+ *
+ * Some kinds of datapaths might not be practically enumerable. This is not
+ * considered an error. */
+/* Clears 'names' and enumerates the names of all known created datapaths
+ * with the given 'type' into it.  The caller must first initialize the svec.
+ * Returns 0 if successful, otherwise a positive errno value (EAFNOSUPPORT if
+ * 'type' is unknown).
+ *
+ * A provider without an 'enumerate' function is not considered an error:
+ * 'names' is simply left empty and 0 is returned. */
+int
+wdp_enumerate_names(const char *type, struct svec *names)
+{
+    const struct registered_wdp_class *registered_class;
+    const struct wdp_class *wdp_class;
+    int error;
+
+    wdp_initialize();
+    svec_clear(names);
+
+    registered_class = shash_find_data(&wdp_classes, type);
+    if (!registered_class) {
+        VLOG_WARN("could not enumerate unknown type: %s", type);
+        return EAFNOSUPPORT;
+    }
+
+    wdp_class = registered_class->wdp_class;
+    error = (wdp_class->enumerate
+             ? wdp_class->enumerate(wdp_class, names)
+             : 0);
+
+    if (error) {
+        VLOG_WARN("failed to enumerate %s datapaths: %s", wdp_class->type,
+                  strerror(error));
+    }
+
+    return error;
+}
+
+/* Parses 'datapath_name_', which is of the form [type@]name, into its
+ * component pieces.  'name' and 'type' must be freed by the caller (free()
+ * on a null '*type' is harmless).
+ *
+ * If no '@' separator is present, '*type' is set to NULL and '*name'
+ * receives the whole string. */
+void
+wdp_parse_name(const char *datapath_name_, char **name, char **type)
+{
+    char *datapath_name = xstrdup(datapath_name_);
+    char *separator;
+
+    separator = strchr(datapath_name, '@');
+    if (separator) {
+        /* Split the copy in place: the prefix becomes the type, the
+         * remainder is duplicated as the name. */
+        *separator = '\0';
+        *type = datapath_name;
+        *name = xstrdup(separator + 1);
+    } else {
+        *name = datapath_name;
+        *type = NULL;
+    }
+}
+
+/* Helper for wdp_open() and wdp_create(): looks up the provider for 'type'
+ * (defaulting to "system" when 'type' is null or empty) and asks it to open
+ * or, if 'create' is true, create the datapath 'name'.
+ *
+ * Returns 0 if successful and stores the new wdp in '*wdpp'; otherwise
+ * returns a positive errno value and stores NULL in '*wdpp'. */
+static int
+do_open(const char *name, const char *type, bool create, struct wdp **wdpp)
+{
+    struct wdp *wdp = NULL;
+    int error;
+    struct registered_wdp_class *registered_class;
+
+    wdp_initialize();
+
+    if (!type || *type == '\0') {
+        type = "system";
+    }
+
+    registered_class = shash_find_data(&wdp_classes, type);
+    if (!registered_class) {
+        VLOG_WARN("could not create datapath %s of unknown type %s", name,
+                  type);
+        error = EAFNOSUPPORT;
+        goto exit;
+    }
+
+    error = registered_class->wdp_class->open(registered_class->wdp_class,
+                                              name, create, &wdp);
+    if (!error) {
+        /* Track the number of open wdps per provider so that
+         * wdp_unregister_provider() can refuse to remove one in use. */
+        registered_class->refcount++;
+    }
+
+exit:
+    *wdpp = error ? NULL : wdp;
+    return error;
+}
+
+/* Tries to open an existing datapath named 'name' and type 'type'. Will fail
+ * if no datapath with 'name' and 'type' exists. 'type' may be either NULL or
+ * the empty string to specify the default system type. Returns 0 if
+ * successful, otherwise a positive errno value. On success stores a pointer
+ * to the datapath in '*wdpp', otherwise a null pointer. */
+int
+wdp_open(const char *name, const char *type, struct wdp **wdpp)
+{
+ return do_open(name, type, false, wdpp);
+}
+
+/* Tries to create and open a new datapath with the given 'name' and 'type'.
+ * 'type' may be either NULL or the empty string to specify the default system
+ * type. Will fail if a datapath with 'name' and 'type' already exists.
+ * Returns 0 if successful, otherwise a positive errno value. On success
+ * stores a pointer to the datapath in '*wdpp', otherwise a null pointer. */
+int
+wdp_create(const char *name, const char *type, struct wdp **wdpp)
+{
+ return do_open(name, type, true, wdpp);
+}
+
+/* Tries to open a datapath with the given 'name' and 'type', creating it if it
+ * does not exist. 'type' may be either NULL or the empty string to specify
+ * the default system type. Returns 0 if successful, otherwise a positive
+ * errno value. On success stores a pointer to the datapath in '*wdpp',
+ * otherwise a null pointer. */
+int
+wdp_create_and_open(const char *name, const char *type, struct wdp **wdpp)
+{
+ int error;
+
+ error = wdp_create(name, type, wdpp);
+ if (error == EEXIST || error == EBUSY) {
+ error = wdp_open(name, type, wdpp);
+ if (error) {
+ VLOG_WARN("datapath %s already exists but cannot be opened: %s",
+ name, strerror(error));
+ }
+ } else if (error) {
+ VLOG_WARN("failed to create datapath %s: %s", name, strerror(error));
+ }
+ return error;
+}
+
+/* Closes and frees the connection to 'wdp'. Does not destroy the wdp
+ * itself; call wdp_delete() first, instead, if that is desirable. */
+/* Closes and frees the connection to 'wdp'.  Does not destroy the wdp
+ * itself; call wdp_delete() first, instead, if that is desirable.
+ * A null 'wdp' is a no-op. */
+void
+wdp_close(struct wdp *wdp)
+{
+    if (wdp) {
+        struct registered_wdp_class *registered_class;
+
+        registered_class = shash_find_data(&wdp_classes,
+                                           wdp->wdp_class->type);
+        assert(registered_class);
+        assert(registered_class->refcount);
+
+        /* Release the reference taken in do_open(). */
+        registered_class->refcount--;
+        wdp_uninit(wdp, true);
+    }
+}
+
+/* Returns the name of datapath 'wdp' prefixed with the type
+ * (for use in log messages). */
+const char *
+wdp_name(const struct wdp *wdp)
+{
+ return wdp->full_name;
+}
+
+/* Returns the name of datapath 'wdp' without the type
+ * (for use in device names). */
+const char *
+wdp_base_name(const struct wdp *wdp)
+{
+ return wdp->base_name;
+}
+
+/* Enumerates all names that may be used to open 'wdp' into 'all_names'. The
+ * Linux datapath, for example, supports opening a datapath both by number,
+ * e.g. "wdp0", and by the name of the datapath's local port. For some
+ * datapaths, this might be an infinite set (e.g. in a file name, slashes may
+ * be duplicated any number of times), in which case only the names most likely
+ * to be used will be enumerated.
+ *
+ * The caller must already have initialized 'all_names'. Any existing names in
+ * 'all_names' will not be disturbed. */
+/* Enumerates every name usable to open 'wdp' into 'all_names', delegating to
+ * the provider when it supplies a get_all_names callback; otherwise falls
+ * back to the single base name.  Returns 0 if successful, otherwise a
+ * positive errno value from the provider (logged, rate-limited). */
+int
+wdp_get_all_names(const struct wdp *wdp, struct svec *all_names)
+{
+    if (wdp->wdp_class->get_all_names) {
+        int error = wdp->wdp_class->get_all_names(wdp, all_names);
+        if (error) {
+            /* Fixed typo in log message ("datpath" -> "datapath"). */
+            VLOG_WARN_RL(&error_rl,
+                         "failed to retrieve names for datapath %s: %s",
+                         wdp_name(wdp), strerror(error));
+        }
+        return error;
+    } else {
+        svec_add(all_names, wdp_base_name(wdp));
+        return 0;
+    }
+}
+
+/* Destroys the datapath that 'wdp' is connected to, first removing all of
+ * its ports. After calling this function, it does not make sense to pass
+ * 'wdp' to any functions other than wdp_name() or wdp_close(). */
+int
+wdp_delete(struct wdp *wdp)
+{
+ int error;
+
+ COVERAGE_INC(wdp_destroy);
+
+ error = wdp->wdp_class->destroy(wdp);
+ log_operation(wdp, "delete", error);
+ return error;
+}
+
+int
+wdp_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
+{
+ int error = wdp->wdp_class->get_features(wdp, featuresp);
+ if (error) {
+ *featuresp = NULL;
+ }
+ return error;
+}
+
+/* Retrieves statistics for 'wdp' into 'stats'. Returns 0 if successful,
+ * otherwise a positive errno value. */
+int
+wdp_get_wdp_stats(const struct wdp *wdp, struct wdp_stats *stats)
+{
+ int error = wdp->wdp_class->get_stats(wdp, stats);
+ if (error) {
+ memset(stats, 0, sizeof *stats);
+ }
+ log_operation(wdp, "get_stats", error);
+ return error;
+}
+
+/* Retrieves the current IP fragment handling policy for 'wdp' into
+ * '*drop_frags': true indicates that fragments are dropped, false indicates
+ * that fragments are treated in the same way as other IP packets (except that
+ * the L4 header cannot be read). Returns 0 if successful, otherwise a
+ * positive errno value. */
+int
+wdp_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
+{
+ int error = wdp->wdp_class->get_drop_frags(wdp, drop_frags);
+ if (error) {
+ *drop_frags = false;
+ }
+ log_operation(wdp, "get_drop_frags", error);
+ return error;
+}
+
+/* Changes 'wdp''s treatment of IP fragments to 'drop_frags', whose meaning is
+ * the same as for the get_drop_frags member function. Returns 0 if
+ * successful, otherwise a positive errno value. */
+int
+wdp_set_drop_frags(struct wdp *wdp, bool drop_frags)
+{
+ int error = wdp->wdp_class->set_drop_frags(wdp, drop_frags);
+ log_operation(wdp, "set_drop_frags", error);
+ return error;
+}
+
+/* Attempts to add 'devname' as a port on 'wdp'. If 'internal' is true,
+ * creates the port as an internal port. If successful, returns 0 and sets
+ * '*port_nop' to the new port's port number (if 'port_nop' is non-null). On
+ * failure, returns a positive errno value and sets '*port_nop' to UINT16_MAX
+ * (if 'port_nop' is non-null). */
+int
+wdp_port_add(struct wdp *wdp, const char *devname,
+ bool internal, uint16_t *port_nop)
+{
+ uint16_t port_no;
+ int error;
+
+ COVERAGE_INC(wdp_port_add);
+
+ error = wdp->wdp_class->port_add(wdp, devname, internal, &port_no);
+ if (!error) {
+ VLOG_DBG_RL(&wdpmsg_rl, "%s: added %s as port %"PRIu16,
+ wdp_name(wdp), devname, port_no);
+ } else {
+ VLOG_WARN_RL(&error_rl, "%s: failed to add %s as port: %s",
+ wdp_name(wdp), devname, strerror(error));
+ port_no = UINT16_MAX;
+ }
+ if (port_nop) {
+ *port_nop = port_no;
+ }
+ return error;
+}
+
+/* Attempts to remove 'wdp''s port number 'port_no'. Returns 0 if successful,
+ * otherwise a positive errno value. */
+int
+wdp_port_del(struct wdp *wdp, uint16_t port_no)
+{
+ int error;
+
+ COVERAGE_INC(wdp_port_del);
+
+ error = wdp->wdp_class->port_del(wdp, port_no);
+ log_operation(wdp, "port_del", error);
+ return error;
+}
+
+/* Looks up port number 'port_no' in 'wdp'. On success, returns 0 and points
+ * '*portp' to a wdp_port representing the specified port. On failure, returns
+ * a positive errno value and sets '*portp' to NULL.
+ *
+ * The caller must not modify or free the returned wdp_port. Calling
+ * wdp_run() or wdp_port_poll() may free the returned wdp_port. */
+int
+wdp_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
+ struct wdp_port **portp)
+{
+ int error;
+
+ error = wdp->wdp_class->port_query_by_number(wdp, port_no, portp);
+ if (!error) {
+ VLOG_DBG_RL(&wdpmsg_rl, "%s: port %"PRIu16" is device %s",
+ wdp_name(wdp), port_no, (*portp)->devname);
+ } else {
+ *portp = NULL;
+ VLOG_WARN_RL(&error_rl, "%s: failed to query port %"PRIu16": %s",
+ wdp_name(wdp), port_no, strerror(error));
+ }
+ return error;
+}
+
+/* Same as wdp_port_query_by_number() except that it look for a port named
+ * 'devname' in 'wdp'. */
+int
+wdp_port_query_by_name(const struct wdp *wdp, const char *devname,
+ struct wdp_port **portp)
+{
+ int error = wdp->wdp_class->port_query_by_name(wdp, devname, portp);
+ if (!error) {
+ VLOG_DBG_RL(&wdpmsg_rl, "%s: device %s is on port %"PRIu16,
+ wdp_name(wdp), devname, (*portp)->opp.port_no);
+ } else {
+ *portp = NULL;
+
+ /* Log level is DBG here because all the current callers are interested
+ * in whether 'wdp' actually has a port 'devname', so that it's not
+ * an issue worth logging if it doesn't. */
+ VLOG_DBG_RL(&error_rl, "%s: failed to query port %s: %s",
+ wdp_name(wdp), devname, strerror(error));
+ }
+ return error;
+}
+
+/* Looks up port number 'port_no' in 'wdp'. On success, returns 0 and stores
+ * a copy of the port's name in '*namep'. On failure, returns a positive errno
+ * value and stores NULL in '*namep'.
+ *
+ * The caller is responsible for freeing '*namep' (with free()). */
+/* Looks up port number 'port_no' in 'wdp'.  On success, returns 0 and stores
+ * a copy of the port's name in '*namep'.  On failure, returns a positive
+ * errno value and stores NULL in '*namep'.
+ *
+ * The caller is responsible for freeing '*namep' (with free()).  The looked
+ * up wdp_port itself remains owned by 'wdp' and must not be freed here. */
+int
+wdp_port_get_name(struct wdp *wdp, uint16_t port_no, char **namep)
+{
+    struct wdp_port *port;
+    int error;
+
+    error = wdp_port_query_by_number(wdp, port_no, &port);
+    /* wdp_port_query_by_number() sets 'port' to NULL on error, so 'port' is
+     * only dereferenced when the lookup succeeded. */
+    *namep = !error ? xstrdup(port->devname) : NULL;
+    return error;
+}
+
+/* Obtains a list of all the ports in 'wdp'.
+ *
+ * If successful, returns 0 and sets '*portsp' to point to an array of
+ * pointers to port structures and '*n_portsp' to the number of pointers in the
+ * array. On failure, returns a positive errno value and sets '*portsp' to
+ * NULL and '*n_portsp' to 0.
+ *
+ * The caller is responsible for freeing '*portsp' by calling free(). The
+ * caller must not free the individual wdp_port structures. Calling
+ * wdp_run() or wdp_port_poll() may free the returned wdp_ports. */
+int
+wdp_port_list(const struct wdp *wdp,
+ struct wdp_port ***portsp, size_t *n_portsp)
+{
+ int error;
+
+ error = wdp->wdp_class->port_list(wdp, portsp, n_portsp);
+ if (error) {
+ *portsp = NULL;
+ *n_portsp = 0;
+ }
+ log_operation(wdp, "port_list", error);
+ return error;
+}
+
+int
+wdp_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
+{
+ return wdp->wdp_class->port_set_config(wdp, port_no, config);
+}
+
+/* Polls for changes in the set of ports in 'wdp'. If the set of ports in
+ * 'wdp' has changed, this function does one of the following:
+ *
+ * - Stores the name of the device that was added to or deleted from 'wdp' in
+ * '*devnamep' and returns 0. The caller is responsible for freeing
+ * '*devnamep' (with free()) when it no longer needs it.
+ *
+ * - Returns ENOBUFS and sets '*devnamep' to NULL.
+ *
+ * This function may also return 'false positives', where it returns 0 and
+ * '*devnamep' names a device that was not actually added or deleted or it
+ * returns ENOBUFS without any change.
+ *
+ * Returns EAGAIN if the set of ports in 'wdp' has not changed. May also
+ * return other positive errno values to indicate that something has gone
+ * wrong. */
+int
+wdp_port_poll(const struct wdp *wdp, char **devnamep)
+{
+ int error = wdp->wdp_class->port_poll(wdp, devnamep);
+ if (error) {
+ *devnamep = NULL;
+ }
+ return error;
+}
+
+/* Arranges for the poll loop to wake up when port_poll(wdp) will return a
+ * value other than EAGAIN. */
+void
+wdp_port_poll_wait(const struct wdp *wdp)
+{
+ wdp->wdp_class->port_poll_wait(wdp);
+}
+
+/* Deletes all flows from 'wdp'. Returns 0 if successful, otherwise a
+ * positive errno value. */
+int
+wdp_flow_flush(struct wdp *wdp)
+{
+ int error;
+
+ COVERAGE_INC(wdp_flow_flush);
+
+ error = wdp->wdp_class->flow_flush(wdp);
+ log_operation(wdp, "flow_flush", error);
+ return error;
+}
+
+struct wdp_rule *
+wdp_flow_get(struct wdp *wdp, const flow_t *flow)
+{
+ return wdp->wdp_class->flow_get(wdp, flow);
+}
+
+struct wdp_rule *
+wdp_flow_match(struct wdp *wdp, const flow_t *flow)
+{
+ return wdp->wdp_class->flow_match(wdp, flow);
+}
+
+void
+wdp_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
+ int include, wdp_flow_cb_func *callback, void *aux)
+{
+ wdp->wdp_class->flow_for_each_match(wdp, target, include,
+ callback, aux);
+}
+
+int
+wdp_flow_get_stats(const struct wdp *wdp, const struct wdp_rule *rule,
+ struct wdp_flow_stats *stats)
+{
+ int error = wdp->wdp_class->flow_get_stats(wdp, rule, stats);
+ if (error) {
+ memset(stats, 0, sizeof *stats);
+ }
+ return error;
+}
+
+bool
+wdp_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
+{
+ return wdp->wdp_class->flow_overlaps(wdp, flow);
+}
+
+int
+wdp_flow_put(struct wdp *wdp, struct wdp_flow_put *put,
+ struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
+{
+ int error = wdp->wdp_class->flow_put(wdp, put, old_stats, rulep);
+ if (error) {
+ if (old_stats) {
+ memset(old_stats, 0, sizeof *old_stats);
+ }
+ if (rulep) {
+ *rulep = NULL;
+ }
+ }
+ return error;
+}
+
+int
+wdp_flow_delete(struct wdp *wdp, struct wdp_rule *rule,
+ struct wdp_flow_stats *final_stats)
+{
+ int error = wdp->wdp_class->flow_delete(wdp, rule, final_stats);
+ if (error && final_stats) {
+ memset(final_stats, 0, sizeof *final_stats);
+ }
+ return error;
+}
+
+int
+wdp_flow_inject(struct wdp *wdp, struct wdp_rule *rule,
+ uint16_t in_port, const struct ofpbuf *packet)
+{
+ return wdp->wdp_class->flow_inject(wdp, rule, in_port, packet);
+}
+
+/* Executes 'actions' on 'buf', which arrived on (or is treated as having
+ * arrived on) 'in_port'.  Returns 0 if successful, otherwise a positive
+ * errno value from the provider.
+ *
+ * An empty action list is a successful no-op: the packet is simply not
+ * forwarded anywhere. */
+int
+wdp_execute(struct wdp *wdp, uint16_t in_port,
+            const union ofp_action actions[], size_t n_actions,
+            const struct ofpbuf *buf)
+{
+    int error;
+
+    COVERAGE_INC(wdp_execute);
+    if (n_actions > 0) {
+        error = wdp->wdp_class->execute(wdp, in_port, actions,
+                                        n_actions, buf);
+    } else {
+        error = 0;
+    }
+    return error;
+}
+
+/* Retrieves 'wdp''s "listen mask" into '*listen_mask'. Each bit set in
+ * '*listen_mask' indicates that wdp_recv() will receive messages of the
+ * corresponding WDP_CHAN_* type. Returns 0 if successful, otherwise a
+ * positive errno value. */
+int
+wdp_recv_get_mask(const struct wdp *wdp, int *listen_mask)
+{
+ int error = wdp->wdp_class->recv_get_mask(wdp, listen_mask);
+ if (error) {
+ *listen_mask = 0;
+ }
+ log_operation(wdp, "recv_get_mask", error);
+ return error;
+}
+
+/* Sets 'wdp''s "listen mask" to 'listen_mask'. Each bit set in
+ * '*listen_mask' requests that wdp_recv() receive messages of the
+ * corresponding WDP_CHAN_* type. Returns 0 if successful, otherwise a
+ * positive errno value. */
+int
+wdp_recv_set_mask(struct wdp *wdp, int listen_mask)
+{
+ int error = wdp->wdp_class->recv_set_mask(wdp, listen_mask);
+ log_operation(wdp, "recv_set_mask", error);
+ return error;
+}
+
+/* Retrieve the sFlow sampling probability. '*probability' is expressed as the
+ * number of packets out of UINT_MAX to sample, e.g. probability/UINT_MAX is
+ * the probability of sampling a given packet.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. EOPNOTSUPP
+ * indicates that 'wdp' does not support sFlow sampling. */
+int
+wdp_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
+{
+ int error = (wdp->wdp_class->get_sflow_probability
+ ? wdp->wdp_class->get_sflow_probability(wdp, probability)
+ : EOPNOTSUPP);
+ if (error) {
+ *probability = 0;
+ }
+ log_operation(wdp, "get_sflow_probability", error);
+ return error;
+}
+
+/* Set the sFlow sampling probability. 'probability' is expressed as the
+ * number of packets out of UINT_MAX to sample, e.g. probability/UINT_MAX is
+ * the probability of sampling a given packet.
+ *
+ * Returns 0 if successful, otherwise a positive errno value. EOPNOTSUPP
+ * indicates that 'wdp' does not support sFlow sampling. */
+int
+wdp_set_sflow_probability(struct wdp *wdp, uint32_t probability)
+{
+ int error = (wdp->wdp_class->set_sflow_probability
+ ? wdp->wdp_class->set_sflow_probability(wdp, probability)
+ : EOPNOTSUPP);
+ log_operation(wdp, "set_sflow_probability", error);
+ return error;
+}
+
+/* Attempts to receive a message from 'wdp'. If successful, stores the
+ * message into '*packetp'. Only messages of the types selected with
+ * wdp_set_listen_mask() will ordinarily be received (but if a message type
+ * is enabled and then later disabled, some stragglers might pop up).
+ *
+ * Returns 0 if successful, otherwise a positive errno value. Returns EAGAIN
+ * if no message is immediately available. */
+int
+wdp_recv(struct wdp *wdp, struct wdp_packet *packet)
+{
+ int error = wdp->wdp_class->recv(wdp, packet);
+ if (!error) {
+ /* XXX vlog_dbg received packet */
+ } else {
+ memset(packet, 0, sizeof *packet);
+ packet->channel = -1;
+ }
+ return error;
+}
+
+/* Discards all messages that would otherwise be received by wdp_recv() on
+ * 'wdp'. Returns 0 if successful, otherwise a positive errno value. */
+/* Discards all messages that would otherwise be received by wdp_recv() on
+ * 'wdp'.  Returns 0 if successful, otherwise a positive errno value. */
+int
+wdp_recv_purge(struct wdp *wdp)
+{
+    struct wdp_stats stats;
+    unsigned int i;
+    int error;
+
+    COVERAGE_INC(wdp_purge);
+
+    error = wdp_get_wdp_stats(wdp, &stats);
+    if (error) {
+        return error;
+    }
+
+    /* The number of packets that can possibly be queued is bounded by the
+     * sum of the maximum lengths of the per-channel queues, so draining that
+     * many (or hitting EAGAIN) empties the queues.  (Line wrapped to the
+     * file's 79-column convention.) */
+    for (i = 0; i < (stats.max_miss_queue + stats.max_action_queue
+                     + stats.max_sflow_queue); i++) {
+        struct wdp_packet packet;
+
+        error = wdp_recv(wdp, &packet);
+        if (error) {
+            return error == EAGAIN ? 0 : error;
+        }
+        ofpbuf_delete(packet.payload);
+    }
+    return 0;
+}
+
+/* Arranges for the poll loop to wake up when 'wdp' has a message queued to be
+ * received with wdp_recv(). */
+void
+wdp_recv_wait(struct wdp *wdp)
+{
+ wdp->wdp_class->recv_wait(wdp);
+}
+
+/* Obtains the NetFlow engine type and engine ID for 'wdp' into '*engine_type'
+ * and '*engine_id', respectively. */
+void
+wdp_get_netflow_ids(const struct wdp *wdp,
+ uint8_t *engine_type, uint8_t *engine_id)
+{
+ *engine_type = wdp->netflow_engine_type;
+ *engine_id = wdp->netflow_engine_id;
+}
+\f
+/* Frees 'packet' and the payload it owns.  A null 'packet' is a no-op. */
+void
+wdp_packet_destroy(struct wdp_packet *packet)
+{
+    if (packet) {
+        ofpbuf_delete(packet->payload);
+        free(packet);
+    }
+}
+
+/* Initializes the generic members of 'wdp' for provider implementations:
+ * records 'wdp_class', duplicates 'name' as the base name, composes the
+ * "type@name" full name, and stores the NetFlow engine identifiers.
+ * The allocated names are released by wdp_uninit(). */
+void
+wdp_init(struct wdp *wdp, const struct wdp_class *wdp_class,
+         const char *name,
+         uint8_t netflow_engine_type, uint8_t netflow_engine_id)
+{
+    wdp->wdp_class = wdp_class;
+    wdp->base_name = xstrdup(name);
+    wdp->full_name = xasprintf("%s@%s", wdp_class->type, name);
+    wdp->netflow_engine_type = netflow_engine_type;
+    wdp->netflow_engine_id = netflow_engine_id;
+}
+
+/* Undoes the results of initialization.
+ *
+ * Normally this function only needs to be called from wdp_close().
+ * However, it may be called by providers due to an error on opening
+ * that occurs after initialization. It this case wdp_close() would
+ * never be called. */
+void
+wdp_uninit(struct wdp *wdp, bool close)
+{
+    /* Save the names before the provider's close callback runs, since that
+     * callback may free 'wdp' itself. */
+    char *base_name = wdp->base_name;
+    char *full_name = wdp->full_name;
+
+    if (close) {
+        wdp->wdp_class->close(wdp);
+    }
+
+    free(base_name);
+    free(full_name);
+}
+\f
+/* Logs the outcome of 'operation' on 'wdp': success at (rate-limited) DBG
+ * level, failure with strerror(error) at (rate-limited) WARN level. */
+static void
+log_operation(const struct wdp *wdp, const char *operation, int error)
+{
+    if (!error) {
+        VLOG_DBG_RL(&wdpmsg_rl, "%s: %s success", wdp_name(wdp), operation);
+    } else {
+        VLOG_WARN_RL(&error_rl, "%s: %s failed (%s)",
+                     wdp_name(wdp), operation, strerror(error));
+    }
+}
--- /dev/null
+/*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef WDP_H
+#define WDP_H 1
+
+#include "classifier.h"
+#include "list.h"
+
+struct ofpbuf;
+struct svec;
+struct wdp;
+struct wdp_class;
+union ofp_action;
+
+struct wdp_table_stats {
+ /* Flows. */
+ unsigned int n_flows; /* Number of flows in table. */
+ unsigned int cur_capacity; /* Current flow table capacity. */
+ unsigned int max_capacity; /* Maximum expansion of flow table capacity. */
+
+ /* Lookups. */
+ unsigned long long int n_hit; /* Number of flow table matches. */
+ unsigned long long int n_missed; /* Number of flow table misses. */
+ unsigned long long int n_lost; /* Misses dropped due to buffer limits. */
+};
+
+struct wdp_stats {
+ struct wdp_table_stats exact;
+ struct wdp_table_stats wild;
+
+ /* Ports. */
+ unsigned int n_ports; /* Current number of ports. */
+ unsigned int max_ports; /* Maximum supported number of ports. */
+
+ /* Lookups. */
+ unsigned long long int n_frags; /* Number of dropped IP fragments. */
+
+ /* Queues. */
+ unsigned int max_miss_queue; /* Max length of WDP_CHAN_MISS queue. */
+ unsigned int max_action_queue; /* Max length of WDP_CHAN_ACTION queue. */
+ unsigned int max_sflow_queue; /* Max length of WDP_CHAN_SFLOW queue. */
+};
+
+/* A flow table entry, as seen by wdp clients.  wdp_rule_init() /
+ * wdp_rule_uninit() manage the 'actions' allocation. */
+struct wdp_rule {
+    struct cls_rule cr;        /* Classifier rule (flow plus wildcards). */
+
+    union ofp_action *actions; /* OpenFlow actions; owned by this rule. */
+    int n_actions;             /* Number of elements in 'actions' array. */
+    long long int created;     /* Time created, in ms since the epoch. */
+    uint16_t idle_timeout;     /* In seconds from time of last use. */
+    uint16_t hard_timeout;     /* In seconds from time of creation. */
+
+    void *client_data;         /* For the client's use; wdp_rule_init() sets
+                                * it to NULL. */
+};
+
+void wdp_rule_init(struct wdp_rule *, const union ofp_action *actions,
+ size_t n_actions);
+void wdp_rule_uninit(struct wdp_rule *);
+\f
+void wdp_run(void);
+void wdp_wait(void);
+
+int wdp_register_provider(const struct wdp_class *);
+int wdp_unregister_provider(const char *type);
+void wdp_enumerate_types(struct svec *types);
+
+int wdp_enumerate_names(const char *type, struct svec *names);
+void wdp_parse_name(const char *datapath_name, char **name, char **type);
+
+void wdp_run_expiration(struct wdp *);
+void wdp_run_revalidation(struct wdp *, bool revalidate_all);
+
+int wdp_open(const char *name, const char *type, struct wdp **);
+int wdp_create(const char *name, const char *type, struct wdp **);
+int wdp_create_and_open(const char *name, const char *type, struct wdp **);
+void wdp_close(struct wdp *);
+
+const char *wdp_name(const struct wdp *);
+const char *wdp_base_name(const struct wdp *);
+int wdp_get_all_names(const struct wdp *, struct svec *);
+
+int wdp_delete(struct wdp *);
+
+int wdp_get_features(const struct wdp *, struct ofpbuf **featuresp);
+int wdp_get_wdp_stats(const struct wdp *, struct wdp_stats *);
+
+int wdp_get_drop_frags(const struct wdp *, bool *drop_frags);
+int wdp_set_drop_frags(struct wdp *, bool drop_frags);
+
+struct wdp_port {
+ struct netdev *netdev;
+ struct ofp_phy_port opp; /* In host byte order. */
+ char *devname; /* Network device name. */
+ bool internal;
+};
+
+int wdp_port_add(struct wdp *, const char *devname, bool internal,
+ uint16_t *port_no);
+int wdp_port_del(struct wdp *, uint16_t port_no);
+int wdp_port_query_by_number(const struct wdp *, uint16_t port_no,
+ struct wdp_port **);
+int wdp_port_query_by_name(const struct wdp *, const char *devname,
+ struct wdp_port **);
+int wdp_port_get_name(struct wdp *, uint16_t port_no, char **namep);
+int wdp_port_list(const struct wdp *,
+ struct wdp_port ***, size_t *n_ports);
+
+int wdp_port_set_config(struct wdp *, uint16_t port_no, uint32_t config);
+
+int wdp_port_poll(const struct wdp *, char **devnamep);
+void wdp_port_poll_wait(const struct wdp *);
+
+int wdp_flow_flush(struct wdp *);
+
+struct wdp_flow_stats {
+ unsigned long long int n_packets; /* Number of matched packets. */
+ unsigned long long int n_bytes; /* Number of matched bytes. */
+ long long int inserted; /* Time inserted into flow table. */
+ long long int used; /* Time last used. */
+ uint8_t tcp_flags; /* Bitwise-OR of TCP flags seen. */
+ uint8_t ip_tos; /* IP TOS for most recent packet. */
+};
+
+/* Finding and inspecting flows. */
+struct wdp_rule *wdp_flow_get(struct wdp *, const flow_t *);
+struct wdp_rule *wdp_flow_match(struct wdp *, const flow_t *);
+
+typedef void wdp_flow_cb_func(struct wdp_rule *, void *aux);
+void wdp_flow_for_each_match(const struct wdp *, const flow_t *,
+ int include, wdp_flow_cb_func *, void *aux);
+
+int wdp_flow_get_stats(const struct wdp *, const struct wdp_rule *,
+ struct wdp_flow_stats *);
+bool wdp_flow_overlaps(const struct wdp *, const flow_t *);
+
+/* Modifying flows. */
+enum wdp_flow_put_flags {
+ /* At least one of these flags should be set. */
+ WDP_PUT_CREATE = 1 << 0, /* Allow creating a new flow. */
+ WDP_PUT_MODIFY = 1 << 1, /* Allow modifying an existing flow. */
+
+ /* Options used only for modifying existing flows. */
+ WDP_PUT_COUNTERS = 1 << 2, /* Clear counters, TCP flags, IP TOS, used. */
+ WDP_PUT_ACTIONS = 1 << 3, /* Update actions. */
+ WDP_PUT_INSERTED = 1 << 4, /* Update 'inserted' to current time. */
+ WDP_PUT_TIMEOUTS = 1 << 5, /* Update 'idle_timeout' and 'hard_timeout'. */
+ WDP_PUT_ALL = (WDP_PUT_COUNTERS | WDP_PUT_ACTIONS
+ | WDP_PUT_INSERTED | WDP_PUT_TIMEOUTS)
+};
+
+struct wdp_flow_put {
+ enum wdp_flow_put_flags flags;
+
+ const flow_t *flow;
+
+ const union ofp_action *actions;
+ size_t n_actions;
+
+ unsigned short int idle_timeout;
+ unsigned short int hard_timeout;
+};
+
+int wdp_flow_put(struct wdp *, struct wdp_flow_put *,
+ struct wdp_flow_stats *old_stats,
+ struct wdp_rule **rulep);
+int wdp_flow_delete(struct wdp *, struct wdp_rule *,
+ struct wdp_flow_stats *final_stats);
+
+/* Sending packets in flows. */
+int wdp_flow_inject(struct wdp *, struct wdp_rule *,
+ uint16_t in_port, const struct ofpbuf *);
+int wdp_execute(struct wdp *, uint16_t in_port,
+ const union ofp_action[], size_t n_actions,
+ const struct ofpbuf *);
+
+/* Receiving packets that miss the flow table. */
+enum wdp_channel {
+ WDP_CHAN_MISS, /* Packet missed in flow table. */
+ WDP_CHAN_ACTION, /* Packet output to OFPP_CONTROLLER. */
+ WDP_CHAN_SFLOW, /* sFlow samples. */
+ WDP_N_CHANS
+};
+
+/* A packet delivered by wdp_recv() on one of the WDP_CHAN_* channels. */
+struct wdp_packet {
+    struct list list;          /* For use in a queue of packets. */
+    enum wdp_channel channel;  /* Channel the packet was received on. */
+    uint16_t in_port;          /* Port the packet arrived on. */
+    int send_len;              /* NOTE(review): presumably the maximum number
+                                * of payload bytes to pass along -- confirm
+                                * against providers. */
+    struct ofpbuf *payload;    /* Packet data; freed by
+                                * wdp_packet_destroy(). */
+};
+
+void wdp_packet_destroy(struct wdp_packet *);
+
+int wdp_recv_get_mask(const struct wdp *, int *listen_mask);
+int wdp_recv_set_mask(struct wdp *, int listen_mask);
+int wdp_get_sflow_probability(const struct wdp *, uint32_t *probability);
+int wdp_set_sflow_probability(struct wdp *, uint32_t probability);
+int wdp_recv(struct wdp *, struct wdp_packet *);
+int wdp_recv_purge(struct wdp *);
+void wdp_recv_wait(struct wdp *);
+
+void wdp_get_netflow_ids(const struct wdp *,
+ uint8_t *engine_type, uint8_t *engine_id);
+
+#endif /* wdp.h */
n_exact = 0;
for (i = 0; i < tcls->n_rules; i++) {
- n_exact += tcls->rules[i]->cls_rule.wc.wildcards == 0;
+ n_exact += tcls->rules[i]->cls_rule.flow.wildcards == 0;
}
return n_exact;
}
{
size_t i;
- assert(rule->cls_rule.wc.wildcards || rule->cls_rule.priority == UINT_MAX);
+ assert(rule->cls_rule.flow.wildcards || rule->cls_rule.flow.priority == UINT_MAX);
for (i = 0; i < tcls->n_rules; i++) {
const struct cls_rule *pos = &tcls->rules[i]->cls_rule;
- if (pos->priority == rule->cls_rule.priority
- && pos->wc.wildcards == rule->cls_rule.wc.wildcards
+ if (pos->flow.priority == rule->cls_rule.flow.priority
+ && pos->flow.wildcards == rule->cls_rule.flow.wildcards
&& flow_equal(&pos->flow, &rule->cls_rule.flow)) {
/* Exact match.
* XXX flow_equal should ignore wildcarded fields */
free(tcls->rules[i]);
tcls->rules[i] = xmemdup(rule, sizeof *rule);
return tcls->rules[i];
- } else if (pos->priority < rule->cls_rule.priority) {
+ } else if (pos->flow.priority < rule->cls_rule.flow.priority) {
break;
}
}
void *wild_field = (char *) &wild->flow + f->ofs;
void *fixed_field = (char *) fixed + f->ofs;
- if ((wild->wc.wildcards & f->wildcards) == f->wildcards ||
+ if ((wild->flow.wildcards & f->wildcards) == f->wildcards ||
!memcmp(wild_field, fixed_field, f->len)) {
/* Definite match. */
continue;
}
- if (wild->wc.wildcards & f->wildcards) {
+ if (wild->flow.wildcards & f->wildcards) {
uint32_t test = read_uint32(wild_field);
uint32_t ip = read_uint32(fixed_field);
int shift = (f_idx == CLS_F_IDX_NW_SRC
? OFPFW_NW_SRC_SHIFT : OFPFW_NW_DST_SHIFT);
- uint32_t mask = flow_nw_bits_to_mask(wild->wc.wildcards, shift);
+ uint32_t mask = flow_nw_bits_to_mask(wild->flow.wildcards, shift);
if (!((test ^ ip) & mask)) {
continue;
}
for (i = 0; i < cls->n_rules; i++) {
struct test_rule *pos = cls->rules[i];
- uint32_t wildcards = pos->cls_rule.wc.wildcards;
+ uint32_t wildcards = pos->cls_rule.flow.wildcards;
if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
&& match(&pos->cls_rule, flow)) {
return &pos->cls_rule;
for (i = 0; i < cls->n_rules; ) {
struct test_rule *pos = cls->rules[i];
- uint32_t wildcards = pos->cls_rule.wc.wildcards;
+ uint32_t wildcards = pos->cls_rule.flow.wildcards;
if (include & (wildcards ? CLS_INC_WILD : CLS_INC_EXACT)
&& match(target, &pos->cls_rule.flow)) {
tcls_remove(cls, pos);
const struct test_rule *tr1 = test_rule_from_cls_rule(cr1);
assert(flow_equal(&cr0->flow, &cr1->flow));
- assert(cr0->wc.wildcards == cr1->wc.wildcards);
- assert(cr0->priority == cr1->priority);
- /* Skip nw_src_mask and nw_dst_mask, because they are derived
- * members whose values are used only for optimization. */
+ assert(cr0->flow.wildcards == cr1->flow.wildcards);
+ assert(cr0->flow.priority == cr1->flow.priority);
+ /* Skip nw_src_mask, nw_dst_mask, and dl_tci_mask, because they
+ * are derived members used only for optimization. */
assert(tr0->aux == tr1->aux);
}
}
{
const struct cls_field *f;
struct test_rule *rule;
- uint32_t wildcards;
flow_t flow;
- wildcards = 0;
memset(&flow, 0, sizeof flow);
+ flow.priority = priority;
for (f = &cls_fields[0]; f < &cls_fields[CLS_N_FIELDS]; f++) {
int f_idx = f - cls_fields;
if (wc_fields & (1u << f_idx)) {
- wildcards |= f->wildcards;
+ flow.wildcards |= f->wildcards;
} else {
int value_idx = (value_pat & (1u << f_idx)) != 0;
memcpy((char *) &flow + f->ofs, values[f_idx][value_idx], f->len);
}
rule = xzalloc(sizeof *rule);
- cls_rule_from_flow(&rule->cls_rule, &flow, wildcards,
- !wildcards ? UINT_MAX : priority);
+ cls_rule_from_flow(&rule->cls_rule, &flow);
return rule;
}
struct test_rule *rule = xmemdup(tcls.rules[rand() % tcls.n_rules],
sizeof(struct test_rule));
int include = rand() % 2 ? CLS_INC_WILD : CLS_INC_EXACT;
- include |= (rule->cls_rule.wc.wildcards
+ include |= (rule->cls_rule.flow.wildcards
? CLS_INC_WILD : CLS_INC_EXACT);
- classifier_for_each_match(&cls, &rule->cls_rule, include,
+ classifier_for_each_match(&cls, &rule->cls_rule.flow, include,
free_rule, &cls);
tcls_delete_matches(&tcls, &rule->cls_rule, include);
compare_classifiers(&cls, &tcls);
}
flow_extract(packet, 1, &flow);
- flow_to_match(&flow, 0, &extracted_match);
+ flow_to_match(&flow, &extracted_match);
if (memcmp(&expected_match, &extracted_match, sizeof expected_match)) {
char *exp_s = ofp_match_to_string(&expected_match, 2);
#include "netdev.h"
#include "ofpbuf.h"
#include "ofproto/ofproto.h"
+#include "ofproto/wdp.h"
#include "openflow/openflow.h"
#include "packets.h"
#include "poll-loop.h"
ovs_fatal(error, "unrecoverable datapath error");
}
unixctl_server_run(unixctl);
- xf_run();
+ wdp_run();
netdev_run();
ofproto_wait(ofproto);
unixctl_server_wait(unixctl);
- xf_wait();
+ wdp_wait();
netdev_wait();
poll_block();
}
netdev_options.args = &options;
netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
netdev_options.may_create = true;
+ netdev_options.may_open = true;
if (iface_is_internal(iface->port->bridge, iface_cfg->name)) {
netdev_options.may_open = true;
}
return error;
}
- mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto));
+ /* XXX mac_learning_run(br->ml, ofproto_get_revalidate_set(br->ofproto)); */
bond_run(br);
error = ofproto_run2(br->ofproto, br->flush);
action.output.len = htons(sizeof action);
action.output.port = htons(OFPP_NORMAL);
memset(&flow, 0, sizeof flow);
- ofproto_add_flow(br->ofproto, &flow, OFPFW_ALL, 0,
- &action, 1, 0);
+ flow.wildcards = OFPFW_ALL;
+ ofproto_add_flow(br->ofproto, &flow, &action, 1, 0);
ofproto_set_in_band(br->ofproto, false);
ofproto_set_max_backoff(br->ofproto, 1);
#include "daemon.h"
#include "leak-checker.h"
#include "netdev.h"
+#include "ofproto/wdp.h"
#include "ovsdb-idl.h"
#include "poll-loop.h"
#include "proc-net-compat.h"
}
}
unixctl_server_run(unixctl);
- xf_run();
+ wdp_run();
netdev_run();
signal_wait(sighup);
}
ovsdb_idl_wait(idl);
unixctl_server_wait(unixctl);
- xf_wait();
+ wdp_wait();
netdev_wait();
poll_block();
}