COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
+COVERAGE_DEFINE(subfacet_install_fail);
struct flow_miss;
struct facet;
static bool facet_is_controller_flow(struct facet *);
+/* Node in 'ofport_dpif''s 'priorities' map. Used to maintain a map from
+ * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
+ * traffic egressing the 'ofport' with that priority should be marked with.
+ *
+ * Entries are looked up with get_priority() and the whole map is emptied by
+ * ofport_clear_priorities(). */
+struct priority_to_dscp {
+ struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
+ uint32_t priority; /* Priority of this queue (see struct flow). */
+
+ uint8_t dscp; /* DSCP bits to mark outgoing traffic with. */
+};
+
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
*
* This is deprecated. It is only for compatibility with broken device drivers
struct vlan_splinter {
struct hmap_node realdev_vid_node;
struct hmap_node vlandev_node;
- uint16_t realdev_ofp_port;
- uint16_t vlandev_ofp_port;
+ ofp_port_t realdev_ofp_port;
+ ofp_port_t vlandev_ofp_port;
int vid;
};
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
-static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
+static void vsp_add(struct ofport_dpif *, ofp_port_t realdev_ofp_port, int vid);
-static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
- uint32_t odp_port);
+static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,
+ odp_port_t odp_port);
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
+static void ofport_update_peer(struct ofport_dpif *);
static void run_fast_rl(void);
struct dpif_completion {
struct ofoperation *op;
};
+/* Reasons that we might need to revalidate every facet, and corresponding
+ * coverage counters (see the COVERAGE_DEFINE(rev_*) definitions below).
+ *
+ * A value of 0 means that there is no need to revalidate.
+ *
+ * It would be nice to have some cleaner way to integrate with coverage
+ * counters, but with only a few reasons I guess this is good enough for
+ * now. */
+enum revalidate_reason {
+ REV_RECONFIGURE = 1, /* Switch configuration changed. */
+ REV_STP, /* Spanning tree protocol port status change. */
+ REV_PORT_TOGGLED, /* Port enabled or disabled by CFM, LACP, ... */
+ REV_FLOW_TABLE, /* Flow table changed. */
+ REV_INCONSISTENCY /* Facet self-check failed. */
+};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_port_toggled);
size_t key_len;
};
+/* Moving averages of the rates at which subfacets are added to and deleted
+ * from a dpif backer, in flows per minute. */
+struct avg_subfacet_rates {
+ double add_rate; /* Moving average of new flows created per minute. */
+ double del_rate; /* Moving average of flows deleted per minute. */
+};
+
+/* All datapaths of a given type share a single dpif backer instance. */
+struct dpif_backer {
+ char *type;
+ int refcount;
+ struct dpif *dpif;
+ struct timer next_expiration;
+ struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
+
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
+
+ /* Facet revalidation flags applying to facets which use this backer. */
+ enum revalidate_reason need_revalidate; /* Revalidate every facet. */
+ struct tag_set revalidate_set; /* Revalidate only matching facets. */
+
+ struct hmap drop_keys; /* Set of dropped odp keys. */
+ bool recv_set_enable; /* Enables or disables receiving packets. */
+
+ struct hmap subfacets;
+ struct governor *governor;
+
+ /* Subfacet statistics.
+ *
+ * These keep track of the total number of subfacets added and deleted and
+ * flow life span. They are useful for computing the flow rates stats
+ * exposed via "ovs-appctl dpif/show". The goal is to learn about
+ * traffic patterns in ways that we can use later to improve Open vSwitch
+ * performance in new situations. */
+ long long int created; /* Time this dpif_backer was created. */
+ unsigned max_n_subfacet; /* Maximum number of flows. */
+ unsigned avg_n_subfacet; /* Average number of flows. */
+ long long int avg_subfacet_life; /* Average life span of subfacets. */
+
+ /* The average number of subfacets... */
+ struct avg_subfacet_rates hourly; /* ...over the last hour. */
+ struct avg_subfacet_rates daily; /* ...over the last day. */
+ struct avg_subfacet_rates lifetime; /* ...over the switch lifetime. */
+ long long int last_minute; /* Last time 'hourly' was updated. */
+
+ /* Number of subfacets added or deleted since 'last_minute'. */
+ unsigned subfacet_add_count;
+ unsigned subfacet_del_count;
+
+ /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
+ unsigned long long int total_subfacet_add_count;
+ unsigned long long int total_subfacet_del_count;
+};
+
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
static void drop_key_clear(struct dpif_backer *);
static struct ofport_dpif *
-odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
+odp_port_to_ofport(const struct dpif_backer *, odp_port_t odp_port);
static void update_moving_averages(struct dpif_backer *backer);
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
* for debugging the asynchronous flow_mod implementation.) */
static bool clogged;
+/* By default, flows in the datapath are wildcarded (megaflows). They
+ * may be disabled with the "ovs-appctl dpif/disable-megaflows" command.
+ *
+ * Consulted wherever a flow mask is composed for a datapath flow "put"
+ * operation (odp_flow_key_from_mask() call sites). */
+static bool enable_megaflows = true;
+
/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);
char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
const char *dp_port;
- if (!iter->tnl_port) {
+ if (!iter->is_tunnel) {
continue;
}
} else {
node = simap_find(&backer->tnl_backers, dp_port);
if (!node) {
- uint32_t odp_port = UINT32_MAX;
+ odp_port_t odp_port = ODPP_NONE;
if (!dpif_port_add(backer->dpif, iter->up.netdev,
&odp_port)) {
- simap_put(&backer->tnl_backers, dp_port, odp_port);
+ simap_put(&backer->tnl_backers, dp_port,
+ odp_to_u32(odp_port));
node = simap_find(&backer->tnl_backers, dp_port);
}
}
}
- iter->odp_port = node ? node->data : OVSP_NONE;
- if (tnl_port_reconfigure(&iter->up, iter->odp_port,
- &iter->tnl_port)) {
+ iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
+ if (tnl_port_reconfigure(iter, iter->up.netdev,
+ iter->odp_port)) {
backer->need_revalidate = REV_RECONFIGURE;
}
}
}
SIMAP_FOR_EACH (node, &tmp_backers) {
- dpif_port_del(backer->dpif, node->data);
+ dpif_port_del(backer->dpif, u32_to_odp(node->data));
}
simap_destroy(&tmp_backers);
/* Datapath port slated for removal from datapath. */
struct odp_garbage {
struct list list_node;
- uint32_t odp_port;
+ odp_port_t odp_port;
};
static int
free(backer_name);
if (error) {
VLOG_ERR("failed to open datapath of type %s: %s", type,
- strerror(error));
+ ovs_strerror(error));
free(backer);
return error;
}
error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
- type, strerror(error));
+ type, ovs_strerror(error));
close_dpif_backer(backer);
return error;
}
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct shash_node *node, *next;
- int max_ports;
+ odp_port_t max_ports;
int error;
int i;
}
max_ports = dpif_get_max_ports(ofproto->backer->dpif);
- ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));
+ ofproto_init_max_ports(ofproto_, u16_to_ofp(MIN(odp_to_u32(max_ports),
+ ofp_to_u16(OFPP_MAX))));
ofproto->netflow = NULL;
ofproto->sflow = NULL;
ofproto_dpif_unixctl_init();
ofproto->has_mirrors = false;
- ofproto->has_bundle_action = false;
-
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
struct oftable *table;
int i;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
complete_operations(ofproto);
}
netflow_destroy(ofproto->netflow);
- dpif_sflow_destroy(ofproto->sflow);
+ dpif_sflow_unref(ofproto->sflow);
hmap_destroy(&ofproto->bundles);
- mac_learning_destroy(ofproto->ml);
+ mac_learning_unref(ofproto->ml);
classifier_destroy(&ofproto->facets);
port->may_enable = true;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
- port->tnl_port = NULL;
+ port->is_tunnel = false;
+ port->peer = NULL;
hmap_init(&port->priorities);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
* because the patch port represents an interface that sFlow considers
* to be "internal" to the switch as a whole, and therefore not an
* candidate for counter polling. */
- port->odp_port = OVSP_NONE;
+ port->odp_port = ODPP_NONE;
+ ofport_update_peer(port);
return 0;
}
port->odp_port = dpif_port.port_no;
if (netdev_get_tunnel_config(netdev)) {
- port->tnl_port = tnl_port_add(&port->up, port->odp_port);
+ tnl_port_add(port, port->up.netdev, port->odp_port);
+ port->is_tunnel = true;
} else {
/* Sanity-check that a mapping doesn't already exist. This
* shouldn't happen for non-tunnel ports. */
}
hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
- hash_int(port->odp_port, 0));
+ hash_odp_port(port->odp_port));
}
dpif_port_destroy(&dpif_port);
char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
const char *dp_port_name;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+
dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
sizeof namebuf);
if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
* happens when the ofproto is being destroyed, since the caller
* assumes that removal of attached ports will happen as part of
* destruction. */
- if (!port->tnl_port) {
+ if (!port->is_tunnel) {
dpif_port_del(ofproto->backer->dpif, port->odp_port);
}
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
- if (port->odp_port != OVSP_NONE && !port->tnl_port) {
+ if (port->peer) {
+ port->peer->peer = NULL;
+ port->peer = NULL;
+ }
+
+ if (port->odp_port != ODPP_NONE && !port->is_tunnel) {
hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
}
- tnl_port_del(port->tnl_port);
+ tnl_port_del(port);
sset_find_and_delete(&ofproto->ports, devname);
sset_find_and_delete(&ofproto->ghost_ports, devname);
- ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
set_bfd(port_, NULL);
if (port->cfm) {
cfm_set_netdev(port->cfm, port->up.netdev);
}
+
+ if (port->is_tunnel && tnl_port_reconfigure(port, port->up.netdev,
+ port->odp_port)) {
+ ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate =
+ REV_RECONFIGURE;
+ }
+
+ ofport_update_peer(port);
}
static void
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
- dpif_sflow_destroy(ds);
+ dpif_sflow_unref(ds);
ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
n_flow_exporters_options);
} else {
if (di) {
- dpif_ipfix_destroy(di);
+ dpif_ipfix_unref(di);
ofproto->ipfix = NULL;
}
}
error = EINVAL;
}
- cfm_destroy(ofport->cfm);
+ cfm_unref(ofport->cfm);
ofport->cfm = NULL;
return error;
}
}
}
-/* Returns true if STP should process 'flow'. */
-static bool
-stp_should_process_flow(const struct flow *flow)
+/* Returns true if STP should process 'flow'. Sets fields in 'wc' that
+ * were used to make the determination. */
+bool
+stp_should_process_flow(const struct flow *flow, struct flow_wildcards *wc)
{
+ memset(&wc->masks.dl_dst, 0xff, sizeof wc->masks.dl_dst);
return eth_addr_equals(flow->dl_dst, eth_addr_stp);
}
-static void
+void
stp_process_packet(const struct ofport_dpif *ofport,
const struct ofpbuf *packet)
{
}
}
\f
-struct priority_to_dscp *
+/* Translates OpenFlow queue 'queue_id' on 'ofproto''s backer datapath into
+ * the datapath priority value, storing it in '*priority'.
+ *
+ * Thin wrapper around dpif_queue_to_priority() and returns its result
+ * directly (presumably 0 on success, a positive errno value on failure --
+ * confirm against the dpif implementation). */
+int
+ofproto_dpif_queue_to_priority(const struct ofproto_dpif *ofproto,
+ uint32_t queue_id, uint32_t *priority)
+{
+ return dpif_queue_to_priority(ofproto->backer->dpif, queue_id, priority);
+}
+
+static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
{
struct priority_to_dscp *pdscp;
return NULL;
}
+/* Looks up the DSCP bits associated with datapath 'priority' on 'ofport'
+ * (see the 'priorities' map described at struct priority_to_dscp).
+ *
+ * If a mapping exists, stores the DSCP bits in '*dscp' and returns true.
+ * Otherwise, stores 0 in '*dscp' and returns false. */
+bool
+ofproto_dpif_dscp_from_priority(const struct ofport_dpif *ofport,
+ uint32_t priority, uint8_t *dscp)
+{
+ struct priority_to_dscp *pdscp = get_priority(ofport, priority);
+ *dscp = pdscp ? pdscp->dscp : 0;
+ return pdscp != NULL;
+}
+
static void
ofport_clear_priorities(struct ofport_dpif *ofport)
{
}
static bool
-bundle_add_port(struct ofbundle *bundle, uint16_t ofp_port,
+bundle_add_port(struct ofbundle *bundle, ofp_port_t ofp_port,
struct lacp_slave_settings *lacp)
{
struct ofport_dpif *port;
hmap_remove(&ofproto->bundles, &bundle->hmap_node);
free(bundle->name);
free(bundle->trunks);
- lacp_destroy(bundle->lacp);
- bond_destroy(bundle->bond);
+ lacp_unref(bundle->lacp);
+ bond_unref(bundle->bond);
free(bundle);
}
}
lacp_configure(bundle->lacp, s->lacp);
} else {
- lacp_destroy(bundle->lacp);
+ lacp_unref(bundle->lacp);
bundle->lacp = NULL;
}
bond_slave_register(bundle->bond, port, port->up.netdev);
}
} else {
- bond_destroy(bundle->bond);
+ bond_unref(bundle->bond);
bundle->bond = NULL;
}
if (list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
} else if (list_is_short(&bundle->ports)) {
- bond_destroy(bundle->bond);
+ bond_unref(bundle->bond);
bundle->bond = NULL;
}
}
} else {
VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
"%s (%s)", port->bundle->name,
- netdev_get_name(port->up.netdev), strerror(error));
+ netdev_get_name(port->up.netdev), ovs_strerror(error));
}
}
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
"packets, last error was: %s",
- bundle->name, n_errors, n_packets, strerror(error));
+ bundle->name, n_errors, n_packets, ovs_strerror(error));
} else {
VLOG_DBG("bond %s: sent %d gratuitous learning packets",
bundle->name, n_packets);
/* Ports. */
struct ofport_dpif *
-get_ofp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
+get_ofp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
return ofport ? ofport_dpif_cast(ofport) : NULL;
}
struct ofport_dpif *
-get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
+get_odp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
{
struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
return port && &ofproto->up == port->up.ofproto ? port : NULL;
ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
}
-struct ofport_dpif *
-ofport_get_peer(const struct ofport_dpif *ofport_dpif)
+static void
+ofport_update_peer(struct ofport_dpif *ofport)
{
const struct ofproto_dpif *ofproto;
- const char *peer;
+ struct dpif_backer *backer;
+ const char *peer_name;
- peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
- if (!peer) {
- return NULL;
+ if (!netdev_vport_is_patch(ofport->up.netdev)) {
+ return;
+ }
+
+ backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
+ backer->need_revalidate = REV_RECONFIGURE;
+
+ if (ofport->peer) {
+ ofport->peer->peer = NULL;
+ ofport->peer = NULL;
+ }
+
+ peer_name = netdev_vport_patch_peer(ofport->up.netdev);
+ if (!peer_name) {
+ return;
}
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- struct ofport *ofport;
+ struct ofport *peer_ofport;
+ struct ofport_dpif *peer;
+ const char *peer_peer;
+
+ if (ofproto->backer != backer) {
+ continue;
+ }
- ofport = shash_find_data(&ofproto->up.port_by_name, peer);
- if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
- return ofport_dpif_cast(ofport);
+ peer_ofport = shash_find_data(&ofproto->up.port_by_name, peer_name);
+ if (!peer_ofport) {
+ continue;
+ }
+
+ peer = ofport_dpif_cast(peer_ofport);
+ peer_peer = netdev_vport_patch_peer(peer->up.netdev);
+ if (peer_peer && !strcmp(netdev_get_name(ofport->up.netdev),
+ peer_peer)) {
+ ofport->peer = peer;
+ ofport->peer->peer = ofport;
}
+
+ return;
}
- return NULL;
}
static void
port_run_fast(ofport);
- if (ofport->tnl_port
- && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
- &ofport->tnl_port)) {
- ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
- }
-
if (ofport->cfm) {
int cfm_opup = cfm_get_opup(ofport->cfm);
if (ofport->may_enable != enable) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
-
- if (ofproto->has_bundle_action) {
- ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
- }
+ ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
}
ofport->may_enable = enable;
dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
- uint32_t port_no = UINT32_MAX;
+ odp_port_t port_no = ODPP_NONE;
int error;
error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
return error;
}
if (netdev_get_tunnel_config(netdev)) {
- simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
+ simap_put(&ofproto->backer->tnl_backers,
+ dp_port_name, odp_to_u32(port_no));
}
}
}
static int
-port_del(struct ofproto *ofproto_, uint16_t ofp_port)
+port_del(struct ofproto *ofproto_, ofp_port_t ofp_port)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
sset_find_and_delete(&ofproto->ghost_ports,
netdev_get_name(ofport->up.netdev));
ofproto->backer->need_revalidate = REV_RECONFIGURE;
- if (!ofport->tnl_port) {
+ if (!ofport->is_tunnel) {
error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
if (!error) {
/* The caller is going to close ofport->up.netdev. If this is a
uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
struct xlate_out xout;
bool xout_garbage; /* 'xout' needs to be uninitialized? */
+
+ struct ofpbuf mask; /* Flow mask for "put" ops. */
+ struct odputil_keybuf maskbuf;
+
+ /* If this is a "put" op, then a pointer to the subfacet that should
+ * be marked as uninstalled if the operation fails. */
+ struct subfacet *subfacet;
};
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
-enum slow_path_reason
-process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofport_dpif *ofport, const struct ofpbuf *packet)
-{
- if (!ofport) {
- return 0;
- } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
- if (packet) {
- cfm_process_heartbeat(ofport->cfm, packet);
- }
- return SLOW_CFM;
- } else if (ofport->bfd && bfd_should_process_flow(flow)) {
- if (packet) {
- bfd_process_packet(ofport->bfd, flow, packet);
- }
- return SLOW_BFD;
- } else if (ofport->bundle && ofport->bundle->lacp
- && flow->dl_type == htons(ETH_TYPE_LACP)) {
- if (packet) {
- lacp_process_packet(ofport->bundle->lacp, ofport, packet);
- }
- return SLOW_LACP;
- } else if (ofproto->stp && stp_should_process_flow(flow)) {
- if (packet) {
- stp_process_packet(ofport, packet);
- }
- return SLOW_STP;
- } else {
- return 0;
- }
-}
-
static struct flow_miss *
flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
const struct flow *flow, uint32_t hash)
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
struct flow_miss_op *op)
{
- if (miss->flow.in_port
- != vsp_realdev_to_vlandev(miss->ofproto, miss->flow.in_port,
+ if (miss->flow.in_port.ofp_port
+ != vsp_realdev_to_vlandev(miss->ofproto, miss->flow.in_port.ofp_port,
miss->flow.vlan_tci)) {
/* This packet was received on a VLAN splinter port. We
* added a VLAN to the packet to make the packet resemble
eth_pop_vlan(packet);
}
+ op->subfacet = NULL;
op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
op->dpif_op.u.execute.key_len = miss->key_len;
op->dpif_op.u.execute.packet = packet;
+ ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf);
}
/* Helper for handle_flow_miss_without_facet() and
struct dpif_backer *backer = miss->ofproto->backer;
uint32_t hash;
+ switch (flow_miss_model) {
+ case OFPROTO_HANDLE_MISS_AUTO:
+ break;
+ case OFPROTO_HANDLE_MISS_WITH_FACETS:
+ return true;
+ case OFPROTO_HANDLE_MISS_WITHOUT_FACETS:
+ return false;
+ }
+
if (!backer->governor) {
size_t n_subfacets;
subfacet->path = want_path;
+ ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf);
+ if (enable_megaflows) {
+ odp_flow_key_from_mask(&op->mask, &facet->xout.wc.masks,
+ &miss->flow, UINT32_MAX);
+ }
+
op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
+ op->subfacet = subfacet;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
put->key = miss->key;
put->key_len = miss->key_len;
+ put->mask = op->mask.data;
+ put->mask_len = op->mask.size;
+
if (want_path == SF_FAST_PATH) {
put->actions = facet->xout.odp_actions.data;
put->actions_len = facet->xout.odp_actions.size;
if (error && !VLOG_DROP_WARN(&rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
- VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
- ds_cstr(&ds));
+ VLOG_WARN("Failed to delete drop key (%s) (%s)",
+ ovs_strerror(error), ds_cstr(&ds));
ds_destroy(&ds);
}
ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
const struct nlattr *key, size_t key_len,
struct flow *flow, enum odp_key_fitness *fitnessp,
- struct ofproto_dpif **ofproto, uint32_t *odp_in_port)
+ struct ofproto_dpif **ofproto, odp_port_t *odp_in_port)
{
const struct ofport_dpif *port;
enum odp_key_fitness fitness;
}
if (odp_in_port) {
- *odp_in_port = flow->in_port;
+ *odp_in_port = flow->in_port.odp_port;
}
port = (tnl_port_should_receive(flow)
- ? ofport_dpif_cast(tnl_port_receive(flow))
- : odp_port_to_ofport(backer, flow->in_port));
- flow->in_port = port ? port->up.ofp_port : OFPP_NONE;
+ ? tnl_port_receive(flow)
+ : odp_port_to_ofport(backer, flow->in_port.odp_port));
+ flow->in_port.ofp_port = port ? port->up.ofp_port : OFPP_NONE;
if (!port) {
goto exit;
}
struct flow_miss *miss = &misses[n_misses];
struct flow_miss *existing_miss;
struct ofproto_dpif *ofproto;
- uint32_t odp_in_port;
+ odp_port_t odp_in_port;
struct flow flow;
uint32_t hash;
int error;
hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
hash_bytes(drop_key->key, drop_key->key_len, 0));
dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
- drop_key->key, drop_key->key_len, NULL, 0, NULL);
+ drop_key->key, drop_key->key_len,
+ NULL, 0, NULL, 0, NULL);
}
continue;
}
ofproto->n_missed++;
flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
- &flow.tunnel, flow.in_port, &miss->flow);
+ &flow.tunnel, &flow.in_port, &miss->flow);
/* Add other packets to a to-do list. */
hash = flow_hash(&miss->flow, 0);
}
dpif_operate(backer->dpif, dpif_ops, n_ops);
- /* Free memory. */
for (i = 0; i < n_ops; i++) {
+ if (dpif_ops[i]->error != 0
+ && flow_miss_ops[i].dpif_op.type == DPIF_OP_FLOW_PUT
+ && flow_miss_ops[i].subfacet) {
+ struct subfacet *subfacet = flow_miss_ops[i].subfacet;
+
+ COVERAGE_INC(subfacet_install_fail);
+
+ subfacet->path = SF_NOT_INSTALLED;
+ }
+
+ /* Free memory. */
if (flow_miss_ops[i].xout_garbage) {
xlate_out_uninit(&flow_miss_ops[i].xout);
}
struct ofproto_dpif *ofproto;
union user_action_cookie cookie;
struct flow flow;
- uint32_t odp_in_port;
+ odp_port_t odp_in_port;
if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
&flow, NULL, &ofproto, &odp_in_port)
{
const struct dpif_flow_stats *stats;
struct dpif_flow_dump dump;
- const struct nlattr *key;
- size_t key_len;
+ const struct nlattr *key, *mask;
+ size_t key_len, mask_len;
dpif_flow_dump_start(&dump, backer->dpif);
- while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
+ while (dpif_flow_dump_next(&dump, &key, &key_len,
+ &mask, &mask_len, NULL, NULL, &stats)) {
struct subfacet *subfacet;
uint32_t key_hash;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto, flow->in_port));
+ ofp_port_to_odp_port(ofproto, flow->in_port.ofp_port));
error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
odp_actions, actions_len, packet);
switch (nl_attr_type(a)) {
case OVS_ACTION_ATTR_OUTPUT:
- port = get_odp_port(ofproto, nl_attr_get_u32(a));
+ port = get_odp_port(ofproto, nl_attr_get_odp_port(a));
if (port && port->bundle && port->bundle->bond) {
bond_account(port->bundle->bond, &facet->flow,
vlan_tci_to_vid(vlan_tci), n_bytes);
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- in_port = get_ofp_port(ofproto, facet->flow.in_port);
- if (in_port && in_port->tnl_port) {
+ in_port = get_ofp_port(ofproto, facet->flow.in_port.ofp_port);
+ if (in_port && in_port->is_tunnel) {
netdev_vport_inc_rx(in_port->up.netdev, &stats);
}
enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
const struct nlattr *actions = odp_actions->data;
size_t actions_len = odp_actions->size;
+ struct odputil_keybuf maskbuf;
+ struct ofpbuf mask;
uint64_t slow_path_stub[128 / 8];
enum dpif_flow_put_flags flags;
&actions, &actions_len);
}
+ ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
+ if (enable_megaflows) {
+ odp_flow_key_from_mask(&mask, &facet->xout.wc.masks,
+ &facet->flow, UINT32_MAX);
+ }
+
ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
- subfacet->key_len, actions, actions_len, stats);
+ subfacet->key_len, mask.data, mask.size,
+ actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
}
- if (!ret) {
+ if (ret) {
+ COVERAGE_INC(subfacet_install_fail);
+ } else {
subfacet->path = path;
}
return ret;
return NULL;
}
+ if (wc) {
+ memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
+ wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
+ }
+
cls = &ofproto->up.tables[table_id].cls;
frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
{
struct ofport_dpif *port;
- port = get_ofp_port(ofproto, flow->in_port);
+ port = get_ofp_port(ofproto, flow->in_port.ofp_port);
if (!port) {
- VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
+ VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
+ flow->in_port.ofp_port);
return ofproto->miss_rule;
}
struct xlate_out xout;
struct xlate_in xin;
struct flow flow;
+ union flow_in_port in_port_;
int error;
ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
/* Use OFPP_NONE as the in_port to avoid special packet processing. */
- flow_extract(packet, 0, 0, NULL, OFPP_NONE, &flow);
+ in_port_.ofp_port = OFPP_NONE;
+ flow_extract(packet, 0, 0, NULL, &in_port_, &flow);
odp_flow_key_from_flow(&key, &flow, ofp_port_to_odp_port(ofproto,
OFPP_LOCAL));
dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
if (error) {
VLOG_WARN_RL(&rl, "%s: failed to send packet on port %s (%s)",
ofproto->up.name, netdev_get_name(ofport->up.netdev),
- strerror(error));
+ ovs_strerror(error));
}
ofproto->stats.tx_packets++;
ofpbuf_use_stack(&buf, stub, stub_size);
if (slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)) {
- uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
+ uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif,
+ ODPP_NONE);
odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf);
} else {
put_userspace_action(ofproto, &buf, flow, &cookie,
uint32_t pid;
pid = dpif_port_get_pid(ofproto->backer->dpif,
- ofp_port_to_odp_port(ofproto, flow->in_port));
+ ofp_port_to_odp_port(ofproto,
+ flow->in_port.ofp_port));
return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
}
}
}
+/* Returns the tag to associate with 'flow' in table 'table_id' of 'ofproto'.
+ *
+ * For tables 1 through N_TABLES-1 that have an 'other_table': returns
+ * 'rule''s own tag when 'rule' is nonnull and tagged, otherwise a tag
+ * computed from 'flow' with the table's mask and basis via
+ * rule_calculate_tag().
+ *
+ * Returns 0 (no tag) in every other case, including for table 0 and for
+ * out-of-range table ids. */
+tag_type
+calculate_flow_tag(struct ofproto_dpif *ofproto, const struct flow *flow,
+ uint8_t table_id, struct rule_dpif *rule)
+{
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ return (rule && rule->tag
+ ? rule->tag
+ : rule_calculate_tag(flow, &table->other_table->mask,
+ table->basis));
+ }
+ }
+
+ return 0;
+}
\f
/* Optimized flow revalidation.
*
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto, flow->in_port));
+ ofp_port_to_odp_port(ofproto,
+ flow->in_port.ofp_port));
dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
if (netflow_options) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
return netflow_set_options(ofproto->netflow, netflow_options);
- } else {
+ } else if (ofproto->netflow) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
netflow_destroy(ofproto->netflow);
ofproto->netflow = NULL;
- return 0;
}
+
+ return 0;
}
static void
ds_put_cstr(&ds, " port VLAN MAC Age\n");
LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
struct ofbundle *bundle = e->port.p;
- ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
- ofbundle_get_a_port(bundle)->odp_port,
- e->vlan, ETH_ADDR_ARGS(e->mac),
+ char name[OFP_MAX_PORT_NAME_LEN];
+
+ ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
+ name, sizeof name);
+ ds_put_format(&ds, "%5s %4d "ETH_ADDR_FMT" %3d\n",
+ name, e->vlan, ETH_ADDR_ARGS(e->mac),
mac_entry_age(ofproto->ml, e));
}
unixctl_command_reply(conn, ds_cstr(&ds));
};
static void
-trace_format_rule(struct ds *result, uint8_t table_id, int level,
- const struct rule_dpif *rule)
+trace_format_rule(struct ds *result, int level, const struct rule_dpif *rule)
{
ds_put_char_multiple(result, '\t', level);
if (!rule) {
}
ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
- table_id, ntohll(rule->up.flow_cookie));
+ rule ? rule->up.table_id : 0, ntohll(rule->up.flow_cookie));
cls_rule_format(&rule->up.cr, result);
ds_put_char(result, '\n');
}
static void
-trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule)
+trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
+ struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
struct ds *result = trace->result;
ds_put_char(result, '\n');
- trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
- trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
- trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
- trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
+ trace_format_flow(result, recurse + 1, "Resubmitted flow", trace);
+ trace_format_regs(result, recurse + 1, "Resubmitted regs", trace);
+ trace_format_odp(result, recurse + 1, "Resubmitted odp", trace);
+ trace_format_rule(result, recurse + 1, rule);
}
static void
-trace_report(struct xlate_ctx *ctx, const char *s)
+trace_report(struct xlate_in *xin, const char *s, int recurse)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
+ struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
struct ds *result = trace->result;
- ds_put_char_multiple(result, '\t', ctx->recurse);
+ ds_put_char_multiple(result, '\t', recurse);
ds_put_cstr(result, s);
ds_put_char(result, '\n');
}
{
const struct dpif_backer *backer;
struct ofproto_dpif *ofproto;
- struct ofpbuf odp_key;
+ struct ofpbuf odp_key, odp_mask;
struct ofpbuf *packet;
struct ds result;
struct flow flow;
backer = NULL;
ds_init(&result);
ofpbuf_init(&odp_key, 0);
+ ofpbuf_init(&odp_mask, 0);
/* Handle "-generate" or a hex string as the last argument. */
if (!strcmp(argv[argc - 1], "-generate")) {
* bridge is specified. If function odp_flow_key_from_string()
* returns 0, the flow is a odp_flow. If function
* parse_ofp_exact_flow() returns 0, the flow is a br_flow. */
- if (!odp_flow_key_from_string(argv[argc - 1], NULL, &odp_key)) {
+ if (!odp_flow_from_string(argv[argc - 1], NULL, &odp_key, &odp_mask)) {
/* If the odp_flow is the second argument,
* the datapath name is the first argument. */
if (argc == 3) {
if (!packet->size) {
flow_compose(packet, &flow);
} else {
+ union flow_in_port in_port_;
+
+ in_port_ = flow.in_port;
ds_put_cstr(&result, "Packet: ");
s = ofp_packet_to_string(packet->data, packet->size);
ds_put_cstr(&result, s);
/* Use the metadata from the flow and the packet argument
* to reconstruct the flow. */
flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL,
- flow.in_port, &flow);
+ &in_port_, &flow);
}
}
ds_destroy(&result);
ofpbuf_delete(packet);
ofpbuf_uninit(&odp_key);
+ ofpbuf_uninit(&odp_mask);
}
void
rule = rule_dpif_lookup(ofproto, flow, NULL);
- trace_format_rule(ds, 0, 0, rule);
+ trace_format_rule(ds, 0, rule);
if (rule == ofproto->miss_rule) {
ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
} else if (rule == ofproto->no_packet_in_rule) {
const struct shash_node *node = ports[j];
struct ofport *ofport = node->data;
struct smap config;
- uint32_t odp_port;
+ odp_port_t odp_port;
ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
ofport->ofp_port);
odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
- if (odp_port != OVSP_NONE) {
+ if (odp_port != ODPP_NONE) {
ds_put_format(ds, "%"PRIu32":", odp_port);
} else {
ds_put_cstr(ds, "none:");
ds_destroy(&ds);
}
+/* Disable using the megaflows.
+ *
+ * This command is only needed for advanced debugging, so it's not
+ * documented in the man page. */
+static void
+ofproto_unixctl_dpif_disable_megaflows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED,
+ void *aux OVS_UNUSED)
+{
+ struct ofproto_dpif *ofproto;
+
+ /* Global flag consulted elsewhere when installing datapath flows. */
+ enable_megaflows = false;
+
+ /* Flush every ofproto so that already-installed flows are discarded and
+ * re-created under the new setting.  NOTE(review): presumably flush()
+ * removes all facets/subfacets for the bridge -- confirm its semantics. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ flush(&ofproto->up);
+ }
+
+ unixctl_command_reply(conn, "megaflows disabled");
+}
+
+/* Re-enable using megaflows.
+ *
+ * This command is only needed for advanced debugging, so it's not
+ * documented in the man page. */
+static void
+ofproto_unixctl_dpif_enable_megaflows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED,
+ void *aux OVS_UNUSED)
+{
+ struct ofproto_dpif *ofproto;
+
+ /* Global flag consulted elsewhere when installing datapath flows. */
+ enable_megaflows = true;
+
+ /* Flush every ofproto so that already-installed flows are discarded and
+ * re-created under the new setting.  NOTE(review): presumably flush()
+ * removes all facets/subfacets for the bridge -- confirm its semantics. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ flush(&ofproto->up);
+ }
+
+ unixctl_command_reply(conn, "megaflows enabled");
+}
+
static void
ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
int argc OVS_UNUSED, const char *argv[],
ofproto_unixctl_dpif_del_flows, NULL);
unixctl_command_register("dpif/dump-megaflows", "bridge", 1, 1,
ofproto_unixctl_dpif_dump_megaflows, NULL);
+ unixctl_command_register("dpif/disable-megaflows", "", 0, 0,
+ ofproto_unixctl_dpif_disable_megaflows, NULL);
+ unixctl_command_register("dpif/enable-megaflows", "", 0, 0,
+ ofproto_unixctl_dpif_enable_megaflows, NULL);
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
* widespread use, we will delete these interfaces. */
static int
-set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
+set_realdev(struct ofport *ofport_, ofp_port_t realdev_ofp_port, int vid)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
}
static uint32_t
-hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
+hash_realdev_vid(ofp_port_t realdev_ofp_port, int vid)
{
- return hash_2words(realdev_ofp_port, vid);
+ return hash_2words(ofp_to_u16(realdev_ofp_port), vid);
}
/* Returns the OFP port number of the Linux VLAN device that corresponds to
*
* Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
* function just returns its 'realdev_ofp_port' argument. */
-uint16_t
+ofp_port_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
- uint16_t realdev_ofp_port, ovs_be16 vlan_tci)
+ ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
{
if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
int vid = vlan_tci_to_vid(vlan_tci);
}
static struct vlan_splinter *
-vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
+vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port)
{
struct vlan_splinter *vsp;
- HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
+ HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node,
+ hash_ofp_port(vlandev_ofp_port),
&ofproto->vlandev_map) {
if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
return vsp;
* Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
* VLAN device. Unless VLAN splinters are enabled, this is what this function
* always does.*/
-static uint16_t
+static ofp_port_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
- uint16_t vlandev_ofp_port, int *vid)
+ ofp_port_t vlandev_ofp_port, int *vid)
{
if (!hmap_is_empty(&ofproto->vlandev_map)) {
const struct vlan_splinter *vsp;
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
- uint16_t realdev;
+ ofp_port_t realdev;
int vid;
- realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
+ realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
if (!realdev) {
return false;
}
/* Cause the flow to be processed as if it came in on the real device with
* the VLAN device's VLAN ID. */
- flow->in_port = realdev;
+ flow->in_port.ofp_port = realdev;
flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
return true;
}
}
static void
-vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
+vsp_add(struct ofport_dpif *port, ofp_port_t realdev_ofp_port, int vid)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
vsp = xmalloc(sizeof *vsp);
hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
- hash_int(port->up.ofp_port, 0));
+ hash_ofp_port(port->up.ofp_port));
hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
hash_realdev_vid(realdev_ofp_port, vid));
vsp->realdev_ofp_port = realdev_ofp_port;
}
}
-uint32_t
-ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
+odp_port_t
+ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
- return ofport ? ofport->odp_port : OVSP_NONE;
+ return ofport ? ofport->odp_port : ODPP_NONE;
}
static struct ofport_dpif *
-odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
+odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port)
{
struct ofport_dpif *port;
- HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
- hash_int(odp_port, 0),
+ HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
&backer->odp_to_ofport_map) {
if (port->odp_port == odp_port) {
return port;
return NULL;
}
-static uint16_t
-odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
+static ofp_port_t
+odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
{
struct ofport_dpif *port;
forward_bpdu_changed,
set_mac_table_config,
set_realdev,
+ NULL, /* meter_get_features */
+ NULL, /* meter_set */
+ NULL, /* meter_get */
+ NULL, /* meter_del */
};