/*
- * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <errno.h>
-#include "autopath.h"
+#include "bfd.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
+#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
+#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "simap.h"
+#include "smap.h"
#include "timer.h"
+#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
/* Maximum depth of flow table recursion (due to resubmit actions) in a
* flow translation. */
-#define MAX_RESUBMIT_RECURSION 32
+#define MAX_RESUBMIT_RECURSION 64
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
struct ofport_dpif;
struct ofproto_dpif;
+struct flow_miss;
+struct facet;
struct rule_dpif {
struct rule up;
static struct rule_dpif *rule_dpif_lookup__(struct ofproto_dpif *,
const struct flow *,
uint8_t table);
+static struct rule_dpif *rule_dpif_miss_rule(struct ofproto_dpif *ofproto,
+ const struct flow *flow);
+static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static void rule_credit_stats(struct rule_dpif *,
const struct dpif_flow_stats *);
-static void flow_push_stats(struct rule_dpif *, const struct flow *,
- const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
- const struct flow_wildcards *,
- uint32_t basis);
+ const struct minimask *, uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
-struct action_xlate_ctx {
-/* action_xlate_ctx_init() initializes these members. */
+struct xlate_ctx;
+
+/* Initial values of fields of the packet that may be changed during
+ * flow processing and needed later. */
+struct initial_vals {
+ /* This is the value of vlan_tci in the packet as actually received from
+ * dpif. This is the same as the facet's flow.vlan_tci unless the packet
+ * was received via a VLAN splinter. In that case, this value is 0
+ * (because the packet as actually received from the dpif had no 802.1Q
+ * tag) but the facet's flow.vlan_tci is set to the VLAN that the splinter
+ * represents.
+ *
+ * This member should be removed when the VLAN splinters feature is no
+ * longer needed. */
+ ovs_be16 vlan_tci;
+};
+
+struct xlate_out {
+ tag_type tags; /* Tags associated with actions. */
+ enum slow_path_reason slow; /* 0 if fast path may be used. */
+ bool has_learn; /* Actions include NXAST_LEARN? */
+ bool has_normal; /* Actions output to OFPP_NORMAL? */
+ bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
+ uint16_t nf_output_iface; /* Output interface index for NetFlow. */
+ mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
+
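+ /* Datapath actions produced by translation. Presumably 'odp_actions' uses
+ * 'odp_actions_stub' as its initial inline storage (the usual ofpbuf stub
+ * pattern) and spills to the heap only if the actions outgrow it, which is
+ * why a matching xlate_out_uninit() call is needed. */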
+ uint64_t odp_actions_stub[256 / 8];
+ struct ofpbuf odp_actions;
+};
- /* The ofproto. */
+struct xlate_in {
struct ofproto_dpif *ofproto;
/* Flow to which the OpenFlow actions apply. xlate_actions() will modify
* this flow when actions change header fields. */
struct flow flow;
+ struct initial_vals initial_vals;
+
/* The packet corresponding to 'flow', or a null pointer if we are
* revalidating without a packet to refer to. */
const struct ofpbuf *packet;
* not if we are just revalidating. */
bool may_learn;
- /* The rule that we are currently translating, or NULL. */
+ /* The rule initiating translation or NULL. */
struct rule_dpif *rule;
+ /* The actions to translate. If 'rule' is not NULL, these may be NULL. */
+ const struct ofpact *ofpacts;
+ size_t ofpacts_len;
+
/* Union of the set of TCP flags seen so far in this flow. (Used only by
* NXAST_FIN_TIMEOUT. Set to zero to avoid updating rules'
* timeouts.) */
* resubmit or OFPP_TABLE action didn't find a matching rule.
*
* This is normally null so the client has to set it manually after
- * calling action_xlate_ctx_init(). */
- void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);
+ * calling xlate_in_init(). */
+ void (*resubmit_hook)(struct xlate_ctx *, struct rule_dpif *rule);
/* If nonnull, flow translation calls this function to report some
* significant decision, e.g. to explain why OFPP_NORMAL translation
* dropped a packet. */
- void (*report_hook)(struct action_xlate_ctx *, const char *s);
+ void (*report_hook)(struct xlate_ctx *, const char *s);
/* If nonnull, flow translation credits the specified statistics to each
* rule reached through a resubmit or OFPP_TABLE action.
*
* This is normally null so the client has to set it manually after
- * calling action_xlate_ctx_init(). */
+ * calling xlate_in_init(). */
const struct dpif_flow_stats *resubmit_stats;
+};
-/* xlate_actions() initializes and uses these members. The client might want
- * to look at them after it returns. */
+/* Context used by xlate_actions() and its callees. */
+struct xlate_ctx {
+ struct xlate_in *xin;
+ struct xlate_out *xout;
- struct ofpbuf *odp_actions; /* Datapath actions. */
- tag_type tags; /* Tags associated with actions. */
- enum slow_path_reason slow; /* 0 if fast path may be used. */
- bool has_learn; /* Actions include NXAST_LEARN? */
- bool has_normal; /* Actions output to OFPP_NORMAL? */
- bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
- uint16_t nf_output_iface; /* Output interface index for NetFlow. */
- mirror_mask_t mirrors; /* Bitmap of associated mirrors. */
+ struct ofproto_dpif *ofproto;
+
+ /* Flow at the last commit. */
+ struct flow base_flow;
+
+ /* Tunnel IP destination address as received. This is stored separately
+ * as the base_flow.tunnel is cleared on init to reflect the datapath
+ * behavior. Used to make sure not to send tunneled output to ourselves,
+ * which might lead to an infinite loop. This could happen easily
+ * if a tunnel is marked as 'remote_ip=flow', and the flow does not
+ * actually set the tun_dst field. */
+ ovs_be32 orig_tunnel_ip_dst;
-/* xlate_actions() initializes and uses these members, but the client has no
- * reason to look at them. */
+ /* Stack for the push and pop actions. Each stack element is of type
+ * "union mf_subvalue". */
+ union mf_subvalue init_stack[1024 / sizeof(union mf_subvalue)];
+ struct ofpbuf stack;
+
+ /* The rule that we are currently translating, or NULL. */
+ struct rule_dpif *rule;
int recurse; /* Recursion level, via xlate_table_action. */
bool max_resubmit_trigger; /* Recursed too deeply during translation. */
- struct flow base_flow; /* Flow at the last commit. */
uint32_t orig_skb_priority; /* Priority when packet arrived. */
uint8_t table_id; /* OpenFlow table ID where flow was found. */
uint32_t sflow_n_outputs; /* Number of output ports. */
- uint16_t sflow_odp_port; /* Output port for composing sFlow action. */
+ uint32_t sflow_odp_port; /* Output port for composing sFlow action. */
uint16_t user_cookie_offset;/* Used for user_action_cookie fixup. */
bool exit; /* No further actions should be processed. */
- struct flow orig_flow; /* Copy of original flow. */
};
-static void action_xlate_ctx_init(struct action_xlate_ctx *,
- struct ofproto_dpif *, const struct flow *,
- ovs_be16 initial_tci, struct rule_dpif *,
- uint8_t tcp_flags, const struct ofpbuf *);
-static void xlate_actions(struct action_xlate_ctx *,
- const struct ofpact *ofpacts, size_t ofpacts_len,
- struct ofpbuf *odp_actions);
-static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
- const struct ofpact *ofpacts,
- size_t ofpacts_len);
+static void xlate_in_init(struct xlate_in *, struct ofproto_dpif *,
+ const struct flow *, const struct initial_vals *,
+ struct rule_dpif *, uint8_t tcp_flags,
+ const struct ofpbuf *);
+
+static void xlate_out_uninit(struct xlate_out *);
+
+static void xlate_actions(struct xlate_in *, struct xlate_out *);
+
+static void xlate_actions_for_side_effects(struct xlate_in *);
+
+static void xlate_table_action(struct xlate_ctx *, uint16_t in_port,
+ uint8_t table_id, bool may_packet_in);
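+
+ /* A rough sketch of the expected calling sequence for the xlate_in/xlate_out
+ * API declared above (the variable names below are placeholders, not code
+ * from this file):
+ *
+ *     struct xlate_in xin;
+ *     struct xlate_out xout;
+ *
+ *     xlate_in_init(&xin, ofproto, &flow, &initial_vals, rule, tcp_flags,
+ *                   packet);
+ *     xlate_actions(&xin, &xout);
+ *     ... use xout.odp_actions, xout.slow, xout.tags, etc. ...
+ *     xlate_out_uninit(&xout);
+ */
+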
static size_t put_userspace_action(const struct ofproto_dpif *,
struct ofpbuf *odp_actions,
const struct flow *,
- const union user_action_cookie *);
+ const union user_action_cookie *,
+ const size_t);
static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
enum slow_path_reason,
const struct nlattr **actionsp,
size_t *actions_lenp);
-static void xlate_report(struct action_xlate_ctx *ctx, const char *s);
+static void xlate_report(struct xlate_ctx *ctx, const char *s);
/* A subfacet (see "struct subfacet" below) has three possible installation
* states:
SF_SLOW_PATH, /* Send-to-userspace action is installed. */
};
-static const char *subfacet_path_to_string(enum subfacet_path);
-
/* A dpif flow and actions associated with a facet.
*
* See also the large comment on struct facet. */
struct list list_node; /* In struct facet's 'facets' list. */
struct facet *facet; /* Owning facet. */
- /* Key.
- *
- * To save memory in the common case, 'key' is NULL if 'key_fitness' is
- * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
- * regenerate the ODP flow key from ->facet->flow. */
enum odp_key_fitness key_fitness;
struct nlattr *key;
int key_len;
long long int used; /* Time last used; time created if not used. */
+ long long int created; /* Time created. */
uint64_t dp_packet_count; /* Last known packet count in the datapath. */
uint64_t dp_byte_count; /* Last known byte count in the datapath. */
- /* Datapath actions.
- *
- * These should be essentially identical for every subfacet in a facet, but
- * may differ in trivial ways due to VLAN splinters. */
- size_t actions_len; /* Number of bytes in actions[]. */
- struct nlattr *actions; /* Datapath actions. */
-
- enum slow_path_reason slow; /* 0 if fast path may be used. */
enum subfacet_path path; /* Installed in datapath? */
-
- /* This value is normally the same as ->facet->flow.vlan_tci. Only VLAN
- * splinters can cause it to differ. This value should be removed when
- * the VLAN splinters feature is no longer needed. */
- ovs_be16 initial_tci; /* Initial VLAN TCI value. */
};
-static struct subfacet *subfacet_create(struct facet *, enum odp_key_fitness,
- const struct nlattr *key,
- size_t key_len, ovs_be16 initial_tci);
+#define SUBFACET_DESTROY_MAX_BATCH 50
+
+static struct subfacet *subfacet_create(struct facet *, struct flow_miss *miss,
+ long long int now);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
- const struct nlattr *key, size_t key_len);
+ const struct nlattr *key, size_t key_len,
+ uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
-static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
- struct ofpbuf *key);
+static void subfacet_destroy_batch(struct ofproto_dpif *,
+ struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
struct dpif_flow_stats *);
-static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
const struct dpif_flow_stats *);
-static void subfacet_make_actions(struct subfacet *,
- const struct ofpbuf *packet,
- struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
- const struct nlattr *actions, size_t actions_len,
- struct dpif_flow_stats *, enum slow_path_reason);
+ const struct ofpbuf *odp_actions,
+ struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);
-static enum subfacet_path subfacet_want_path(enum slow_path_reason);
-
/* An exact-match instantiation of an OpenFlow flow.
*
* A facet associates a "struct flow", which represents the Open vSwitch
struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
uint8_t tcp_flags; /* TCP flags seen for this 'rule'. */
- /* Properties of datapath actions.
- *
- * Every subfacet has its own actions because actions can differ slightly
- * between splintered and non-splintered subfacets due to the VLAN tag
- * being initially different (present vs. absent). All of them have these
- * properties in common so we just store one copy of them here. */
- bool has_learn; /* Actions include NXAST_LEARN? */
- bool has_normal; /* Actions output to OFPP_NORMAL? */
- bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
- tag_type tags; /* Tags that would require revalidation. */
- mirror_mask_t mirrors; /* Bitmap of dependent mirrors. */
+ struct xlate_out xout;
+
+ /* Initial values of the packet that may be needed later. */
+ struct initial_vals initial_vals;
/* Storage for a single subfacet, to reduce malloc() time and space
* overhead. (A facet always has at least one subfacet and in the common
- * case has exactly one subfacet.) */
+ * case has exactly one subfacet. However, 'one_subfacet' may not
+ * always be valid, since it could have been removed after newer
+ * subfacets were pushed onto the 'subfacets' list.) */
struct subfacet one_subfacet;
+
+ long long int learn_rl; /* Rate limiter for facet_learn(). */
};
-static struct facet *facet_create(struct rule_dpif *,
- const struct flow *, uint32_t hash);
+static struct facet *facet_create(const struct flow_miss *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);
const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
const struct flow *, uint32_t hash);
-static void facet_revalidate(struct facet *);
+static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);
static void facet_flush_stats(struct facet *);
-static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
-static void facet_push_stats(struct facet *);
+static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
+static void push_all_stats(void);
static bool facet_is_controller_flow(struct facet *);
struct ofport_dpif {
+ struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
struct ofport up;
uint32_t odp_port;
struct ofbundle *bundle; /* Bundle that contains this port, if any. */
struct list bundle_node; /* In struct ofbundle's "ports" list. */
struct cfm *cfm; /* Connectivity Fault Management, if any. */
+ struct bfd *bfd; /* BFD, if any. */
tag_type tag; /* Tag associated with this port. */
- uint32_t bond_stable_id; /* stable_id to use as bond slave, or 0. */
bool may_enable; /* May be enabled in bonds. */
long long int carrier_seq; /* Carrier status changes. */
+ struct tnl_port *tnl_port; /* Tunnel handle, or null. */
/* Spanning tree. */
struct stp_port *stp_port; /* Spanning Tree Protocol, if any. */
int vid;
};
-static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
- uint32_t realdev, ovs_be16 vlan_tci);
+static uint16_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
+ uint16_t realdev_ofp_port,
+ ovs_be16 vlan_tci);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
+static uint32_t ofp_port_to_odp_port(const struct ofproto_dpif *,
+ uint16_t ofp_port);
+static uint16_t odp_port_to_ofp_port(const struct ofproto_dpif *,
+ uint32_t odp_port);
+
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
- assert(ofport->ofproto->ofproto_class == &ofproto_dpif_class);
return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}
static void port_run(struct ofport_dpif *);
static void port_run_fast(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
+static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);
+static void run_fast_rl(void);
struct dpif_completion {
struct list list_node;
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_inconsistency);
+/* Drop keys are odp flow keys which have drop flows installed in the kernel.
+ * These are datapath flows which have no associated ofproto; if they did, we
+ * would use facets. */
+struct drop_key {
+ struct hmap_node hmap_node;
+ struct nlattr *key;
+ size_t key_len;
+};
+
+/* All datapaths of a given type share a single dpif backer instance. */
+struct dpif_backer {
+ char *type;
+ int refcount;
+ struct dpif *dpif;
+ struct timer next_expiration;
+ struct hmap odp_to_ofport_map; /* ODP port to ofport mapping. */
+
+ struct simap tnl_backers; /* Set of dpif ports backing tunnels. */
+
+ /* Facet revalidation flags applying to facets which use this backer. */
+ enum revalidate_reason need_revalidate; /* Revalidate every facet. */
+ struct tag_set revalidate_set; /* Revalidate only matching facets. */
+
+ struct hmap drop_keys; /* Set of dropped odp keys. */
+ bool recv_set_enable; /* Enables or disables receiving packets. */
+};
+
+/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
+static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);
+
+static void drop_key_clear(struct dpif_backer *);
+static struct ofport_dpif *
+odp_port_to_ofport(const struct dpif_backer *, uint32_t odp_port);
+
+struct avg_subfacet_rates {
+ double add_rate; /* Moving average of new flows created per minute. */
+ double del_rate; /* Moving average of flows deleted per minute. */
+};
+static void show_dp_rates(struct ds *ds, const char *heading,
+ const struct avg_subfacet_rates *rates);
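+ /* Folds a new sample into a moving average; presumably with weight 1/base,
+ * i.e. roughly *avg = (*avg * (base - 1) + new) / base. This is a sketch of
+ * the intent, not necessarily the exact implementation. */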
+static void exp_mavg(double *avg, int base, double new);
+
struct ofproto_dpif {
struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
struct ofproto up;
- struct dpif *dpif;
- int max_ports;
+ struct dpif_backer *backer;
/* Special OpenFlow rules. */
struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
- /* Statistics. */
- uint64_t n_matches;
-
/* Bridging. */
struct netflow *netflow;
struct dpif_sflow *sflow;
+ struct dpif_ipfix *ipfix;
struct hmap bundles; /* Contains "struct ofbundle"s. */
struct mac_learning *ml;
struct ofmirror *mirrors[MAX_MIRRORS];
bool has_mirrors;
bool has_bonded_bundles;
- /* Expiration. */
- struct timer next_expiration;
-
/* Facets. */
struct hmap facets;
struct hmap subfacets;
struct governor *governor;
+ long long int consistency_rl;
/* Revalidation. */
struct table_dpif tables[N_TABLES];
- enum revalidate_reason need_revalidate;
- struct tag_set revalidate_set;
/* Support for debugging async flow mods. */
struct list completions;
/* VLAN splinters. */
struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
struct hmap vlandev_map; /* vlandev -> (realdev,vid). */
+
+ /* Ports. */
+ struct sset ports; /* Set of standard port names. */
+ struct sset ghost_ports; /* Ports with no datapath port. */
+ struct sset port_poll_set; /* Queued names for port_poll() reply. */
+ int port_poll_errno; /* Last errno for port_poll() reply. */
+
+ /* Per ofproto's dpif stats. */
+ uint64_t n_hit;
+ uint64_t n_missed;
+
+ /* Subfacet statistics.
+ *
+ * These keep track of the total number of subfacets added and deleted and
+ * flow life span. They are useful for computing the flow rates stats
+ * exposed via "ovs-appctl dpif/show". The goal is to learn about
+ * traffic patterns in ways that we can use later to improve Open vSwitch
+ * performance in new situations. */
+ long long int created; /* Time this ofproto was created. */
+ unsigned int max_n_subfacet; /* Maximum number of subfacets (datapath flows) seen. */
+
+ /* The average number of subfacets... */
+ struct avg_subfacet_rates hourly; /* ...over the last hour. */
+ struct avg_subfacet_rates daily; /* ...over the last day. */
+ long long int last_minute; /* Last time 'hourly' was updated. */
+
+ /* Number of subfacets added or deleted since 'last_minute'. */
+ unsigned int subfacet_add_count;
+ unsigned int subfacet_del_count;
+
+ /* Number of subfacets added or deleted from 'created' to 'last_minute'. */
+ unsigned long long int total_subfacet_add_count;
+ unsigned long long int total_subfacet_del_count;
+
+ /* Sum of the number of milliseconds that each subfacet existed,
+ * over the subfacets that have been added and then later deleted. */
+ unsigned long long int total_subfacet_life_span;
+
+ /* Incremented by the number of currently existing subfacets, each
+ * time we pull statistics from the kernel. */
+ unsigned long long int total_subfacet_count;
+
+ /* Number of times we pull statistics from the kernel. */
+ unsigned long long int n_update_stats;
};
+static unsigned long long int avg_subfacet_life_span(
+ const struct ofproto_dpif *);
+static double avg_subfacet_count(const struct ofproto_dpif *ofproto);
+static void update_moving_averages(struct ofproto_dpif *ofproto);
+static void update_max_subfacet_count(struct ofproto_dpif *ofproto);
/* Defer flow mod completion until "ovs-appctl ofproto/unclog"? (Useful only
* for debugging the asynchronous flow_mod implementation.) */
static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
- assert(ofproto->ofproto_class == &ofproto_dpif_class);
+ ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}
static struct ofport_dpif *get_odp_port(const struct ofproto_dpif *,
uint32_t odp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
- const struct ofpbuf *, ovs_be16 initial_tci,
- struct ds *);
+ const struct ofpbuf *,
+ const struct initial_vals *, struct ds *);
/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
struct ofbundle *);
/* Upcalls. */
#define FLOW_MISS_MAX_BATCH 50
-static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);
+static int handle_upcalls(struct dpif_backer *, unsigned int max_batch);
/* Flow expiration. */
-static int expire(struct ofproto_dpif *);
+static int expire(struct dpif_backer *);
/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);
static size_t compose_sflow_action(const struct ofproto_dpif *,
struct ofpbuf *odp_actions,
const struct flow *, uint32_t odp_port);
-static void add_mirror_actions(struct action_xlate_ctx *ctx,
+static void compose_ipfix_action(const struct ofproto_dpif *,
+ struct ofpbuf *odp_actions,
+ const struct flow *);
+static void add_mirror_actions(struct xlate_ctx *ctx,
const struct flow *flow);
/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+/* Initial mappings of ports to bridges. */
+static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);
\f
/* Factory functions. */
+static void
+init(const struct shash *iface_hints)
+{
+ struct shash_node *node;
+
+ /* Make a local copy, since we don't own 'iface_hints' elements. */
+ SHASH_FOR_EACH(node, iface_hints) {
+ const struct iface_hint *orig_hint = node->data;
+ struct iface_hint *new_hint = xmalloc(sizeof *new_hint);
+
+ new_hint->br_name = xstrdup(orig_hint->br_name);
+ new_hint->br_type = xstrdup(orig_hint->br_type);
+ new_hint->ofp_port = orig_hint->ofp_port;
+
+ shash_add(&init_ofp_ports, node->name, new_hint);
+ }
+}
+
static void
enumerate_types(struct sset *types)
{
static int
enumerate_names(const char *type, struct sset *names)
{
- return dp_enumerate_names(type, names);
+ struct ofproto_dpif *ofproto;
+
+ sset_clear(names);
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (strcmp(type, ofproto->up.type)) {
+ continue;
+ }
+ sset_add(names, ofproto->up.name);
+ }
+
+ return 0;
}
static int
return error;
}
\f
+static const char *
+port_open_type(const char *datapath_type, const char *port_type)
+{
+ return dpif_port_open_type(datapath_type, port_type);
+}
+
+/* Type functions. */
+
+static struct ofproto_dpif *
+lookup_ofproto_dpif_by_port_name(const char *name)
+{
+ struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ if (sset_contains(&ofproto->ports, name)) {
+ return ofproto;
+ }
+ }
+
+ return NULL;
+}
+
+static int
+type_run(const char *type)
+{
+ static long long int push_timer = LLONG_MIN;
+ struct dpif_backer *backer;
+ char *devname;
+ int error;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ dpif_run(backer->dpif);
+
+ /* The most natural place to push facet statistics is when they're pulled
+ * from the datapath. However, when there are many flows in the datapath,
+ * this expensive operation can occur so frequently that it reduces our
+ * ability to quickly set up flows. To reduce the cost, we push statistics
+ * here instead. */
+ if (time_msec() > push_timer) {
+ push_timer = time_msec() + 2000;
+ push_all_stats();
+ }
+
+ /* If vswitchd started with other_config:flow_restore_wait set to "true",
+ * and the configuration has now changed to "false", enable receiving
+ * packets from the datapath. */
+ if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
+ backer->recv_set_enable = true;
+
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
+ if (error) {
+ VLOG_ERR("Failed to enable receiving packets in dpif.");
+ return error;
+ }
+ dpif_flow_flush(backer->dpif);
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+
+ if (backer->need_revalidate
+ || !tag_set_is_empty(&backer->revalidate_set)) {
+ struct tag_set revalidate_set = backer->revalidate_set;
+ bool need_revalidate = backer->need_revalidate;
+ struct ofproto_dpif *ofproto;
+ struct simap_node *node;
+ struct simap tmp_backers;
+
+ /* Handle tunnel garbage collection. */
+ simap_init(&tmp_backers);
+ simap_swap(&backer->tnl_backers, &tmp_backers);
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport_dpif *iter;
+
+ if (backer != ofproto->backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
+ const char *dp_port;
+
+ if (!iter->tnl_port) {
+ continue;
+ }
+
+ dp_port = netdev_vport_get_dpif_port(iter->up.netdev);
+ node = simap_find(&tmp_backers, dp_port);
+ if (node) {
+ simap_put(&backer->tnl_backers, dp_port, node->data);
+ simap_delete(&tmp_backers, node);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ } else {
+ node = simap_find(&backer->tnl_backers, dp_port);
+ if (!node) {
+ uint32_t odp_port = UINT32_MAX;
+
+ if (!dpif_port_add(backer->dpif, iter->up.netdev,
+ &odp_port)) {
+ simap_put(&backer->tnl_backers, dp_port, odp_port);
+ node = simap_find(&backer->tnl_backers, dp_port);
+ }
+ }
+ }
+
+ iter->odp_port = node ? node->data : OVSP_NONE;
+ if (tnl_port_reconfigure(&iter->up, iter->odp_port,
+ &iter->tnl_port)) {
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+ }
+ }
+
+ SIMAP_FOR_EACH (node, &tmp_backers) {
+ dpif_port_del(backer->dpif, node->data);
+ }
+ simap_destroy(&tmp_backers);
+
+ switch (backer->need_revalidate) {
+ case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
+ case REV_STP: COVERAGE_INC(rev_stp); break;
+ case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
+ case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
+ case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
+ }
+
+ if (backer->need_revalidate) {
+ /* Clear the drop_keys in case we should now be accepting some
+ * formerly dropped flows. */
+ drop_key_clear(backer);
+ }
+
+ /* Clear the revalidation flags. */
+ tag_set_init(&backer->revalidate_set);
+ backer->need_revalidate = 0;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct facet *facet, *next;
+
+ if (ofproto->backer != backer) {
+ continue;
+ }
+
+ HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
+ if (need_revalidate
+ || tag_set_intersects(&revalidate_set, facet->xout.tags)) {
+ facet_revalidate(facet);
+ run_fast_rl();
+ }
+ }
+ }
+ }
+
+ if (!backer->recv_set_enable) {
+ /* Wake up within at most 1000 ms. */
+ timer_set_duration(&backer->next_expiration, 1000);
+ } else if (timer_expired(&backer->next_expiration)) {
+ int delay = expire(backer);
+ timer_set_duration(&backer->next_expiration, delay);
+ }
+
+ /* Check for port changes in the dpif. */
+ while ((error = dpif_port_poll(backer->dpif, &devname)) == 0) {
+ struct ofproto_dpif *ofproto;
+ struct dpif_port port;
+
+ /* Don't report on the datapath's device. */
+ if (!strcmp(devname, dpif_base_name(backer->dpif))) {
+ goto next;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
+ goto next;
+ }
+ }
+
+ ofproto = lookup_ofproto_dpif_by_port_name(devname);
+ if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
+ /* The port was removed. If we know the datapath,
+ * report it through poll_set(). If we don't, it may be
+ * notifying us of a removal we initiated, so ignore it.
+ * If there's a pending ENOBUFS, let it stand, since
+ * everything will be reevaluated. */
+ if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
+ sset_add(&ofproto->port_poll_set, devname);
+ ofproto->port_poll_errno = 0;
+ }
+ } else if (!ofproto) {
+ /* The port was added, but we don't know with which
+ * ofproto we should associate it. Delete it. */
+ dpif_port_del(backer->dpif, port.port_no);
+ }
+ dpif_port_destroy(&port);
+
+ next:
+ free(devname);
+ }
+
+ if (error != EAGAIN) {
+ struct ofproto_dpif *ofproto;
+
+ /* There was some sort of error, so propagate it to all
+ * ofprotos that use this backer. */
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
+ &all_ofproto_dpifs) {
+ if (ofproto->backer == backer) {
+ sset_clear(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = error;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+dpif_backer_run_fast(struct dpif_backer *backer, int max_batch)
+{
+ unsigned int work;
+
+ /* If recv_set_enable is false, we should not handle upcalls. */
+ if (!backer->recv_set_enable) {
+ return 0;
+ }
+
+ /* Handle one or more batches of upcalls, until there's nothing left to do
+ * or until we do a fixed total amount of work.
+ *
+ * We do work in batches because it can be much cheaper to set up a number
+ * of flows and fire off their packets all at once. We do multiple batches
+ * because in some cases handling a packet can cause another packet to be
+ * queued almost immediately as part of the return flow. Both
+ * optimizations can make major improvements on some benchmarks and
+ * presumably for real traffic as well. */
+ work = 0;
+ while (work < max_batch) {
+ int retval = handle_upcalls(backer, max_batch - work);
+ if (retval <= 0) {
+ return -retval;
+ }
+ work += retval;
+ }
+
+ return 0;
+}
+
+static int
+type_run_fast(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return 0;
+ }
+
+ return dpif_backer_run_fast(backer, FLOW_MISS_MAX_BATCH);
+}
+
+static void
+run_fast_rl(void)
+{
+ static long long int port_rl = LLONG_MIN;
+ static unsigned int backer_rl = 0;
+
+ if (time_msec() >= port_rl) {
+ struct ofproto_dpif *ofproto;
+ struct ofport_dpif *ofport;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+
+ HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
+ port_run_fast(ofport);
+ }
+ }
+ port_rl = time_msec() + 200;
+ }
+
+ /* XXX: We have to be careful not to do too much work in this function. If
+ * we call dpif_backer_run_fast() too often, or with too large a batch,
+ * performance improves significantly, but at a cost. It's possible for the
+ * number of flows in the datapath to increase without bound, and for poll
+ * loops to take 10s of seconds. The correct solution to this problem,
+ * long term, is to separate flow miss handling into its own thread so it
+ * isn't affected by revalidations and expirations. Until then, this is
+ * the best we can do. */
+ if (++backer_rl >= 10) {
+ struct shash_node *node;
+
+ backer_rl = 0;
+ SHASH_FOR_EACH (node, &all_dpif_backers) {
+ dpif_backer_run_fast(node->data, 1);
+ }
+ }
+}
+
+static void
+type_wait(const char *type)
+{
+ struct dpif_backer *backer;
+
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (!backer) {
+ /* This is not necessarily a problem, since backers are only
+ * created on demand. */
+ return;
+ }
+
+ timer_wait(&backer->next_expiration);
+}
+\f
/* Basic life-cycle. */
static int add_internal_flows(struct ofproto_dpif *);
free(ofproto);
}
-static int
-construct(struct ofproto *ofproto_)
+static void
+close_dpif_backer(struct dpif_backer *backer)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- const char *name = ofproto->up.name;
+ struct shash_node *node;
+
+ ovs_assert(backer->refcount > 0);
+
+ if (--backer->refcount) {
+ return;
+ }
+
+ drop_key_clear(backer);
+ hmap_destroy(&backer->drop_keys);
+
+ simap_destroy(&backer->tnl_backers);
+ hmap_destroy(&backer->odp_to_ofport_map);
+ node = shash_find(&all_dpif_backers, backer->type);
+ free(backer->type);
+ shash_delete(&all_dpif_backers, node);
+ dpif_close(backer->dpif);
+
+ free(backer);
+}
+
+/* Datapath port slated for removal from datapath. */
+struct odp_garbage {
+ struct list list_node;
+ uint32_t odp_port;
+};
+
+static int
+open_dpif_backer(const char *type, struct dpif_backer **backerp)
+{
+ struct dpif_backer *backer;
+ struct dpif_port_dump port_dump;
+ struct dpif_port port;
+ struct shash_node *node;
+ struct list garbage_list;
+ struct odp_garbage *garbage, *next;
+ struct sset names;
+ char *backer_name;
+ const char *name;
int error;
- int i;
- error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
+ backer = shash_find_data(&all_dpif_backers, type);
+ if (backer) {
+ backer->refcount++;
+ *backerp = backer;
+ return 0;
+ }
+
+ backer_name = xasprintf("ovs-%s", type);
+
+ /* Remove any existing datapaths, since we assume we're the only
+ * userspace controlling the datapath. */
+ sset_init(&names);
+ dp_enumerate_names(type, &names);
+ SSET_FOR_EACH(name, &names) {
+ struct dpif *old_dpif;
+
+ /* Don't remove our backer if it exists. */
+ if (!strcmp(name, backer_name)) {
+ continue;
+ }
+
+ if (dpif_open(name, type, &old_dpif)) {
+ VLOG_WARN("couldn't open old datapath %s to remove it", name);
+ } else {
+ dpif_delete(old_dpif);
+ dpif_close(old_dpif);
+ }
+ }
+ sset_destroy(&names);
+
+ backer = xmalloc(sizeof *backer);
+
+ error = dpif_create_and_open(backer_name, type, &backer->dpif);
+ free(backer_name);
+ if (error) {
+ VLOG_ERR("failed to open datapath of type %s: %s", type,
+ strerror(error));
+ free(backer);
+ return error;
+ }
+
+ backer->type = xstrdup(type);
+ backer->refcount = 1;
+ hmap_init(&backer->odp_to_ofport_map);
+ hmap_init(&backer->drop_keys);
+ timer_set_duration(&backer->next_expiration, 1000);
+ backer->need_revalidate = 0;
+ simap_init(&backer->tnl_backers);
+ tag_set_init(&backer->revalidate_set);
+ backer->recv_set_enable = !ofproto_get_flow_restore_wait();
+ *backerp = backer;
+
+ if (backer->recv_set_enable) {
+ dpif_flow_flush(backer->dpif);
+ }
+
+ /* Loop through the ports already on the datapath and remove any
+ * that we don't need anymore. */
+ list_init(&garbage_list);
+ dpif_port_dump_start(&port_dump, backer->dpif);
+ while (dpif_port_dump_next(&port_dump, &port)) {
+ node = shash_find(&init_ofp_ports, port.name);
+ if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
+ garbage = xmalloc(sizeof *garbage);
+ garbage->odp_port = port.port_no;
+ list_push_front(&garbage_list, &garbage->list_node);
+ }
+ }
+ dpif_port_dump_done(&port_dump);
+
+ LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
+ dpif_port_del(backer->dpif, garbage->odp_port);
+ list_remove(&garbage->list_node);
+ free(garbage);
+ }
+
+ shash_add(&all_dpif_backers, type, backer);
+
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
- VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));
+ VLOG_ERR("failed to listen on datapath of type %s: %s",
+ type, strerror(error));
+ close_dpif_backer(backer);
return error;
}
- ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
- ofproto->n_matches = 0;
+ return error;
+}
- dpif_flow_flush(ofproto->dpif);
- dpif_recv_purge(ofproto->dpif);
+static int
+construct(struct ofproto *ofproto_)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct shash_node *node, *next;
+ int max_ports;
+ int error;
+ int i;
- error = dpif_recv_set(ofproto->dpif, true);
+ error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
if (error) {
- VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
- dpif_close(ofproto->dpif);
return error;
}
+ max_ports = dpif_get_max_ports(ofproto->backer->dpif);
+ ofproto_init_max_ports(ofproto_, MIN(max_ports, OFPP_MAX));
+
ofproto->netflow = NULL;
ofproto->sflow = NULL;
+ ofproto->ipfix = NULL;
ofproto->stp = NULL;
hmap_init(&ofproto->bundles);
ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
}
ofproto->has_bonded_bundles = false;
- timer_set_duration(&ofproto->next_expiration, 1000);
-
hmap_init(&ofproto->facets);
hmap_init(&ofproto->subfacets);
ofproto->governor = NULL;
+ ofproto->consistency_rl = LLONG_MIN;
for (i = 0; i < N_TABLES; i++) {
struct table_dpif *table = &ofproto->tables[i];
table->other_table = NULL;
table->basis = random_uint32();
}
- ofproto->need_revalidate = 0;
- tag_set_init(&ofproto->revalidate_set);
list_init(&ofproto->completions);
hmap_init(&ofproto->vlandev_map);
hmap_init(&ofproto->realdev_vid_map);
+ sset_init(&ofproto->ports);
+ sset_init(&ofproto->ghost_ports);
+ sset_init(&ofproto->port_poll_set);
+ ofproto->port_poll_errno = 0;
+
+ SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
+ struct iface_hint *iface_hint = node->data;
+
+ if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
+ /* Check if the datapath already has this port. */
+ if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
+ sset_add(&ofproto->ports, node->name);
+ }
+
+ free(iface_hint->br_name);
+ free(iface_hint->br_type);
+ free(iface_hint);
+ shash_delete(&init_ofp_ports, node);
+ }
+ }
+
hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
hash_string(ofproto->up.name, 0));
memset(&ofproto->stats, 0, sizeof ofproto->stats);
error = add_internal_flows(ofproto);
ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;
+ ofproto->n_hit = 0;
+ ofproto->n_missed = 0;
+
+ ofproto->max_n_subfacet = 0;
+ ofproto->created = time_msec();
+ ofproto->last_minute = ofproto->created;
+ memset(&ofproto->hourly, 0, sizeof ofproto->hourly);
+ memset(&ofproto->daily, 0, sizeof ofproto->daily);
+ ofproto->subfacet_add_count = 0;
+ ofproto->subfacet_del_count = 0;
+ ofproto->total_subfacet_add_count = 0;
+ ofproto->total_subfacet_del_count = 0;
+ ofproto->total_subfacet_life_span = 0;
+ ofproto->total_subfacet_count = 0;
+ ofproto->n_update_stats = 0;
+
return error;
}
struct ofputil_flow_mod fm;
int error;
- cls_rule_init_catchall(&fm.cr, 0);
- cls_rule_set_reg(&fm.cr, 0, id);
+ match_init_catchall(&fm.match);
+ fm.priority = 0;
+ match_set_reg(&fm.match, 0, id);
fm.new_cookie = htonll(0);
fm.cookie = htonll(0);
fm.cookie_mask = htonll(0);
return error;
}
- *rulep = rule_dpif_lookup__(ofproto, &fm.cr.flow, TBL_INTERNAL);
- assert(*rulep != NULL);
+ *rulep = rule_dpif_lookup__(ofproto, &fm.match.flow, TBL_INTERNAL);
+ ovs_assert(*rulep != NULL);
return 0;
}
hmap_destroy(&ofproto->vlandev_map);
hmap_destroy(&ofproto->realdev_vid_map);
- dpif_close(ofproto->dpif);
+ sset_destroy(&ofproto->ports);
+ sset_destroy(&ofproto->ghost_ports);
+ sset_destroy(&ofproto->port_poll_set);
+
+ close_dpif_backer(ofproto->backer);
}
static int
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport;
- unsigned int work;
+
+ /* Do not perform any periodic activity required by 'ofproto' while
+ * waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run_fast(ofport);
}
- /* Handle one or more batches of upcalls, until there's nothing left to do
- * or until we do a fixed total amount of work.
- *
- * We do work in batches because it can be much cheaper to set up a number
- * of flows and fire off their patches all at once. We do multiple batches
- * because in some cases handling a packet can cause another packet to be
- * queued almost immediately as part of the return flow. Both
- * optimizations can make major improvements on some benchmarks and
- * presumably for real traffic as well. */
- work = 0;
- while (work < FLOW_MISS_MAX_BATCH) {
- int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);
- if (retval <= 0) {
- return -retval;
- }
- work += retval;
- }
return 0;
}
if (!clogged) {
complete_operations(ofproto);
}
- dpif_run(ofproto->dpif);
+
+ /* Do not perform any of the periodic activity below that is required by
+ * 'ofproto' while waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
error = run_fast(ofproto_);
if (error) {
return error;
}
- if (timer_expired(&ofproto->next_expiration)) {
- int delay = expire(ofproto);
- timer_set_duration(&ofproto->next_expiration, delay);
- }
-
if (ofproto->netflow) {
if (netflow_run(ofproto->netflow)) {
send_netflow_active_timeouts(ofproto);
}
stp_run(ofproto);
- mac_learning_run(ofproto->ml, &ofproto->revalidate_set);
-
- /* Now revalidate if there's anything to do. */
- if (ofproto->need_revalidate
- || !tag_set_is_empty(&ofproto->revalidate_set)) {
- struct tag_set revalidate_set = ofproto->revalidate_set;
- bool revalidate_all = ofproto->need_revalidate;
- struct facet *facet;
-
- switch (ofproto->need_revalidate) {
- case REV_RECONFIGURE: COVERAGE_INC(rev_reconfigure); break;
- case REV_STP: COVERAGE_INC(rev_stp); break;
- case REV_PORT_TOGGLED: COVERAGE_INC(rev_port_toggled); break;
- case REV_FLOW_TABLE: COVERAGE_INC(rev_flow_table); break;
- case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
- }
-
- /* Clear the revalidation flags. */
- tag_set_init(&ofproto->revalidate_set);
- ofproto->need_revalidate = 0;
-
- HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
- if (revalidate_all
- || tag_set_intersects(&revalidate_set, facet->tags)) {
- facet_revalidate(facet);
- }
- }
- }
+ mac_learning_run(ofproto->ml, &ofproto->backer->revalidate_set);
/* Check the consistency of a random facet, to aid debugging. */
- if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
+ if (time_msec() >= ofproto->consistency_rl
+ && !hmap_is_empty(&ofproto->facets)
+ && !ofproto->backer->need_revalidate) {
struct facet *facet;
+ ofproto->consistency_rl = time_msec() + 250;
+
facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
struct facet, hmap_node);
- if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
+ if (!tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->xout.tags)) {
if (!facet_check_consistency(facet)) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
}
}
poll_immediate_wake();
}
- dpif_wait(ofproto->dpif);
- dpif_recv_wait(ofproto->dpif);
+ if (ofproto_get_flow_restore_wait()) {
+ return;
+ }
+
+ dpif_wait(ofproto->backer->dpif);
+ dpif_recv_wait(ofproto->backer->dpif);
if (ofproto->sflow) {
dpif_sflow_wait(ofproto->sflow);
}
- if (!tag_set_is_empty(&ofproto->revalidate_set)) {
+ if (!tag_set_is_empty(&ofproto->backer->revalidate_set)) {
poll_immediate_wake();
}
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
}
mac_learning_wait(ofproto->ml);
stp_wait(ofproto);
- if (ofproto->need_revalidate) {
+ if (ofproto->backer->need_revalidate) {
/* Shouldn't happen, but if it does just go around again. */
VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
poll_immediate_wake();
- } else {
- timer_wait(&ofproto->next_expiration);
}
if (ofproto->governor) {
governor_wait(ofproto->governor);
flush(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct facet *facet, *next_facet;
-
- HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
- /* Mark the facet as not installed so that facet_remove() doesn't
- * bother trying to uninstall it. There is no point in uninstalling it
- * individually since we are about to blow away all the facets with
- * dpif_flow_flush(). */
- struct subfacet *subfacet;
+ struct subfacet *subfacet, *next_subfacet;
+ struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
+ int n_batch;
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- subfacet->path = SF_NOT_INSTALLED;
- subfacet->dp_packet_count = 0;
- subfacet->dp_byte_count = 0;
+ n_batch = 0;
+ HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
+ &ofproto->subfacets) {
+ if (subfacet->path != SF_NOT_INSTALLED) {
+ batch[n_batch++] = subfacet;
+ if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
+ n_batch = 0;
+ }
+ } else {
+ subfacet_destroy(subfacet);
}
- facet_remove(facet);
}
- dpif_flow_flush(ofproto->dpif);
+
+ if (n_batch > 0) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
+ }
}
static void
}
static void
-get_tables(struct ofproto *ofproto_, struct ofp10_table_stats *ots)
+get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct dpif_dp_stats s;
+ uint64_t n_miss, n_no_pkt_in, n_bytes;
+ uint64_t n_lookup;
strcpy(ots->name, "classifier");
- dpif_get_dp_stats(ofproto->dpif, &s);
- put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
- put_32aligned_be64(&ots->matched_count,
- htonll(s.n_hit + ofproto->n_matches));
+ dpif_get_dp_stats(ofproto->backer->dpif, &s);
+ rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
+ rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
+
+ n_lookup = s.n_hit + s.n_missed;
+ ots->lookup_count = htonll(n_lookup);
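+ /* Packets that hit the special miss rule or the no-packet-in rule did not
+ * match any real OpenFlow flow, so exclude them from the matched count. */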
+ ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
}
static struct ofport *
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ const struct netdev *netdev = port->up.netdev;
+ struct dpif_port dpif_port;
+ int error;
- ofproto->need_revalidate = REV_RECONFIGURE;
- port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
port->bundle = NULL;
port->cfm = NULL;
+ port->bfd = NULL;
port->tag = tag_create_random();
port->may_enable = true;
port->stp_port = NULL;
port->stp_state = STP_DISABLED;
+ port->tnl_port = NULL;
hmap_init(&port->priorities);
port->realdev_ofp_port = 0;
port->vlandev_vid = 0;
- port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);
+ port->carrier_seq = netdev_get_carrier_resets(netdev);
+
+ if (netdev_vport_is_patch(netdev)) {
+ /* By bailing out here, we don't submit the port to the sFlow module
+ * to be considered for counter polling export. This is correct
+ * because the patch port represents an interface that sFlow considers
+ * to be "internal" to the switch as a whole, and therefore not an
+ * candidate for counter polling. */
+ port->odp_port = OVSP_NONE;
+ return 0;
+ }
+
+ error = dpif_port_query_by_name(ofproto->backer->dpif,
+ netdev_vport_get_dpif_port(netdev),
+ &dpif_port);
+ if (error) {
+ return error;
+ }
+
+ port->odp_port = dpif_port.port_no;
+
+ if (netdev_get_tunnel_config(netdev)) {
+ port->tnl_port = tnl_port_add(&port->up, port->odp_port);
+ } else {
+ /* Sanity-check that a mapping doesn't already exist. This
+ * shouldn't happen for non-tunnel ports. */
+ if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
+ VLOG_ERR("port %s already has an OpenFlow port number",
+ dpif_port.name);
+ dpif_port_destroy(&dpif_port);
+ return EBUSY;
+ }
+
+ hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
+ hash_int(port->odp_port, 0));
+ }
+ dpif_port_destroy(&dpif_port);
if (ofproto->sflow) {
- dpif_sflow_add_port(ofproto->sflow, port_);
+ dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
}
return 0;
{
struct ofport_dpif *port = ofport_dpif_cast(port_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
+ const char *dp_port_name = netdev_vport_get_dpif_port(port->up.netdev);
+ const char *devname = netdev_get_name(port->up.netdev);
+
+ if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ /* The underlying device is still there, so delete it. This
+ * happens when the ofproto is being destroyed, since the caller
+ * assumes that removal of attached ports will happen as part of
+ * destruction. */
+ if (!port->tnl_port) {
+ dpif_port_del(ofproto->backer->dpif, port->odp_port);
+ }
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
+
+ if (port->odp_port != OVSP_NONE && !port->tnl_port) {
+ hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
+ }
- ofproto->need_revalidate = REV_RECONFIGURE;
+ tnl_port_del(port->tnl_port);
+ sset_find_and_delete(&ofproto->ports, devname);
+ sset_find_and_delete(&ofproto->ghost_ports, devname);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle_remove(port_);
set_cfm(port_, NULL);
+ set_bfd(port_, NULL);
if (ofproto->sflow) {
dpif_sflow_del_port(ofproto->sflow, port->odp_port);
}
if (port->bundle && port->bundle->bond) {
bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
}
+
+ if (port->cfm) {
+ cfm_set_netdev(port->cfm, port->up.netdev);
+ }
}
static void
if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
OFPUTIL_PC_NO_PACKET_IN)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
bundle_update(port->bundle);
if (!ds) {
struct ofport_dpif *ofport;
- ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
+ ds = ofproto->sflow = dpif_sflow_create();
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
- dpif_sflow_add_port(ds, &ofport->up);
+ dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
dpif_sflow_set_options(ds, sflow_options);
} else {
if (ds) {
dpif_sflow_destroy(ds);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->sflow = NULL;
}
}
return 0;
}
+static int
+set_ipfix(
+ struct ofproto *ofproto_,
+ const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
+ const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
+ size_t n_flow_exporters_options)
+{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
+ struct dpif_ipfix *di = ofproto->ipfix;
+
+ if (bridge_exporter_options || flow_exporters_options) {
+ if (!di) {
+ di = ofproto->ipfix = dpif_ipfix_create();
+ }
+ dpif_ipfix_set_options(
+ di, bridge_exporter_options, flow_exporters_options,
+ n_flow_exporters_options);
+ } else {
+ if (di) {
+ dpif_ipfix_destroy(di);
+ ofproto->ipfix = NULL;
+ }
+ }
+ return 0;
+}
+
static int
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
{
struct ofproto_dpif *ofproto;
ofproto = ofproto_dpif_cast(ofport->up.ofproto);
- ofproto->need_revalidate = REV_RECONFIGURE;
- ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ ofport->cfm = cfm_create(ofport->up.netdev);
}
if (cfm_configure(ofport->cfm, s)) {
return error;
}
-static int
-get_cfm_fault(const struct ofport *ofport_)
+static bool
+get_cfm_status(const struct ofport *ofport_,
+ struct ofproto_cfm_status *status)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
- return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;
+ if (ofport->cfm) {
+ status->faults = cfm_get_fault(ofport->cfm);
+ status->remote_opstate = cfm_get_opup(ofport->cfm);
+ status->health = cfm_get_health(ofport->cfm);
+ cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
+ return true;
+ } else {
+ return false;
+ }
}
static int
-get_cfm_opup(const struct ofport *ofport_)
+set_bfd(struct ofport *ofport_, const struct smap *cfg)
{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
+ struct bfd *old;
- return ofport->cfm ? cfm_get_opup(ofport->cfm) : -1;
+ old = ofport->bfd;
+ ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev), cfg);
+ if (ofport->bfd != old) {
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ }
+
+ return 0;
}
static int
-get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
- size_t *n_rmps)
+get_bfd_status(struct ofport *ofport_, struct smap *smap)
{
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
- if (ofport->cfm) {
- cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);
+ if (ofport->bfd) {
+ bfd_get_status(ofport->bfd, smap);
return 0;
} else {
- return -1;
+ return ENOENT;
}
}
-
-static int
-get_cfm_health(const struct ofport *ofport_)
-{
- struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
-
- return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
-}
\f
/* Spanning Tree. */
/* Only revalidate flows if the configuration changed. */
if (!s != !ofproto->stp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (s) {
if (stp_learn_in_state(ofport->stp_state)
!= stp_learn_in_state(state)) {
/* xxx Learning action flows should also be flushed. */
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
}
fwd_change = stp_forward_in_state(ofport->stp_state)
!= stp_forward_in_state(state);
- ofproto->need_revalidate = REV_STP;
+ ofproto->backer->need_revalidate = REV_STP;
ofport->stp_state = state;
ofport->stp_state_entered = time_msec();
}
if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
}
uint8_t dscp;
dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
- if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,
+ if (dpif_queue_to_priority(ofproto->backer->dpif, qdscp_list[i].queue,
&priority)) {
continue;
}
pdscp = xmalloc(sizeof *pdscp);
pdscp->priority = priority;
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
if (pdscp->dscp != dscp) {
pdscp->dscp = dscp;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));
if (!hmap_is_empty(&ofport->priorities)) {
ofport_clear_priorities(ofport);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
hmap_swap(&new, &ofport->priorities);
struct mac_learning *ml = ofproto->ml;
struct mac_entry *mac, *next_mac;
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
if (mac->port.p == bundle) {
if (all_ofprotos) {
e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
NULL);
if (e) {
- tag_set_add(&o->revalidate_set, e->tag);
mac_learning_expire(o->ml, e);
}
}
{
struct ofbundle *bundle = port->bundle;
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
list_remove(&port->bundle_node);
port->bundle = NULL;
}
static bool
-bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
- struct lacp_slave_settings *lacp,
- uint32_t bond_stable_id)
+bundle_add_port(struct ofbundle *bundle, uint16_t ofp_port,
+ struct lacp_slave_settings *lacp)
{
struct ofport_dpif *port;
}
if (port->bundle != bundle) {
- bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (port->bundle) {
bundle_del_port(port);
}
}
}
if (lacp) {
- port->bundle->ofproto->need_revalidate = REV_RECONFIGURE;
+ bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
lacp_slave_register(bundle->lacp, port, lacp);
}
- port->bond_stable_id = bond_stable_id;
-
return true;
}
mirror_destroy(m);
} else if (hmapx_find_and_delete(&m->srcs, bundle)
|| hmapx_find_and_delete(&m->dsts, bundle)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
}
}
return 0;
}
- assert(s->n_slaves == 1 || s->bond != NULL);
- assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
+ ovs_assert(s->n_slaves == 1 || s->bond != NULL);
+ ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
bundle = bundle_lookup(ofproto, aux);
if (!bundle) {
/* LACP. */
if (s->lacp) {
if (!bundle->lacp) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
bundle->lacp = lacp_create();
}
lacp_configure(bundle->lacp, s->lacp);
ok = true;
for (i = 0; i < s->n_slaves; i++) {
if (!bundle_add_port(bundle, s->slaves[i],
- s->lacp ? &s->lacp_slaves[i] : NULL,
- s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
+ s->lacp ? &s->lacp_slaves[i] : NULL)) {
ok = false;
}
}
found: ;
}
}
- assert(list_size(&bundle->ports) <= s->n_slaves);
+ ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
if (list_is_empty(&bundle->ports)) {
bundle_destroy(bundle);
bundle->ofproto->has_bonded_bundles = true;
if (bundle->bond) {
if (bond_reconfigure(bundle->bond, s->bond)) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
} else {
bundle->bond = bond_create(s->bond);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
- bond_slave_register(bundle->bond, port, port->bond_stable_id,
- port->up.netdev);
+ bond_slave_register(bundle->bond, port, port->up.netdev);
}
} else {
bond_destroy(bundle->bond);
bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
}
- bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
+ bond_run(bundle->bond, &bundle->ofproto->backer->revalidate_set,
lacp_status(bundle->lacp));
if (bond_should_send_learning_packets(bundle->bond)) {
bundle_send_learning_packets(bundle);
}
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
ofproto->has_mirrors = true;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml,
+ &ofproto->backer->revalidate_set);
mirror_update_dups(ofproto);
return 0;
}
ofproto = mirror->ofproto;
- ofproto->need_revalidate = REV_RECONFIGURE;
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
return 0;
}
+ push_all_stats();
+
*packets = mirror->packet_count;
*bytes = mirror->byte_count;
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
return 0;
}
forward_bpdu_changed(struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
}
static void
-set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
+set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
+ size_t max_entries)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
mac_learning_set_idle_time(ofproto->ml, idle_time);
+ mac_learning_set_max_entries(ofproto->ml, max_entries);
}
\f
/* Ports. */
static struct ofport_dpif *
get_odp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
{
- return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
+ struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
+ return port && &ofproto->up == port->up.ofproto ? port : NULL;
}
static void
-ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
+ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
+ struct ofproto_port *ofproto_port,
struct dpif_port *dpif_port)
{
ofproto_port->name = dpif_port->name;
ofproto_port->type = dpif_port->type;
- ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
+ ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
+}
+
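+/* Returns the ofport at the far end of 'ofport_dpif''s patch connection, or
+ * NULL if 'ofport_dpif' is not a patch port or its configured peer cannot be
+ * found among the known ofprotos. */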
+static struct ofport_dpif *
+ofport_get_peer(const struct ofport_dpif *ofport_dpif)
+{
+ const struct ofproto_dpif *ofproto;
+ const char *peer;
+
+ peer = netdev_vport_patch_peer(ofport_dpif->up.netdev);
+ if (!peer) {
+ return NULL;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct ofport *ofport;
+
+ ofport = shash_find_data(&ofproto->up.port_by_name, peer);
+ if (ofport && ofport->ofproto->ofproto_class == &ofproto_dpif_class) {
+ return ofport_dpif_cast(ofport);
+ }
+ }
+ return NULL;
}
static void
send_packet(ofport, &packet);
ofpbuf_uninit(&packet);
}
+
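+    /* Transmit a BFD control packet on this port if the BFD session
+     * indicates that one is due. */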
+ if (ofport->bfd && bfd_should_send_packet(ofport->bfd)) {
+ struct ofpbuf packet;
+
+ ofpbuf_init(&packet, 0);
+ bfd_put_packet(ofport->bfd, &packet, ofport->up.pp.hw_addr);
+ send_packet(ofport, &packet);
+ ofpbuf_uninit(&packet);
+ }
}
static void
ofport->carrier_seq = carrier_seq;
port_run_fast(ofport);
+
+ if (ofport->tnl_port
+ && tnl_port_reconfigure(&ofport->up, ofport->odp_port,
+ &ofport->tnl_port)) {
+ ofproto_dpif_cast(ofport->up.ofproto)->backer->need_revalidate = true;
+ }
+
if (ofport->cfm) {
int cfm_opup = cfm_get_opup(ofport->cfm);
}
}
+ if (ofport->bfd) {
+ bfd_run(ofport->bfd);
+ enable = enable && bfd_forwarding(ofport->bfd);
+ }
+
if (ofport->bundle) {
enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
if (carrier_changed) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
if (ofproto->has_bundle_action) {
- ofproto->need_revalidate = REV_PORT_TOGGLED;
+ ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
}
}
if (ofport->cfm) {
cfm_wait(ofport->cfm);
}
+
+ if (ofport->bfd) {
+ bfd_wait(ofport->bfd);
+ }
}
static int
struct dpif_port dpif_port;
int error;
- error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
+ if (sset_contains(&ofproto->ghost_ports, devname)) {
+ const char *type = netdev_get_type_from_name(devname);
+
+ /* We may be called before ofproto->up.port_by_name is populated with
+ * the appropriate ofport. For this reason, we must get the name and
+ * type from the netdev layer directly. */
+ if (type) {
+ const struct ofport *ofport;
+
+ ofport = shash_find_data(&ofproto->up.port_by_name, devname);
+ ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
+ ofproto_port->name = xstrdup(devname);
+ ofproto_port->type = xstrdup(type);
+ return 0;
+ }
+ return ENODEV;
+ }
+
+ if (!sset_contains(&ofproto->ports, devname)) {
+ return ENODEV;
+ }
+ error = dpif_port_query_by_name(ofproto->backer->dpif,
+ devname, &dpif_port);
if (!error) {
- ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
+ ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
}
return error;
}
static int
-port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
+port_add(struct ofproto *ofproto_, struct netdev *netdev)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- uint16_t odp_port = UINT16_MAX;
- int error;
+ const char *dp_port_name = netdev_vport_get_dpif_port(netdev);
+ const char *devname = netdev_get_name(netdev);
- error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
- if (!error) {
- *ofp_portp = odp_port_to_ofp_port(odp_port);
+ if (netdev_vport_is_patch(netdev)) {
+ sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
+ return 0;
}
- return error;
+
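+    /* Create the backing datapath port only if it does not already exist,
+     * remembering tunnel backer ports in 'tnl_backers'. */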
+ if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
+ uint32_t port_no = UINT32_MAX;
+ int error;
+
+ error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
+ if (error) {
+ return error;
+ }
+ if (netdev_get_tunnel_config(netdev)) {
+ simap_put(&ofproto->backer->tnl_backers, dp_port_name, port_no);
+ }
+ }
+
+ if (netdev_get_tunnel_config(netdev)) {
+ sset_add(&ofproto->ghost_ports, devname);
+ } else {
+ sset_add(&ofproto->ports, devname);
+ }
+ return 0;
}
static int
port_del(struct ofproto *ofproto_, uint16_t ofp_port)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- int error;
+ struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+ int error = 0;
- error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
- if (!error) {
- struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
- if (ofport) {
+ if (!ofport) {
+ return 0;
+ }
+
+ sset_find_and_delete(&ofproto->ghost_ports,
+ netdev_get_name(ofport->up.netdev));
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
+ if (!ofport->tnl_port) {
+ error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
+ if (!error) {
/* The caller is going to close ofport->up.netdev. If this is a
* bonded port, then the bond is using that netdev, so remove it
* from the bond. The client will need to reconfigure everything
struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
int error;
+ push_all_stats();
+
error = netdev_get_stats(ofport->up.netdev, stats);
- if (!error && ofport->odp_port == OVSP_LOCAL) {
+ if (!error && ofport_->ofp_port == OFPP_LOCAL) {
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
/* ofproto->stats.tx_packets represents packets that we created
/* ofproto->stats.rx_packets represents packets that were received on
* some port and we processed internally and dropped (e.g. STP).
- * Account fro them as if they had been forwarded to OFPP_LOCAL. */
+ * Account for them as if they had been forwarded to OFPP_LOCAL. */
if (stats->tx_packets != UINT64_MAX) {
stats->tx_packets += ofproto->stats.rx_packets;
return error;
}
-/* Account packets for LOCAL port. */
-static void
-ofproto_update_local_port_stats(const struct ofproto *ofproto_,
- size_t tx_size, size_t rx_size)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-
- if (rx_size) {
- ofproto->stats.rx_packets++;
- ofproto->stats.rx_bytes += rx_size;
- }
- if (tx_size) {
- ofproto->stats.tx_packets++;
- ofproto->stats.tx_bytes += tx_size;
- }
-}
-
struct port_dump_state {
- struct dpif_port_dump dump;
- bool done;
+ uint32_t bucket;
+ uint32_t offset;
+ bool ghost;
+
+ struct ofproto_port port;
+ bool has_port;
};
static int
-port_dump_start(const struct ofproto *ofproto_, void **statep)
+port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct port_dump_state *state;
-
- *statep = state = xmalloc(sizeof *state);
- dpif_port_dump_start(&state->dump, ofproto->dpif);
- state->done = false;
+ *statep = xzalloc(sizeof(struct port_dump_state));
return 0;
}
static int
-port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
+port_dump_next(const struct ofproto *ofproto_, void *state_,
struct ofproto_port *port)
{
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct port_dump_state *state = state_;
- struct dpif_port dpif_port;
+ const struct sset *sset;
+ struct sset_node *node;
- if (dpif_port_dump_next(&state->dump, &dpif_port)) {
- ofproto_port_from_dpif_port(port, &dpif_port);
- return 0;
- } else {
- int error = dpif_port_dump_done(&state->dump);
- state->done = true;
- return error ? error : EOF;
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
+ state->has_port = false;
+ }
+ sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
+ while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
+ int error;
+
+ error = port_query_by_name(ofproto_, node->name, &state->port);
+ if (!error) {
+ *port = state->port;
+ state->has_port = true;
+ return 0;
+ } else if (error != ENODEV) {
+ return error;
+ }
+ }
+
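+    /* The regular 'ports' sset is exhausted; dump the ghost ports (patch and
+     * tunnel devices) on a second pass before reporting EOF. */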
+ if (!state->ghost) {
+ state->ghost = true;
+ state->bucket = 0;
+ state->offset = 0;
+ return port_dump_next(ofproto_, state_, port);
}
+
+ return EOF;
}
static int
{
struct port_dump_state *state = state_;
- if (!state->done) {
- dpif_port_dump_done(&state->dump);
+ if (state->has_port) {
+ ofproto_port_destroy(&state->port);
}
free(state);
return 0;
port_poll(const struct ofproto *ofproto_, char **devnamep)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- return dpif_port_poll(ofproto->dpif, devnamep);
+
+ if (ofproto->port_poll_errno) {
+ int error = ofproto->port_poll_errno;
+ ofproto->port_poll_errno = 0;
+ return error;
+ }
+
+ if (sset_is_empty(&ofproto->port_poll_set)) {
+ return EAGAIN;
+ }
+
+ *devnamep = sset_pop(&ofproto->port_poll_set);
+ return 0;
}
static void
port_poll_wait(const struct ofproto *ofproto_)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- dpif_port_poll_wait(ofproto->dpif);
+ dpif_port_poll_wait(ofproto->backer->dpif);
}
static int
* It's possible to batch more than that, but the benefit might be minimal. */
struct flow_miss {
struct hmap_node hmap_node;
+ struct ofproto_dpif *ofproto;
struct flow flow;
enum odp_key_fitness key_fitness;
const struct nlattr *key;
size_t key_len;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct list packets;
enum dpif_upcall_type upcall_type;
};
struct flow_miss_op {
struct dpif_op dpif_op;
- struct subfacet *subfacet; /* Subfacet */
- void *garbage; /* Pointer to pass to free(), NULL if none. */
- uint64_t stub[1024 / 8]; /* Temporary buffer. */
+
+ uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
+ struct xlate_out xout;
+ bool xout_garbage; /* 'xout' needs to be uninitialized? */
};
/* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
flow_get_metadata(flow, &pin.fmd);
- /* Registers aren't meaningful on a miss. */
- memset(pin.fmd.reg_masks, 0, sizeof pin.fmd.reg_masks);
-
connmgr_send_packet_in(ofproto->up.connmgr, &pin);
}
static enum slow_path_reason
process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet)
+ const struct ofport_dpif *ofport, const struct ofpbuf *packet)
{
- struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
-
if (!ofport) {
return 0;
- }
-
- if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
+ } else if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
if (packet) {
cfm_process_heartbeat(ofport->cfm, packet);
}
return SLOW_CFM;
+ } else if (ofport->bfd && bfd_should_process_flow(flow)) {
+ if (packet) {
+ bfd_process_packet(ofport->bfd, flow, packet);
+ }
+ return SLOW_BFD;
} else if (ofport->bundle && ofport->bundle->lacp
&& flow->dl_type == htons(ETH_TYPE_LACP)) {
if (packet) {
stp_process_packet(ofport, packet);
}
return SLOW_STP;
+ } else {
+ return 0;
}
- return 0;
}
static struct flow_miss *
-flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
+flow_miss_find(struct hmap *todo, const struct ofproto_dpif *ofproto,
+ const struct flow *flow, uint32_t hash)
{
struct flow_miss *miss;
HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
- if (flow_equal(&miss->flow, flow)) {
+ if (miss->ofproto == ofproto && flow_equal(&miss->flow, flow)) {
return miss;
}
}
init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
struct flow_miss_op *op)
{
- if (miss->flow.vlan_tci != miss->initial_tci) {
+ if (miss->flow.vlan_tci != miss->initial_vals.vlan_tci) {
/* This packet was received on a VLAN splinter port. We
* added a VLAN to the packet to make the packet resemble
* the flow, but the actions were composed assuming that
eth_pop_vlan(packet);
}
- op->subfacet = NULL;
- op->garbage = NULL;
+ op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_EXECUTE;
op->dpif_op.u.execute.key = miss->key;
op->dpif_op.u.execute.key_len = miss->key_len;
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- ofproto->n_matches++;
-
if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
/*
* Extra-special case for fail-open mode.
* increment '*n_ops'. */
static void
handle_flow_miss_without_facet(struct flow_miss *miss,
- struct rule_dpif *rule,
struct flow_miss_op *ops, size_t *n_ops)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct action_xlate_ctx ctx;
+ struct rule_dpif *rule = rule_dpif_lookup(miss->ofproto, &miss->flow);
+ long long int now = time_msec();
struct ofpbuf *packet;
+ struct xlate_in xin;
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
struct dpif_flow_stats stats;
- struct ofpbuf odp_actions;
COVERAGE_INC(facet_suppress);
- ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
+ handle_flow_miss_common(rule, packet, &miss->flow);
- dpif_flow_stats_extract(&miss->flow, packet, &stats);
+ dpif_flow_stats_extract(&miss->flow, packet, now, &stats);
rule_credit_stats(rule, &stats);
- action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
- rule, 0, packet);
- ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
- &odp_actions);
+ xlate_in_init(&xin, miss->ofproto, &miss->flow, &miss->initial_vals,
+ rule, stats.tcp_flags, packet);
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &op->xout);
- if (odp_actions.size) {
+ if (op->xout.odp_actions.size) {
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- execute->actions = odp_actions.data;
- execute->actions_len = odp_actions.size;
- op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
+ execute->actions = op->xout.odp_actions.data;
+ execute->actions_len = op->xout.odp_actions.size;
+ op->xout_garbage = true;
(*n_ops)++;
} else {
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&op->xout);
}
}
}
/* Handles 'miss', which matches 'facet'. May add any required datapath
- * operations to 'ops', incrementing '*n_ops' for each new op. */
+ * operations to 'ops', incrementing '*n_ops' for each new op.
+ *
+ * All of the packets in 'miss' are considered to have arrived at time 'now'.
+ * This is really important only for new facets: if we just called time_msec()
+ * here, then the new subfacet or its packets could look (occasionally) as
+ * though it was used some time after the facet was used. That can make a
+ * one-packet flow look like it has a nonzero duration, which looks odd in
+ * e.g. NetFlow statistics. */
static void
handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
+ long long int now,
struct flow_miss_op *ops, size_t *n_ops)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct subfacet *subfacet;
struct ofpbuf *packet;
- subfacet = subfacet_create(facet,
- miss->key_fitness, miss->key, miss->key_len,
- miss->initial_tci);
+ subfacet = subfacet_create(facet, miss, now);
+ want_path = subfacet->facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
LIST_FOR_EACH (packet, list_node, &miss->packets) {
struct flow_miss_op *op = &ops[*n_ops];
struct dpif_flow_stats stats;
- struct ofpbuf odp_actions;
handle_flow_miss_common(facet->rule, packet, &miss->flow);
- ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
- if (!subfacet->actions || subfacet->slow) {
- subfacet_make_actions(subfacet, packet, &odp_actions);
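+        /* For slow-pathed facets, translate per packet so that the
+         * translation's side effects (e.g. learning) still take place. */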
+ if (want_path != SF_FAST_PATH) {
+ struct xlate_in xin;
+
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, 0, packet);
+ xlate_actions_for_side_effects(&xin);
}
- dpif_flow_stats_extract(&facet->flow, packet, &stats);
+ dpif_flow_stats_extract(&facet->flow, packet, now, &stats);
subfacet_update_stats(subfacet, &stats);
- if (subfacet->actions_len) {
+ if (facet->xout.odp_actions.size) {
struct dpif_execute *execute = &op->dpif_op.u.execute;
init_flow_miss_execute_op(miss, packet, op);
- op->subfacet = subfacet;
- if (!subfacet->slow) {
- execute->actions = subfacet->actions;
- execute->actions_len = subfacet->actions_len;
- ofpbuf_uninit(&odp_actions);
- } else {
- execute->actions = odp_actions.data;
- execute->actions_len = odp_actions.size;
- op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
- }
-
+            execute->actions = facet->xout.odp_actions.data;
+ execute->actions_len = facet->xout.odp_actions.size;
(*n_ops)++;
- } else {
- ofpbuf_uninit(&odp_actions);
}
}
- want_path = subfacet_want_path(subfacet->slow);
if (miss->upcall_type == DPIF_UC_MISS || subfacet->path != want_path) {
struct flow_miss_op *op = &ops[(*n_ops)++];
struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
- op->subfacet = subfacet;
- op->garbage = NULL;
+ subfacet->path = want_path;
+
+ op->xout_garbage = false;
op->dpif_op.type = DPIF_OP_FLOW_PUT;
put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
put->key = miss->key;
put->key_len = miss->key_len;
if (want_path == SF_FAST_PATH) {
- put->actions = subfacet->actions;
- put->actions_len = subfacet->actions_len;
+ put->actions = facet->xout.odp_actions.data;
+ put->actions_len = facet->xout.odp_actions.size;
} else {
- compose_slow_path(ofproto, &facet->flow, subfacet->slow,
- op->stub, sizeof op->stub,
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ op->slow_stub, sizeof op->slow_stub,
&put->actions, &put->actions_len);
}
put->stats = NULL;
}
}
-/* Handles flow miss 'miss' on 'ofproto'. May add any required datapath
- * operations to 'ops', incrementing '*n_ops' for each new op. */
+/* Handles flow miss 'miss'. May add any required datapath operations
+ * to 'ops', incrementing '*n_ops' for each new op. */
static void
-handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
- struct flow_miss_op *ops, size_t *n_ops)
+handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
+ size_t *n_ops)
{
+ struct ofproto_dpif *ofproto = miss->ofproto;
struct facet *facet;
+ long long int now;
uint32_t hash;
/* The caller must ensure that miss->hmap_node.hash contains
facet = facet_lookup_valid(ofproto, &miss->flow, hash);
if (!facet) {
- struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow);
-
- if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
- handle_flow_miss_without_facet(miss, rule, ops, n_ops);
+ /* There does not exist a bijection between 'struct flow' and datapath
+         * flow keys with fitness ODP_FIT_TOO_LITTLE. This breaks a fundamental
+ * assumption used throughout the facet and subfacet handling code.
+ * Since we have to handle these misses in userspace anyway, we simply
+ * skip facet creation, avoiding the problem altogether. */
+ if (miss->key_fitness == ODP_FIT_TOO_LITTLE
+ || !flow_miss_should_make_facet(ofproto, miss, hash)) {
+ handle_flow_miss_without_facet(miss, ops, n_ops);
return;
}
- facet = facet_create(rule, &miss->flow, hash);
+ facet = facet_create(miss, hash);
+ now = facet->used;
+ } else {
+ now = time_msec();
+ }
+ handle_flow_miss_with_facet(miss, facet, now, ops, n_ops);
+}
+
+static struct drop_key *
+drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
+ size_t key_len)
+{
+ struct drop_key *drop_key;
+
+ HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
+ &backer->drop_keys) {
+ if (drop_key->key_len == key_len
+ && !memcmp(drop_key->key, key, key_len)) {
+ return drop_key;
+ }
+ }
+ return NULL;
+}
+
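+/* Removes every installed "drop key" flow from 'backer''s datapath and
+ * forgets the corresponding entries, logging any deletion failures (rate
+ * limited). */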
+static void
+drop_key_clear(struct dpif_backer *backer)
+{
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
+ struct drop_key *drop_key, *next;
+
+ HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
+ int error;
+
+ error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
+ NULL);
+ if (error && !VLOG_DROP_WARN(&rl)) {
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
+ VLOG_WARN("Failed to delete drop key (%s) (%s)", strerror(error),
+ ds_cstr(&ds));
+ ds_destroy(&ds);
+ }
+
+ hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
+ free(drop_key->key);
+ free(drop_key);
}
- handle_flow_miss_with_facet(miss, facet, ops, n_ops);
}
-/* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
- * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
- * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
- * what a flow key should contain.
+/* Given a datapath, packet, and flow metadata ('backer', 'packet', and 'key'
+ * respectively), populates 'flow' with the result of odp_flow_key_to_flow().
+ * Optionally, if nonnull, populates 'fitnessp' with the fitness of 'flow' as
+ * returned by odp_flow_key_to_flow(). Also, optionally populates 'ofproto'
+ * with the ofproto_dpif, and 'odp_in_port' with the datapath in_port, that
+ * 'packet' ingressed.
*
- * This function also includes some logic to help make VLAN splinters
- * transparent to the rest of the upcall processing logic. In particular, if
- * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
- * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
- * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
+ * If 'ofproto' is nonnull, requires 'flow''s in_port to exist. Otherwise sets
+ * 'flow''s in_port to OFPP_NONE.
*
- * Sets '*initial_tci' to the VLAN TCI with which the packet was really
- * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
- * (This differs from the value returned in flow->vlan_tci only for packets
- * received on VLAN splinters.)
- */
-static enum odp_key_fitness
-ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len,
- struct flow *flow, ovs_be16 *initial_tci,
- struct ofpbuf *packet)
+ * This function does post-processing on data returned from
+ * odp_flow_key_to_flow() to help make VLAN splinters transparent to the rest
+ * of the upcall processing logic. In particular, if the extracted in_port is
+ * a VLAN splinter port, it replaces flow->in_port by the "real" port, sets
+ * flow->vlan_tci correctly for the VLAN of the VLAN splinter port, and pushes
+ * a VLAN header onto 'packet' (if it is nonnull).
+ *
+ * Optionally, if 'initial_vals' is nonnull, sets 'initial_vals->vlan_tci'
+ * to the VLAN TCI with which the packet was really received, that is, the
+ * actual VLAN TCI extracted by odp_flow_key_to_flow(). (This differs from
+ * the value returned in flow->vlan_tci only for packets received on
+ * VLAN splinters.)
+ *
+ * Similarly, this function also includes some logic to help with tunnels. It
+ * may modify 'flow' as necessary to make the tunneling implementation
+ * transparent to the upcall processing logic.
+ *
+ * Returns 0 if successful, ENODEV if the parsed flow has no associated ofport,
+ * or some other positive errno if there are other problems. */
+static int
+ofproto_receive(const struct dpif_backer *backer, struct ofpbuf *packet,
+ const struct nlattr *key, size_t key_len,
+ struct flow *flow, enum odp_key_fitness *fitnessp,
+ struct ofproto_dpif **ofproto, uint32_t *odp_in_port,
+ struct initial_vals *initial_vals)
{
+ const struct ofport_dpif *port;
enum odp_key_fitness fitness;
+ int error = ENODEV;
fitness = odp_flow_key_to_flow(key, key_len, flow);
if (fitness == ODP_FIT_ERROR) {
- return fitness;
+ error = EINVAL;
+ goto exit;
+ }
+
+ if (initial_vals) {
+ initial_vals->vlan_tci = flow->vlan_tci;
+ }
+
+ if (odp_in_port) {
+ *odp_in_port = flow->in_port;
+ }
+
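+    /* Map the datapath in_port to an OpenFlow port: packets carrying tunnel
+     * metadata are attributed to the receiving tunnel ofport, all others to
+     * the ofport bound to the datapath port. */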
+ port = (tnl_port_should_receive(flow)
+ ? ofport_dpif_cast(tnl_port_receive(flow))
+ : odp_port_to_ofport(backer, flow->in_port));
+ flow->in_port = port ? port->up.ofp_port : OFPP_NONE;
+ if (!port) {
+ goto exit;
}
- *initial_tci = flow->vlan_tci;
- if (vsp_adjust_flow(ofproto, flow)) {
+ /* XXX: Since the tunnel module is not scoped per backer, for a tunnel port
+ * it's theoretically possible that we'll receive an ofport belonging to an
+ * entirely different datapath. In practice, this can't happen because no
+     * platform has two separate datapaths that each support tunneling. */
+ ovs_assert(ofproto_dpif_cast(port->up.ofproto)->backer == backer);
+
+ if (vsp_adjust_flow(ofproto_dpif_cast(port->up.ofproto), flow)) {
if (packet) {
- /* Make the packet resemble the flow, so that it gets sent to an
- * OpenFlow controller properly, so that it looks correct for
- * sFlow, and so that flow_extract() will get the correct vlan_tci
- * if it is called on 'packet'.
+ /* Make the packet resemble the flow, so that it gets sent to
+ * an OpenFlow controller properly, so that it looks correct
+ * for sFlow, and so that flow_extract() will get the correct
+ * vlan_tci if it is called on 'packet'.
*
* The allocated space inside 'packet' probably also contains
- * 'key', that is, both 'packet' and 'key' are probably part of a
- * struct dpif_upcall (see the large comment on that structure
- * definition), so pushing data on 'packet' is in general not a
- * good idea since it could overwrite 'key' or free it as a side
- * effect. However, it's OK in this special case because we know
- * that 'packet' is inside a Netlink attribute: pushing 4 bytes
- * will just overwrite the 4-byte "struct nlattr", which is fine
- * since we don't need that header anymore. */
+ * 'key', that is, both 'packet' and 'key' are probably part of
+ * a struct dpif_upcall (see the large comment on that
+ * structure definition), so pushing data on 'packet' is in
+ * general not a good idea since it could overwrite 'key' or
+ * free it as a side effect. However, it's OK in this special
+ * case because we know that 'packet' is inside a Netlink
+ * attribute: pushing 4 bytes will just overwrite the 4-byte
+ * "struct nlattr", which is fine since we don't need that
+ * header anymore. */
eth_push_vlan(packet, flow->vlan_tci);
}
+ /* We can't reproduce 'key' from 'flow'. */
+ fitness = fitness == ODP_FIT_PERFECT ? ODP_FIT_TOO_MUCH : fitness;
+ }
+ error = 0;
- /* Let the caller know that we can't reproduce 'key' from 'flow'. */
- if (fitness == ODP_FIT_PERFECT) {
- fitness = ODP_FIT_TOO_MUCH;
- }
+ if (ofproto) {
+ *ofproto = ofproto_dpif_cast(port->up.ofproto);
}
- return fitness;
+exit:
+ if (fitnessp) {
+ *fitnessp = fitness;
+ }
+ return error;
}
static void
-handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
+handle_miss_upcalls(struct dpif_backer *backer, struct dpif_upcall *upcalls,
size_t n_upcalls)
{
struct dpif_upcall *upcall;
for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
struct flow_miss *miss = &misses[n_misses];
struct flow_miss *existing_miss;
+ struct ofproto_dpif *ofproto;
+ uint32_t odp_in_port;
+ struct flow flow;
uint32_t hash;
+ int error;
- /* Obtain metadata and check userspace/kernel agreement on flow match,
- * then set 'flow''s header pointers. */
- miss->key_fitness = ofproto_dpif_extract_flow_key(
- ofproto, upcall->key, upcall->key_len,
- &miss->flow, &miss->initial_tci, upcall->packet);
- if (miss->key_fitness == ODP_FIT_ERROR) {
+ error = ofproto_receive(backer, upcall->packet, upcall->key,
+ upcall->key_len, &flow, &miss->key_fitness,
+ &ofproto, &odp_in_port, &miss->initial_vals);
+ if (error == ENODEV) {
+ struct drop_key *drop_key;
+
+ /* Received packet on datapath port for which we couldn't
+ * associate an ofproto. This can happen if a port is removed
+ * while traffic is being received. Print a rate-limited message
+ * in case it happens frequently. Install a drop flow so
+ * that future packets of the flow are inexpensively dropped
+ * in the kernel. */
+ VLOG_INFO_RL(&rl, "received packet on unassociated datapath port "
+ "%"PRIu32, odp_in_port);
+
+ drop_key = drop_key_lookup(backer, upcall->key, upcall->key_len);
+ if (!drop_key) {
+ drop_key = xmalloc(sizeof *drop_key);
+ drop_key->key = xmemdup(upcall->key, upcall->key_len);
+ drop_key->key_len = upcall->key_len;
+
+ hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
+ hash_bytes(drop_key->key, drop_key->key_len, 0));
+ dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
+ drop_key->key, drop_key->key_len, NULL, 0, NULL);
+ }
continue;
}
- flow_extract(upcall->packet, miss->flow.skb_priority,
- miss->flow.tun_id, miss->flow.in_port, &miss->flow);
+ if (error) {
+ continue;
+ }
+
+ ofproto->n_missed++;
+ flow_extract(upcall->packet, flow.skb_priority, flow.skb_mark,
+ &flow.tunnel, flow.in_port, &miss->flow);
/* Add other packets to a to-do list. */
hash = flow_hash(&miss->flow, 0);
- existing_miss = flow_miss_find(&todo, &miss->flow, hash);
+ existing_miss = flow_miss_find(&todo, ofproto, &miss->flow, hash);
if (!existing_miss) {
hmap_insert(&todo, &miss->hmap_node, hash);
+ miss->ofproto = ofproto;
miss->key = upcall->key;
miss->key_len = upcall->key_len;
miss->upcall_type = upcall->type;
* operations to batch. */
n_ops = 0;
HMAP_FOR_EACH (miss, hmap_node, &todo) {
- handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
+ handle_flow_miss(miss, flow_miss_ops, &n_ops);
}
- assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
+ ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
/* Execute batch. */
for (i = 0; i < n_ops; i++) {
dpif_ops[i] = &flow_miss_ops[i].dpif_op;
}
- dpif_operate(ofproto->dpif, dpif_ops, n_ops);
+ dpif_operate(backer->dpif, dpif_ops, n_ops);
- /* Free memory and update facets. */
+ /* Free memory. */
for (i = 0; i < n_ops; i++) {
- struct flow_miss_op *op = &flow_miss_ops[i];
-
- switch (op->dpif_op.type) {
- case DPIF_OP_EXECUTE:
- break;
-
- case DPIF_OP_FLOW_PUT:
- if (!op->dpif_op.error) {
- op->subfacet->path = subfacet_want_path(op->subfacet->slow);
- }
- break;
-
- case DPIF_OP_FLOW_DEL:
- NOT_REACHED();
+ if (flow_miss_ops[i].xout_garbage) {
+ xlate_out_uninit(&flow_miss_ops[i].xout);
}
-
- free(op->garbage);
}
hmap_destroy(&todo);
}
-static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL }
+static enum { SFLOW_UPCALL, MISS_UPCALL, BAD_UPCALL, FLOW_SAMPLE_UPCALL,
+ IPFIX_UPCALL }
classify_upcall(const struct dpif_upcall *upcall)
{
+ size_t userdata_len;
union user_action_cookie cookie;
/* First look at the upcall type. */
}
/* "action" upcalls need a closer look. */
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
- switch (cookie.type) {
- case USER_ACTION_COOKIE_SFLOW:
+ if (!upcall->userdata) {
+ VLOG_WARN_RL(&rl, "action upcall missing cookie");
+ return BAD_UPCALL;
+ }
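+
+    /* The cookie arrives as a variable-length Netlink attribute; its size
+     * indicates which member of the union is in use, so classify by both the
+     * attribute size and the cookie type. */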
+ userdata_len = nl_attr_get_size(upcall->userdata);
+ if (userdata_len < sizeof cookie.type
+ || userdata_len > sizeof cookie) {
+ VLOG_WARN_RL(&rl, "action upcall cookie has unexpected size %zu",
+ userdata_len);
+ return BAD_UPCALL;
+ }
+ memset(&cookie, 0, sizeof cookie);
+ memcpy(&cookie, nl_attr_get(upcall->userdata), userdata_len);
+ if (userdata_len == sizeof cookie.sflow
+ && cookie.type == USER_ACTION_COOKIE_SFLOW) {
return SFLOW_UPCALL;
-
- case USER_ACTION_COOKIE_SLOW_PATH:
+ } else if (userdata_len == sizeof cookie.slow_path
+ && cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
return MISS_UPCALL;
-
- case USER_ACTION_COOKIE_UNSPEC:
- default:
- VLOG_WARN_RL(&rl, "invalid user cookie : 0x%"PRIx64, upcall->userdata);
+ } else if (userdata_len == sizeof cookie.flow_sample
+ && cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
+ return FLOW_SAMPLE_UPCALL;
+ } else if (userdata_len == sizeof cookie.ipfix
+ && cookie.type == USER_ACTION_COOKIE_IPFIX) {
+ return IPFIX_UPCALL;
+ } else {
+ VLOG_WARN_RL(&rl, "invalid user cookie of type %"PRIu16
+ " and size %zu", cookie.type, userdata_len);
return BAD_UPCALL;
}
}
static void
-handle_sflow_upcall(struct ofproto_dpif *ofproto,
+handle_sflow_upcall(struct dpif_backer *backer,
const struct dpif_upcall *upcall)
{
+ struct ofproto_dpif *ofproto;
union user_action_cookie cookie;
- enum odp_key_fitness fitness;
- ovs_be16 initial_tci;
struct flow flow;
+ uint32_t odp_in_port;
- fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
- upcall->key_len, &flow,
- &initial_tci, upcall->packet);
- if (fitness == ODP_FIT_ERROR) {
+ if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
+ &flow, NULL, &ofproto, &odp_in_port, NULL)
+ || !ofproto->sflow) {
+ return;
+ }
+
+ memset(&cookie, 0, sizeof cookie);
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.sflow);
+ dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
+ odp_in_port, &cookie);
+}
+
+static void
+handle_flow_sample_upcall(struct dpif_backer *backer,
+ const struct dpif_upcall *upcall)
+{
+ struct ofproto_dpif *ofproto;
+ union user_action_cookie cookie;
+ struct flow flow;
+
+ if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
+ &flow, NULL, &ofproto, NULL, NULL)
+ || !ofproto->ipfix) {
+ return;
+ }
+
+ memset(&cookie, 0, sizeof cookie);
+ memcpy(&cookie, nl_attr_get(upcall->userdata), sizeof cookie.flow_sample);
+
+ /* The flow reflects exactly the contents of the packet. Sample
+ * the packet using it. */
+ dpif_ipfix_flow_sample(ofproto->ipfix, upcall->packet, &flow,
+ cookie.flow_sample.collector_set_id,
+ cookie.flow_sample.probability,
+ cookie.flow_sample.obs_domain_id,
+ cookie.flow_sample.obs_point_id);
+}
+
+static void
+handle_ipfix_upcall(struct dpif_backer *backer,
+ const struct dpif_upcall *upcall)
+{
+ struct ofproto_dpif *ofproto;
+ struct flow flow;
+
+ if (ofproto_receive(backer, upcall->packet, upcall->key, upcall->key_len,
+ &flow, NULL, &ofproto, NULL, NULL)
+ || !ofproto->ipfix) {
return;
}
- memcpy(&cookie, &upcall->userdata, sizeof(cookie));
- dpif_sflow_received(ofproto->sflow, upcall->packet, &flow, &cookie);
+ /* The flow reflects exactly the contents of the packet. Sample
+ * the packet using it. */
+ dpif_ipfix_bridge_sample(ofproto->ipfix, upcall->packet, &flow);
}
static int
-handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
+handle_upcalls(struct dpif_backer *backer, unsigned int max_batch)
{
struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
int n_misses;
int i;
- assert(max_batch <= FLOW_MISS_MAX_BATCH);
+ ovs_assert(max_batch <= FLOW_MISS_MAX_BATCH);
n_misses = 0;
for (n_processed = 0; n_processed < max_batch; n_processed++) {
ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
sizeof miss_buf_stubs[n_misses]);
- error = dpif_recv(ofproto->dpif, upcall, buf);
+ error = dpif_recv(backer->dpif, upcall, buf);
if (error) {
ofpbuf_uninit(buf);
break;
break;
case SFLOW_UPCALL:
- if (ofproto->sflow) {
- handle_sflow_upcall(ofproto, upcall);
- }
+ handle_sflow_upcall(backer, upcall);
+ ofpbuf_uninit(buf);
+ break;
+
+ case FLOW_SAMPLE_UPCALL:
+ handle_flow_sample_upcall(backer, upcall);
+ ofpbuf_uninit(buf);
+ break;
+
+ case IPFIX_UPCALL:
+ handle_ipfix_upcall(backer, upcall);
ofpbuf_uninit(buf);
break;
}
/* Handle deferred MISS_UPCALL processing. */
- handle_miss_upcalls(ofproto, misses, n_misses);
+ handle_miss_upcalls(backer, misses, n_misses);
for (i = 0; i < n_misses; i++) {
ofpbuf_uninit(&miss_bufs[i]);
}
/* Flow expiration. */
static int subfacet_max_idle(const struct ofproto_dpif *);
-static void update_stats(struct ofproto_dpif *);
+static void update_stats(struct dpif_backer *);
static void rule_expire(struct rule_dpif *);
static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
*
* Returns the number of milliseconds after which it should be called again. */
static int
-expire(struct ofproto_dpif *ofproto)
+expire(struct dpif_backer *backer)
{
- struct rule_dpif *rule, *next_rule;
- struct oftable *table;
- int dp_max_idle;
+ struct ofproto_dpif *ofproto;
+ int max_idle = INT32_MAX;
- /* Update stats for each flow in the datapath. */
- update_stats(ofproto);
+ /* Periodically clear out the drop keys in an effort to keep them
+ * relatively few. */
+ drop_key_clear(backer);
- /* Expire subfacets that have been idle too long. */
- dp_max_idle = subfacet_max_idle(ofproto);
- expire_subfacets(ofproto, dp_max_idle);
+ /* Update stats for each flow in the backer. */
+ update_stats(backer);
- /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
- OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
- struct cls_cursor cursor;
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct rule *rule, *next_rule;
+ int dp_max_idle;
- cls_cursor_init(&cursor, &table->cls, NULL);
- CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
- rule_expire(rule);
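+        /* Expiration now runs once per backer, so skip ofprotos that are
+         * attached to a different backer. */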
+ if (ofproto->backer != backer) {
+ continue;
}
- }
- /* All outstanding data in existing flows has been accounted, so it's a
- * good time to do bond rebalancing. */
- if (ofproto->has_bonded_bundles) {
- struct ofbundle *bundle;
+ /* Keep track of the max number of flows per ofproto_dpif. */
+ update_max_subfacet_count(ofproto);
+
+ /* Expire subfacets that have been idle too long. */
+ dp_max_idle = subfacet_max_idle(ofproto);
+ expire_subfacets(ofproto, dp_max_idle);
- HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
- if (bundle->bond) {
- bond_rebalance(bundle->bond, &ofproto->revalidate_set);
+ max_idle = MIN(max_idle, dp_max_idle);
+
+ /* Expire OpenFlow flows whose idle_timeout or hard_timeout
+ * has passed. */
+ LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
+ &ofproto->up.expirable) {
+ rule_expire(rule_dpif_cast(rule));
+ }
+
+ /* All outstanding data in existing flows has been accounted, so it's a
+ * good time to do bond rebalancing. */
+ if (ofproto->has_bonded_bundles) {
+ struct ofbundle *bundle;
+
+ HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
+ if (bundle->bond) {
+ bond_rebalance(bundle->bond, &backer->revalidate_set);
+ }
}
}
}
- return MIN(dp_max_idle, 1000);
+ return MIN(max_idle, 1000);
}
/* Updates flow table statistics given that the datapath just reported 'stats'
const struct dpif_flow_stats *stats)
{
struct facet *facet = subfacet->facet;
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ struct dpif_flow_stats diff;
+
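+    /* The datapath reports cumulative flow statistics, so compute the delta
+     * since the last report before crediting it to the subfacet. */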
+ diff.tcp_flags = stats->tcp_flags;
+ diff.used = stats->used;
if (stats->n_packets >= subfacet->dp_packet_count) {
- uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
- facet->packet_count += extra;
+ diff.n_packets = stats->n_packets - subfacet->dp_packet_count;
} else {
VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
+ diff.n_packets = 0;
}
if (stats->n_bytes >= subfacet->dp_byte_count) {
- facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
+ diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count;
} else {
VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
+ diff.n_bytes = 0;
}
+ ofproto->n_hit += diff.n_packets;
subfacet->dp_packet_count = stats->n_packets;
subfacet->dp_byte_count = stats->n_bytes;
+ subfacet_update_stats(subfacet, &diff);
- facet->tcp_flags |= stats->tcp_flags;
-
- subfacet_update_time(subfacet, stats->used);
if (facet->accounted_bytes < facet->byte_count) {
facet_learn(facet);
facet_account(facet);
facet->accounted_bytes = facet->byte_count;
}
- facet_push_stats(facet);
}
/* 'key' with length 'key_len' bytes is a flow in 'dpif' that we know nothing
* about, or a flow that shouldn't be installed but was anyway. Delete it. */
static void
-delete_unexpected_flow(struct dpif *dpif,
+delete_unexpected_flow(struct ofproto_dpif *ofproto,
const struct nlattr *key, size_t key_len)
{
if (!VLOG_DROP_WARN(&rl)) {
ds_init(&s);
odp_flow_key_format(key, key_len, &s);
- VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
+ VLOG_WARN("unexpected flow on %s: %s", ofproto->up.name, ds_cstr(&s));
ds_destroy(&s);
}
COVERAGE_INC(facet_unexpected);
- dpif_flow_del(dpif, key, key_len, NULL);
+ dpif_flow_del(ofproto->backer->dpif, key, key_len, NULL);
}
/* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
* avoided by calling update_stats() whenever rules are created or
* deleted. However, the performance impact of making so many calls to the
* datapath do not justify the benefit of having perfectly accurate statistics.
+ *
+ * In addition, this function maintains per-ofproto flow hit counts. Patch
+ * ports are not treated specially: e.g. a packet that ingresses on br0 and is
+ * patched into br1 increases the hit count of br0 by 1 but does not affect
+ * the hit or miss counts of br1.
*/
static void
-update_stats(struct ofproto_dpif *p)
+update_stats(struct dpif_backer *backer)
{
const struct dpif_flow_stats *stats;
struct dpif_flow_dump dump;
const struct nlattr *key;
+ struct ofproto_dpif *ofproto;
size_t key_len;
- dpif_flow_dump_start(&dump, p->dpif);
+ dpif_flow_dump_start(&dump, backer->dpif);
while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
+ struct flow flow;
struct subfacet *subfacet;
+ uint32_t key_hash;
+
+ if (ofproto_receive(backer, NULL, key, key_len, &flow, NULL, &ofproto,
+ NULL, NULL)) {
+ continue;
+ }
+
+ ofproto->total_subfacet_count += hmap_count(&ofproto->subfacets);
+ ofproto->n_update_stats++;
- subfacet = subfacet_find(p, key, key_len);
+ key_hash = odp_flow_key_hash(key, key_len);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
case SF_FAST_PATH:
update_subfacet_stats(subfacet, stats);
case SF_NOT_INSTALLED:
default:
- delete_unexpected_flow(p->dpif, key, key_len);
+ delete_unexpected_flow(ofproto, key, key_len);
break;
}
+ run_fast_rl();
}
dpif_flow_dump_done(&dump);
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ update_moving_averages(ofproto);
+ }
+
}
/* Calculates and returns the number of milliseconds of idle time after which
return bucket * BUCKET_WIDTH;
}
-enum { EXPIRE_MAX_BATCH = 50 };
-
-static void
-expire_batch(struct ofproto_dpif *ofproto, struct subfacet **subfacets, int n)
-{
- struct odputil_keybuf keybufs[EXPIRE_MAX_BATCH];
- struct dpif_op ops[EXPIRE_MAX_BATCH];
- struct dpif_op *opsp[EXPIRE_MAX_BATCH];
- struct ofpbuf keys[EXPIRE_MAX_BATCH];
- struct dpif_flow_stats stats[EXPIRE_MAX_BATCH];
- int i;
-
- for (i = 0; i < n; i++) {
- ops[i].type = DPIF_OP_FLOW_DEL;
- subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
- ops[i].u.flow_del.key = keys[i].data;
- ops[i].u.flow_del.key_len = keys[i].size;
- ops[i].u.flow_del.stats = &stats[i];
- opsp[i] = &ops[i];
- }
-
- dpif_operate(ofproto->dpif, opsp, n);
- for (i = 0; i < n; i++) {
- subfacet_reset_dp_stats(subfacets[i], &stats[i]);
- subfacets[i]->path = SF_NOT_INSTALLED;
- subfacet_destroy(subfacets[i]);
- }
-}
-
static void
expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
{
long long int special_cutoff = time_msec() - 10000;
struct subfacet *subfacet, *next_subfacet;
- struct subfacet *batch[EXPIRE_MAX_BATCH];
+ struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
int n_batch;
n_batch = 0;
&ofproto->subfacets) {
long long int cutoff;
- cutoff = (subfacet->slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)
+ cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
+ | SLOW_STP)
? special_cutoff
: normal_cutoff);
if (subfacet->used < cutoff) {
if (subfacet->path != SF_NOT_INSTALLED) {
batch[n_batch++] = subfacet;
- if (n_batch >= EXPIRE_MAX_BATCH) {
- expire_batch(ofproto, batch, n_batch);
+ if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
+ subfacet_destroy_batch(ofproto, batch, n_batch);
n_batch = 0;
}
} else {
}
if (n_batch > 0) {
- expire_batch(ofproto, batch, n_batch);
+ subfacet_destroy_batch(ofproto, batch, n_batch);
}
}
\f
/* Facets. */
-/* Creates and returns a new facet owned by 'rule', given a 'flow'.
+/* Creates and returns a new facet based on 'miss'.
*
* The caller must already have determined that no facet with an identical
- * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
- * the ofproto's classifier table.
+ * 'miss->flow' exists in 'miss->ofproto'.
*
- * 'hash' must be the return value of flow_hash(flow, 0).
+ * 'hash' must be the return value of flow_hash(miss->flow, 0).
*
* The facet will initially have no subfacets. The caller should create (at
* least) one subfacet with subfacet_create(). */
static struct facet *
-facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
+facet_create(const struct flow_miss *miss, uint32_t hash)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
+ struct ofproto_dpif *ofproto = miss->ofproto;
+ struct xlate_in xin;
struct facet *facet;
facet = xzalloc(sizeof *facet);
facet->used = time_msec();
+ facet->flow = miss->flow;
+ facet->initial_vals = miss->initial_vals;
+ facet->rule = rule_dpif_lookup(ofproto, &facet->flow);
+ facet->learn_rl = time_msec() + 500;
+
hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
- list_push_back(&rule->facets, &facet->list_node);
- facet->rule = rule;
- facet->flow = *flow;
+ list_push_back(&facet->rule->facets, &facet->list_node);
list_init(&facet->subfacets);
netflow_flow_init(&facet->nf_flow);
netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
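+
+    /* Translate the facet's actions once up front and cache the result in
+     * 'facet->xout'; the facet's subfacets reuse these datapath actions. */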
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, 0, NULL);
+ xin.may_learn = true;
+ xlate_actions(&xin, &facet->xout);
+ facet->nf_flow.output_iface = facet->xout.nf_output_iface;
+
return facet;
}
static void
facet_free(struct facet *facet)
{
- free(facet);
+ if (facet) {
+ xlate_out_uninit(&facet->xout);
+ free(facet);
+ }
}
/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
- * 'packet', which arrived on 'in_port'.
- *
- * Takes ownership of 'packet'. */
+ * 'packet', which arrived on 'in_port'. */
static bool
execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
const struct nlattr *odp_actions, size_t actions_len,
int error;
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ odp_flow_key_from_flow(&key, flow,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
- error = dpif_execute(ofproto->dpif, key.data, key.size,
+ error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
odp_actions, actions_len, packet);
-
- ofpbuf_delete(packet);
return !error;
}
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
struct subfacet *subfacet, *next_subfacet;
- assert(!list_is_empty(&facet->subfacets));
+ ovs_assert(!list_is_empty(&facet->subfacets));
/* First uninstall all of the subfacets to get final statistics. */
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
static void
facet_learn(struct facet *facet)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- struct action_xlate_ctx ctx;
+ long long int now = time_msec();
+
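+    /* Rate-limit learning to at most once every 500 ms per facet, except
+     * when the facet's actions include a FIN timeout. */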
+ if (!facet->xout.has_fin_timeout && now < facet->learn_rl) {
+ return;
+ }
- if (!facet->has_learn
- && !facet->has_normal
- && (!facet->has_fin_timeout
+ facet->learn_rl = now + 500;
+
+ if (!facet->xout.has_learn
+ && !facet->xout.has_normal
+ && (!facet->xout.has_fin_timeout
|| !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
return;
}
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- facet->flow.vlan_tci,
- facet->rule, facet->tcp_flags, NULL);
- ctx.may_learn = true;
- xlate_actions_for_side_effects(&ctx, facet->rule->up.ofpacts,
- facet->rule->up.ofpacts_len);
+ facet_push_stats(facet, true);
}
static void
facet_account(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- struct subfacet *subfacet;
const struct nlattr *a;
unsigned int left;
ovs_be16 vlan_tci;
uint64_t n_bytes;
- if (!facet->has_normal || !ofproto->has_bonded_bundles) {
+ if (!facet->xout.has_normal || !ofproto->has_bonded_bundles) {
return;
}
n_bytes = facet->byte_count - facet->accounted_bytes;
*
* We use the actions from an arbitrary subfacet because they should all
* be equally valid for our purpose. */
- subfacet = CONTAINER_OF(list_front(&facet->subfacets),
- struct subfacet, list_node);
vlan_tci = facet->flow.vlan_tci;
- NL_ATTR_FOR_EACH_UNSAFE (a, left,
- subfacet->actions, subfacet->actions_len) {
+ NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size) {
const struct ovs_action_push_vlan *vlan;
struct ofport_dpif *port;
const struct ofpact *ofpacts = rule->ofpacts;
size_t ofpacts_len = rule->ofpacts_len;
- if (ofpacts->type == OFPACT_CONTROLLER &&
+ if (ofpacts_len > 0 &&
+ ofpacts->type == OFPACT_CONTROLLER &&
ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len)) {
return true;
}
struct subfacet *subfacet;
LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- assert(!subfacet->dp_byte_count);
- assert(!subfacet->dp_packet_count);
+ ovs_assert(!subfacet->dp_byte_count);
+ ovs_assert(!subfacet->dp_packet_count);
}
- facet_push_stats(facet);
+ facet_push_stats(facet, false);
if (facet->accounted_bytes < facet->byte_count) {
facet_account(facet);
facet->accounted_bytes = facet->byte_count;
netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
}
- facet->rule->packet_count += facet->packet_count;
- facet->rule->byte_count += facet->byte_count;
-
/* Reset counters to prevent double counting if 'facet' ever gets
* reinstalled. */
facet_reset_counters(facet);
facet = facet_find(ofproto, flow, hash);
if (facet
- && (ofproto->need_revalidate
- || tag_set_intersects(&ofproto->revalidate_set, facet->tags))) {
- facet_revalidate(facet);
+ && (ofproto->backer->need_revalidate
+ || tag_set_intersects(&ofproto->backer->revalidate_set,
+ facet->xout.tags))
+ && !facet_revalidate(facet)) {
+ return NULL;
}
return facet;
}
-static const char *
-subfacet_path_to_string(enum subfacet_path path)
-{
- switch (path) {
- case SF_NOT_INSTALLED:
- return "not installed";
- case SF_FAST_PATH:
- return "in fast path";
- case SF_SLOW_PATH:
- return "in slow path";
- default:
- return "<error>";
- }
-}
-
-/* Returns the path in which a subfacet should be installed if its 'slow'
- * member has the specified value. */
-static enum subfacet_path
-subfacet_want_path(enum slow_path_reason slow)
-{
- return slow ? SF_SLOW_PATH : SF_FAST_PATH;
-}
-
-/* Returns true if 'subfacet' needs to have its datapath flow updated,
- * supposing that its actions have been recalculated as 'want_actions' and that
- * 'slow' is nonzero iff 'subfacet' should be in the slow path. */
-static bool
-subfacet_should_install(struct subfacet *subfacet, enum slow_path_reason slow,
- const struct ofpbuf *want_actions)
-{
- enum subfacet_path want_path = subfacet_want_path(slow);
- return (want_path != subfacet->path
- || (want_path == SF_FAST_PATH
- && (subfacet->actions_len != want_actions->size
- || memcmp(subfacet->actions, want_actions->data,
- subfacet->actions_len))));
-}
-
static bool
facet_check_consistency(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ struct xlate_out xout;
+ struct xlate_in xin;
struct rule_dpif *rule;
- struct subfacet *subfacet;
- bool may_log = false;
bool ok;
/* Check the rule for consistency. */
rule = rule_dpif_lookup(ofproto, &facet->flow);
- ok = rule == facet->rule;
- if (!ok) {
- may_log = !VLOG_DROP_WARN(&rl);
- if (may_log) {
- struct ds s;
+ if (rule != facet->rule) {
+ if (!VLOG_DROP_WARN(&rl)) {
+ struct ds s = DS_EMPTY_INITIALIZER;
- ds_init(&s);
flow_format(&s, &facet->flow);
ds_put_format(&s, ": facet associated with wrong rule (was "
"table=%"PRIu8",", facet->rule->up.table_id);
ds_put_format(&s, ") (should have been table=%"PRIu8",",
rule->up.table_id);
cls_rule_format(&rule->up.cr, &s);
- ds_put_char(&s, ')');
+ ds_put_cstr(&s, ")\n");
- VLOG_WARN("%s", ds_cstr(&s));
ds_destroy(&s);
}
+ return false;
}
/* Check the datapath actions for consistency. */
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- enum subfacet_path want_path;
- struct odputil_keybuf keybuf;
- struct action_xlate_ctx ctx;
- struct ofpbuf key;
- struct ds s;
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, rule,
+ 0, NULL);
+ xlate_actions(&xin, &xout);
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, rule, 0, NULL);
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len,
- &odp_actions);
-
- if (subfacet->path == SF_NOT_INSTALLED) {
- /* This only happens if the datapath reported an error when we
- * tried to install the flow. Don't flag another error here. */
- continue;
- }
+ ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
+ && facet->xout.slow == xout.slow;
+ if (!ok && !VLOG_DROP_WARN(&rl)) {
+ struct ds s = DS_EMPTY_INITIALIZER;
- want_path = subfacet_want_path(subfacet->slow);
- if (want_path == SF_SLOW_PATH && subfacet->path == SF_SLOW_PATH) {
- /* The actions for slow-path flows may legitimately vary from one
- * packet to the next. We're done. */
- continue;
- }
+ flow_format(&s, &facet->flow);
+ ds_put_cstr(&s, ": inconsistency in facet");
- if (!subfacet_should_install(subfacet, subfacet->slow, &odp_actions)) {
- continue;
+ if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
+ ds_put_cstr(&s, " (actions were: ");
+ format_odp_actions(&s, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size);
+ ds_put_cstr(&s, ") (correct actions: ");
+ format_odp_actions(&s, xout.odp_actions.data,
+ xout.odp_actions.size);
+ ds_put_cstr(&s, ")");
}
- /* Inconsistency! */
- if (ok) {
- may_log = !VLOG_DROP_WARN(&rl);
- ok = false;
- }
- if (!may_log) {
- /* Rate-limited, skip reporting. */
- continue;
+ if (facet->xout.slow != xout.slow) {
+ ds_put_format(&s, " slow path incorrect. Should be %d", xout.slow);
}
- ds_init(&s);
- subfacet_get_key(subfacet, &keybuf, &key);
- odp_flow_key_format(key.data, key.size, &s);
-
- ds_put_cstr(&s, ": inconsistency in subfacet");
- if (want_path != subfacet->path) {
- enum odp_key_fitness fitness = subfacet->key_fitness;
-
- ds_put_format(&s, " (%s, fitness=%s)",
- subfacet_path_to_string(subfacet->path),
- odp_key_fitness_to_string(fitness));
- ds_put_format(&s, " (should have been %s)",
- subfacet_path_to_string(want_path));
- } else if (want_path == SF_FAST_PATH) {
- ds_put_cstr(&s, " (actions were: ");
- format_odp_actions(&s, subfacet->actions,
- subfacet->actions_len);
- ds_put_cstr(&s, ") (correct actions: ");
- format_odp_actions(&s, odp_actions.data, odp_actions.size);
- ds_put_char(&s, ')');
- } else {
- ds_put_cstr(&s, " (actions: ");
- format_odp_actions(&s, subfacet->actions,
- subfacet->actions_len);
- ds_put_char(&s, ')');
- }
- VLOG_WARN("%s", ds_cstr(&s));
ds_destroy(&s);
}
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
return ok;
}
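The consistency check above is representative of the new translation interface: one xlate_in/xlate_out pair per facet instead of a per-subfacet action_xlate_ctx. A minimal sketch of the caller pattern, using only the signatures visible in this patch:

    struct xlate_out xout;
    struct xlate_in xin;

    /* Describe the translation: bridge, flow, initial values, rule, TCP
     * flags, and (optionally) the packet that triggered it. */
    xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
                  facet->rule, 0, NULL);

    /* Translate the OpenFlow actions into datapath actions; the results
     * (xout.odp_actions, xout.slow, xout.tags, ...) are owned by 'xout'. */
    xlate_actions(&xin, &xout);

    /* ... consume xout ... */

    /* Free the action buffer held inside 'xout'. */
    xlate_out_uninit(&xout);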
* 'facet' to the new rule and recompiles its actions.
*
* - If the rule found is the same as 'facet''s current rule, leaves 'facet'
- * where it is and recompiles its actions anyway. */
-static void
+ * where it is and recompiles its actions anyway.
+ *
+ * - If any of 'facet''s subfacets correspond to a new flow according to
+ * ofproto_receive(), 'facet' is removed.
+ *
+ * Returns true if 'facet' is still valid. False if 'facet' was removed. */
+static bool
facet_revalidate(struct facet *facet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- struct actions {
- struct nlattr *odp_actions;
- size_t actions_len;
- };
- struct actions *new_actions;
-
- struct action_xlate_ctx ctx;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
-
struct rule_dpif *new_rule;
struct subfacet *subfacet;
- int i;
+ struct xlate_out xout;
+ struct xlate_in xin;
COVERAGE_INC(facet_revalidate);
+ /* Check that child subfacets still correspond to this facet. Tunnel
+ * configuration changes could cause a subfacet's OpenFlow in_port to
+ * change. */
+ LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
+ struct ofproto_dpif *recv_ofproto;
+ struct flow recv_flow;
+ int error;
+
+ error = ofproto_receive(ofproto->backer, NULL, subfacet->key,
+ subfacet->key_len, &recv_flow, NULL,
+ &recv_ofproto, NULL, NULL);
+ if (error
+ || recv_ofproto != ofproto
+ || memcmp(&recv_flow, &facet->flow, sizeof recv_flow)) {
+ facet_remove(facet);
+ return false;
+ }
+ }
+
new_rule = rule_dpif_lookup(ofproto, &facet->flow);
/* Calculate new datapath actions.
* We do not modify any 'facet' state yet, because we might need to, e.g.,
* emit a NetFlow expiration and, if so, we need to have the old state
* around to properly compose it. */
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals, new_rule,
+ 0, NULL);
+ xlate_actions(&xin, &xout);
+
+ /* A facet's slow path reason should only change under dramatic
+ * circumstances. Rather than try to update everything, it's simpler to
+ * remove the facet and start over. */
+ if (facet->xout.slow != xout.slow) {
+ facet_remove(facet);
+ xlate_out_uninit(&xout);
+ return false;
+ }
- /* If the datapath actions changed or the installability changed,
- * then we need to talk to the datapath. */
- i = 0;
- new_actions = NULL;
- memset(&ctx, 0, sizeof ctx);
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- enum slow_path_reason slow;
-
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
- subfacet->initial_tci, new_rule, 0, NULL);
- xlate_actions(&ctx, new_rule->up.ofpacts, new_rule->up.ofpacts_len,
- &odp_actions);
-
- slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
- if (subfacet_should_install(subfacet, slow, &odp_actions)) {
- struct dpif_flow_stats stats;
-
- subfacet_install(subfacet,
- odp_actions.data, odp_actions.size, &stats, slow);
- subfacet_update_stats(subfacet, &stats);
+ if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
+ LIST_FOR_EACH(subfacet, list_node, &facet->subfacets) {
+ if (subfacet->path == SF_FAST_PATH) {
+ struct dpif_flow_stats stats;
- if (!new_actions) {
- new_actions = xcalloc(list_size(&facet->subfacets),
- sizeof *new_actions);
+ subfacet_install(subfacet, &xout.odp_actions, &stats);
+ subfacet_update_stats(subfacet, &stats);
}
- new_actions[i].odp_actions = xmemdup(odp_actions.data,
- odp_actions.size);
- new_actions[i].actions_len = odp_actions.size;
}
- i++;
- }
- ofpbuf_uninit(&odp_actions);
-
- if (new_actions) {
facet_flush_stats(facet);
+
+ ofpbuf_clear(&facet->xout.odp_actions);
+ ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data,
+ xout.odp_actions.size);
}
/* Update 'facet' now that we've taken care of all the old state. */
- facet->tags = ctx.tags;
- facet->nf_flow.output_iface = ctx.nf_output_iface;
- facet->has_learn = ctx.has_learn;
- facet->has_normal = ctx.has_normal;
- facet->has_fin_timeout = ctx.has_fin_timeout;
- facet->mirrors = ctx.mirrors;
-
- i = 0;
- LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
- subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
-
- if (new_actions && new_actions[i].odp_actions) {
- free(subfacet->actions);
- subfacet->actions = new_actions[i].odp_actions;
- subfacet->actions_len = new_actions[i].actions_len;
- }
- i++;
- }
- free(new_actions);
+ facet->xout.tags = xout.tags;
+ facet->xout.slow = xout.slow;
+ facet->xout.has_learn = xout.has_learn;
+ facet->xout.has_normal = xout.has_normal;
+ facet->xout.has_fin_timeout = xout.has_fin_timeout;
+ facet->xout.nf_output_iface = xout.nf_output_iface;
+ facet->xout.mirrors = xout.mirrors;
+ facet->nf_flow.output_iface = facet->xout.nf_output_iface;
if (facet->rule != new_rule) {
COVERAGE_INC(facet_changed_rule);
facet->used = new_rule->up.created;
facet->prev_used = facet->used;
}
-}
-/* Updates 'facet''s used time. Caller is responsible for calling
- * facet_push_stats() to update the flows which 'facet' resubmits into. */
-static void
-facet_update_time(struct facet *facet, long long int used)
-{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- if (used > facet->used) {
- facet->used = used;
- ofproto_rule_update_used(&facet->rule->up, used);
- netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
- }
+ xlate_out_uninit(&xout);
+ return true;
}
static void
}
static void
-facet_push_stats(struct facet *facet)
+facet_push_stats(struct facet *facet, bool may_learn)
{
struct dpif_flow_stats stats;
- assert(facet->packet_count >= facet->prev_packet_count);
- assert(facet->byte_count >= facet->prev_byte_count);
- assert(facet->used >= facet->prev_used);
+ ovs_assert(facet->packet_count >= facet->prev_packet_count);
+ ovs_assert(facet->byte_count >= facet->prev_byte_count);
+ ovs_assert(facet->used >= facet->prev_used);
stats.n_packets = facet->packet_count - facet->prev_packet_count;
stats.n_bytes = facet->byte_count - facet->prev_byte_count;
stats.used = facet->used;
- stats.tcp_flags = 0;
+ stats.tcp_flags = facet->tcp_flags;
+
+ if (may_learn || stats.n_packets || facet->used > facet->prev_used) {
+ struct ofproto_dpif *ofproto =
+ ofproto_dpif_cast(facet->rule->up.ofproto);
+
+ struct ofport_dpif *in_port;
+ struct xlate_in xin;
- if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
facet->prev_packet_count = facet->packet_count;
facet->prev_byte_count = facet->byte_count;
facet->prev_used = facet->used;
- flow_push_stats(facet->rule, &facet->flow, &stats);
+ in_port = get_ofp_port(ofproto, facet->flow.in_port);
+ if (in_port && in_port->tnl_port) {
+ netdev_vport_inc_rx(in_port->up.netdev, &stats);
+ }
+
+ rule_credit_stats(facet->rule, &stats);
+ netflow_flow_update_time(ofproto->netflow, &facet->nf_flow,
+ facet->used);
+ netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
+ update_mirror_stats(ofproto, facet->xout.mirrors, stats.n_packets,
+ stats.n_bytes);
- update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
- facet->mirrors, stats.n_packets, stats.n_bytes);
+ xlate_in_init(&xin, ofproto, &facet->flow, &facet->initial_vals,
+ facet->rule, stats.tcp_flags, NULL);
+ xin.resubmit_stats = &stats;
+ xin.may_learn = may_learn;
+ xlate_actions_for_side_effects(&xin);
}
}
static void
-rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
+push_all_stats__(bool run_fast)
{
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- ofproto_rule_update_used(&rule->up, stats->used);
+ static long long int rl = LLONG_MIN;
+ struct ofproto_dpif *ofproto;
+
+ if (time_msec() < rl) {
+ return;
+ }
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ struct facet *facet;
+
+ HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
+ facet_push_stats(facet, false);
+ if (run_fast) {
+ run_fast_rl();
+ }
+ }
+ }
+
+ rl = time_msec() + 100;
}
-/* Pushes flow statistics to the rules which 'flow' resubmits into given
- * 'rule''s actions and mirrors. */
static void
-flow_push_stats(struct rule_dpif *rule,
- const struct flow *flow, const struct dpif_flow_stats *stats)
+push_all_stats(void)
{
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct action_xlate_ctx ctx;
+ push_all_stats__(true);
+}
+static void
+rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
+{
+ rule->packet_count += stats->n_packets;
+ rule->byte_count += stats->n_bytes;
ofproto_rule_update_used(&rule->up, stats->used);
-
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
- 0, NULL);
- ctx.resubmit_stats = stats;
- xlate_actions_for_side_effects(&ctx, rule->up.ofpacts,
- rule->up.ofpacts_len);
}
\f
/* Subfacets. */
static struct subfacet *
-subfacet_find__(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len, uint32_t key_hash,
- const struct flow *flow)
+subfacet_find(struct ofproto_dpif *ofproto,
+ const struct nlattr *key, size_t key_len, uint32_t key_hash)
{
struct subfacet *subfacet;
HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
&ofproto->subfacets) {
- if (subfacet->key
- ? (subfacet->key_len == key_len
- && !memcmp(key, subfacet->key, key_len))
- : flow_equal(flow, &subfacet->facet->flow)) {
+ if (subfacet->key_len == key_len
+ && !memcmp(key, subfacet->key, key_len)) {
return subfacet;
}
}
}
/* Searches 'facet' (within 'ofproto') for a subfacet with the specified
- * 'key_fitness', 'key', and 'key_len'. Returns the existing subfacet if
- * there is one, otherwise creates and returns a new subfacet.
- *
- * If the returned subfacet is new, then subfacet->actions will be NULL, in
- * which case the caller must populate the actions with
- * subfacet_make_actions(). */
+ * 'key_fitness', 'key', and 'key_len' members in 'miss'. Returns the
+ * existing subfacet if there is one, otherwise creates and returns a
+ * new subfacet. */
static struct subfacet *
-subfacet_create(struct facet *facet, enum odp_key_fitness key_fitness,
- const struct nlattr *key, size_t key_len, ovs_be16 initial_tci)
+subfacet_create(struct facet *facet, struct flow_miss *miss,
+ long long int now)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- uint32_t key_hash = odp_flow_key_hash(key, key_len);
+ enum odp_key_fitness key_fitness = miss->key_fitness;
+ const struct nlattr *key = miss->key;
+ size_t key_len = miss->key_len;
+ uint32_t key_hash;
struct subfacet *subfacet;
+ key_hash = odp_flow_key_hash(key, key_len);
+
if (list_is_empty(&facet->subfacets)) {
subfacet = &facet->one_subfacet;
-
- /* This subfacet should conceptually be created, and have its first
- * packet pass through, at the same time that its facet was created.
- * If we called time_msec() here, then the subfacet could look
- * (occasionally) as though it was used some time after the facet was
- * used. That can make a one-packet flow look like it has a nonzero
- * duration, which looks odd in e.g. NetFlow statistics. */
- subfacet->used = facet->used;
} else {
- subfacet = subfacet_find__(ofproto, key, key_len, key_hash,
- &facet->flow);
+ subfacet = subfacet_find(ofproto, key, key_len, key_hash);
if (subfacet) {
if (subfacet->facet == facet) {
return subfacet;
}
subfacet = xmalloc(sizeof *subfacet);
- subfacet->used = time_msec();
}
hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
list_push_back(&facet->subfacets, &subfacet->list_node);
subfacet->facet = facet;
subfacet->key_fitness = key_fitness;
- if (key_fitness != ODP_FIT_PERFECT) {
- subfacet->key = xmemdup(key, key_len);
- subfacet->key_len = key_len;
- } else {
- subfacet->key = NULL;
- subfacet->key_len = 0;
- }
+ subfacet->key = xmemdup(key, key_len);
+ subfacet->key_len = key_len;
+ subfacet->used = now;
+ subfacet->created = now;
subfacet->dp_packet_count = 0;
subfacet->dp_byte_count = 0;
- subfacet->actions_len = 0;
- subfacet->actions = NULL;
- subfacet->slow = (subfacet->key_fitness == ODP_FIT_TOO_LITTLE
- ? SLOW_MATCH
- : 0);
subfacet->path = SF_NOT_INSTALLED;
- subfacet->initial_tci = initial_tci;
+ ofproto->subfacet_add_count++;
return subfacet;
}
-/* Searches 'ofproto' for a subfacet with the given 'key', 'key_len', and
- * 'flow'. Returns the subfacet if one exists, otherwise NULL. */
-static struct subfacet *
-subfacet_find(struct ofproto_dpif *ofproto,
- const struct nlattr *key, size_t key_len)
-{
- uint32_t key_hash = odp_flow_key_hash(key, key_len);
- enum odp_key_fitness fitness;
- struct flow flow;
-
- fitness = odp_flow_key_to_flow(key, key_len, &flow);
- if (fitness == ODP_FIT_ERROR) {
- return NULL;
- }
-
- return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
-}
-
/* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
* its facet within 'ofproto', and frees it. */
static void
struct facet *facet = subfacet->facet;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
+ /* Update ofproto stats before uninstalling the subfacet. */
+ ofproto->subfacet_del_count++;
+ ofproto->total_subfacet_life_span += (time_msec() - subfacet->created);
+
subfacet_uninstall(subfacet);
hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
list_remove(&subfacet->list_node);
free(subfacet->key);
- free(subfacet->actions);
if (subfacet != &facet->one_subfacet) {
free(subfacet);
}
}
}
-/* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
- * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
- * for use as temporary storage. */
-static void
-subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
- struct ofpbuf *key)
-{
- if (!subfacet->key) {
- ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
- odp_flow_key_from_flow(key, &subfacet->facet->flow);
- } else {
- ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
- }
-}
-
-/* Composes the datapath actions for 'subfacet' based on its rule's actions.
- * Translates the actions into 'odp_actions', which the caller must have
- * initialized and is responsible for uninitializing. */
static void
-subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
- struct ofpbuf *odp_actions)
+subfacet_destroy_batch(struct ofproto_dpif *ofproto,
+ struct subfacet **subfacets, int n)
{
- struct facet *facet = subfacet->facet;
- struct rule_dpif *rule = facet->rule;
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
- struct action_xlate_ctx ctx;
+ struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
+ struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
+ struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
+ int i;
- action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
- rule, 0, packet);
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, odp_actions);
- facet->tags = ctx.tags;
- facet->has_learn = ctx.has_learn;
- facet->has_normal = ctx.has_normal;
- facet->has_fin_timeout = ctx.has_fin_timeout;
- facet->nf_flow.output_iface = ctx.nf_output_iface;
- facet->mirrors = ctx.mirrors;
+ for (i = 0; i < n; i++) {
+ ops[i].type = DPIF_OP_FLOW_DEL;
+ ops[i].u.flow_del.key = subfacets[i]->key;
+ ops[i].u.flow_del.key_len = subfacets[i]->key_len;
+ ops[i].u.flow_del.stats = &stats[i];
+ opsp[i] = &ops[i];
+ }
- subfacet->slow = (subfacet->slow & SLOW_MATCH) | ctx.slow;
- if (subfacet->actions_len != odp_actions->size
- || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
- free(subfacet->actions);
- subfacet->actions_len = odp_actions->size;
- subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
+ dpif_operate(ofproto->backer->dpif, opsp, n);
+ for (i = 0; i < n; i++) {
+ subfacet_reset_dp_stats(subfacets[i], &stats[i]);
+ subfacets[i]->path = SF_NOT_INSTALLED;
+ subfacet_destroy(subfacets[i]);
+ run_fast_rl();
}
}
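A sketch of how a caller might drive subfacet_destroy_batch(), flushing whenever SUBFACET_DESTROY_MAX_BATCH entries have accumulated; the expiration predicate is hypothetical and stands in for whatever criterion the caller applies:

    struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
    struct subfacet *subfacet, *next;
    int n = 0;

    HMAP_FOR_EACH_SAFE (subfacet, next, hmap_node, &ofproto->subfacets) {
        if (subfacet_is_expired(subfacet)) {   /* hypothetical predicate */
            batch[n++] = subfacet;
            if (n == SUBFACET_DESTROY_MAX_BATCH) {
                subfacet_destroy_batch(ofproto, batch, n);
                n = 0;
            }
        }
    }
    if (n > 0) {
        subfacet_destroy_batch(ofproto, batch, n);
    }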
*
* Returns 0 if successful, otherwise a positive errno value. */
static int
-subfacet_install(struct subfacet *subfacet,
- const struct nlattr *actions, size_t actions_len,
- struct dpif_flow_stats *stats,
- enum slow_path_reason slow)
+subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions,
+ struct dpif_flow_stats *stats)
{
struct facet *facet = subfacet->facet;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
- enum subfacet_path path = subfacet_want_path(slow);
+ enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
+ const struct nlattr *actions = odp_actions->data;
+ size_t actions_len = odp_actions->size;
+
uint64_t slow_path_stub[128 / 8];
- struct odputil_keybuf keybuf;
enum dpif_flow_put_flags flags;
- struct ofpbuf key;
int ret;
flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
}
if (path == SF_SLOW_PATH) {
- compose_slow_path(ofproto, &facet->flow, slow,
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
slow_path_stub, sizeof slow_path_stub,
&actions, &actions_len);
}
- subfacet_get_key(subfacet, &keybuf, &key);
- ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
- actions, actions_len, stats);
+ ret = dpif_flow_put(ofproto->backer->dpif, flags, subfacet->key,
+ subfacet->key_len, actions, actions_len, stats);
if (stats) {
subfacet_reset_dp_stats(subfacet, stats);
return ret;
}
-static int
-subfacet_reinstall(struct subfacet *subfacet, struct dpif_flow_stats *stats)
-{
- return subfacet_install(subfacet, subfacet->actions, subfacet->actions_len,
- stats, subfacet->slow);
-}
-
/* If 'subfacet' is installed in the datapath, uninstalls it. */
static void
subfacet_uninstall(struct subfacet *subfacet)
if (subfacet->path != SF_NOT_INSTALLED) {
struct rule_dpif *rule = subfacet->facet->rule;
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct odputil_keybuf keybuf;
struct dpif_flow_stats stats;
- struct ofpbuf key;
int error;
- subfacet_get_key(subfacet, &keybuf, &key);
- error = dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
+ error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
+ subfacet->key_len, &stats);
subfacet_reset_dp_stats(subfacet, &stats);
if (!error) {
subfacet_update_stats(subfacet, &stats);
}
subfacet->path = SF_NOT_INSTALLED;
} else {
- assert(subfacet->dp_packet_count == 0);
- assert(subfacet->dp_byte_count == 0);
+ ovs_assert(subfacet->dp_packet_count == 0);
+ ovs_assert(subfacet->dp_byte_count == 0);
}
}
subfacet->dp_byte_count = 0;
}
-/* Updates 'subfacet''s used time. The caller is responsible for calling
- * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
-static void
-subfacet_update_time(struct subfacet *subfacet, long long int used)
-{
- if (used > subfacet->used) {
- subfacet->used = used;
- facet_update_time(subfacet->facet, used);
- }
-}
-
/* Folds the statistics from 'stats' into the counters in 'subfacet'.
*
* Because of the meaning of a subfacet's counters, it only makes sense to do
if (stats->n_packets || stats->used > subfacet->used) {
struct facet *facet = subfacet->facet;
- subfacet_update_time(subfacet, stats->used);
+ subfacet->used = MAX(subfacet->used, stats->used);
+ facet->used = MAX(facet->used, stats->used);
facet->packet_count += stats->n_packets;
facet->byte_count += stats->n_bytes;
facet->tcp_flags |= stats->tcp_flags;
- facet_push_stats(facet);
- netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
}
}
\f
static struct rule_dpif *
rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
- struct ofport_dpif *port;
struct rule_dpif *rule;
rule = rule_dpif_lookup__(ofproto, flow, 0);
return rule;
}
- port = get_ofp_port(ofproto, flow->in_port);
- if (!port) {
- VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
- return ofproto->miss_rule;
- }
-
- if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
- return ofproto->no_packet_in_rule;
- }
- return ofproto->miss_rule;
+ return rule_dpif_miss_rule(ofproto, flow);
}
static struct rule_dpif *
return rule_dpif_cast(rule_from_cls_rule(cls_rule));
}
+static struct rule_dpif *
+rule_dpif_miss_rule(struct ofproto_dpif *ofproto, const struct flow *flow)
+{
+ struct ofport_dpif *port;
+
+ port = get_ofp_port(ofproto, flow->in_port);
+ if (!port) {
+ VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, flow->in_port);
+ return ofproto->miss_rule;
+ }
+
+ if (port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
+ return ofproto->no_packet_in_rule;
+ }
+ return ofproto->miss_rule;
+}
+
static void
complete_operation(struct rule_dpif *rule)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
struct rule_dpif *victim;
uint8_t table_id;
- enum ofperr error;
-
- error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
- &rule->up.cr.flow, ofproto->max_ports);
- if (error) {
- return error;
- }
rule->packet_count = 0;
rule->byte_count = 0;
}
table_id = rule->up.table_id;
- rule->tag = (victim ? victim->tag
- : table_id == 0 ? 0
- : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
- ofproto->tables[table_id].basis));
+ if (victim) {
+ rule->tag = victim->tag;
+ } else if (table_id == 0) {
+ rule->tag = 0;
+ } else {
+ struct flow flow;
+
+ miniflow_expand(&rule->up.cr.match.flow, &flow);
+ rule->tag = rule_calculate_tag(&flow, &rule->up.cr.match.mask,
+ ofproto->tables[table_id].basis);
+ }
complete_operation(rule);
return 0;
rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- struct facet *facet;
+
+ /* push_all_stats() can handle flow misses which, when using the learn
+ * action, can cause rules to be added and deleted. This can corrupt our
+ * caller's data structures, which assume that rule_get_stats() doesn't have
+ * an impact on the flow table. To be safe, we disable miss handling. */
+ push_all_stats__(false);
/* Start from historical data for 'rule' itself that are no longer tracked
* in facets. This counts, for example, facets that have expired. */
*packets = rule->packet_count;
- *bytes = rule->byte_count;
-
- /* Add any statistics that are tracked by facets. This includes
- * statistical data recently updated by ofproto_update_stats() as well as
- * stats for packets that were executed "by hand" via dpif_execute(). */
- LIST_FOR_EACH (facet, list_node, &rule->facets) {
- *packets += facet->packet_count;
- *bytes += facet->byte_count;
- }
+ *bytes = rule->byte_count;
}
-static enum ofperr
-rule_execute(struct rule *rule_, const struct flow *flow,
- struct ofpbuf *packet)
+static void
+rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
+ struct ofpbuf *packet)
{
- struct rule_dpif *rule = rule_dpif_cast(rule_);
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
-
+ struct initial_vals initial_vals;
struct dpif_flow_stats stats;
+ struct xlate_out xout;
+ struct xlate_in xin;
- struct action_xlate_ctx ctx;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
-
- dpif_flow_stats_extract(flow, packet, &stats);
+ dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
rule_credit_stats(rule, &stats);
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
- rule, stats.tcp_flags, packet);
- ctx.resubmit_stats = &stats;
- xlate_actions(&ctx, rule->up.ofpacts, rule->up.ofpacts_len, &odp_actions);
+ initial_vals.vlan_tci = flow->vlan_tci;
+ xlate_in_init(&xin, ofproto, flow, &initial_vals, rule, stats.tcp_flags,
+ packet);
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
- execute_odp_actions(ofproto, flow, odp_actions.data,
- odp_actions.size, packet);
+ execute_odp_actions(ofproto, flow, xout.odp_actions.data,
+ xout.odp_actions.size, packet);
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
+}
+static enum ofperr
+rule_execute(struct rule *rule, const struct flow *flow,
+ struct ofpbuf *packet)
+{
+ rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
+ ofpbuf_delete(packet);
return 0;
}
rule_modify_actions(struct rule *rule_)
{
struct rule_dpif *rule = rule_dpif_cast(rule_);
- struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- enum ofperr error;
-
- error = ofpacts_check(rule->up.ofpacts, rule->up.ofpacts_len,
- &rule->up.cr.flow, ofproto->max_ports);
- if (error) {
- ofoperation_complete(rule->up.pending, error);
- return;
- }
complete_operation(rule);
}
static int
send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
- const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+ uint64_t odp_actions_stub[1024 / 8];
struct ofpbuf key, odp_actions;
+ struct dpif_flow_stats stats;
struct odputil_keybuf keybuf;
- uint16_t odp_port;
+ struct ofpact_output output;
+ struct xlate_out xout;
+ struct xlate_in xin;
struct flow flow;
int error;
- flow_extract(packet, 0, 0, 0, &flow);
- odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
- flow.vlan_tci);
- if (odp_port != ofport->odp_port) {
- eth_pop_vlan(packet);
- flow.vlan_tci = htons(0);
- }
-
+ ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, &flow);
- ofpbuf_init(&odp_actions, 32);
- compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
+ /* Use OFPP_NONE as the in_port to avoid special packet processing. */
+ flow_extract(packet, 0, 0, NULL, OFPP_NONE, &flow);
+ odp_flow_key_from_flow(&key, &flow, ofp_port_to_odp_port(ofproto,
+ OFPP_LOCAL));
+ dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
+
+ ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
+ output.port = ofport->up.ofp_port;
+ output.max_len = 0;
- nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
- error = dpif_execute(ofproto->dpif,
+ xlate_in_init(&xin, ofproto, &flow, NULL, NULL, 0, packet);
+ xin.ofpacts_len = sizeof output;
+ xin.ofpacts = &output.ofpact;
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
+
+ error = dpif_execute(ofproto->backer->dpif,
key.data, key.size,
- odp_actions.data, odp_actions.size,
+ xout.odp_actions.data, xout.odp_actions.size,
packet);
- ofpbuf_uninit(&odp_actions);
+ xlate_out_uninit(&xout);
if (error) {
- VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
- ofproto->up.name, odp_port, strerror(error));
+ VLOG_WARN_RL(&rl, "%s: failed to send packet on port %s (%s)",
+ ofproto->up.name, netdev_get_name(ofport->up.netdev),
+ strerror(error));
}
- ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
+
+ ofproto->stats.tx_packets++;
+ ofproto->stats.tx_bytes += packet->size;
return error;
}
\f
/* OpenFlow to datapath action translation. */
+static bool may_receive(const struct ofport_dpif *, struct xlate_ctx *);
static void do_xlate_actions(const struct ofpact *, size_t ofpacts_len,
- struct action_xlate_ctx *);
-static void xlate_normal(struct action_xlate_ctx *);
+ struct xlate_ctx *);
+static void xlate_normal(struct xlate_ctx *);
/* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
* The action will state 'slow' as the reason that the action is in the slow
cookie.slow_path.reason = slow;
ofpbuf_use_stack(&buf, stub, stub_size);
- if (slow & (SLOW_CFM | SLOW_LACP | SLOW_STP)) {
- uint32_t pid = dpif_port_get_pid(ofproto->dpif, UINT16_MAX);
- odp_put_userspace_action(pid, &cookie, &buf);
+ if (slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)) {
+ uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif, UINT32_MAX);
+ odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf);
} else {
- put_userspace_action(ofproto, &buf, flow, &cookie);
+ put_userspace_action(ofproto, &buf, flow, &cookie,
+ sizeof cookie.slow_path);
}
*actionsp = buf.data;
*actions_lenp = buf.size;
put_userspace_action(const struct ofproto_dpif *ofproto,
struct ofpbuf *odp_actions,
const struct flow *flow,
- const union user_action_cookie *cookie)
+ const union user_action_cookie *cookie,
+ const size_t cookie_size)
{
uint32_t pid;
- pid = dpif_port_get_pid(ofproto->dpif,
- ofp_port_to_odp_port(flow->in_port));
+ pid = dpif_port_get_pid(ofproto->backer->dpif,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
+
+ return odp_put_userspace_action(pid, cookie, cookie_size, odp_actions);
+}
+
+/* Compose SAMPLE action for sFlow or IPFIX. The given probability is
+ * the number of packets out of UINT32_MAX to sample. The given
+ * cookie is passed back in the callback for each sampled packet.
+ */
+static size_t
+compose_sample_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow,
+ const uint32_t probability,
+ const union user_action_cookie *cookie,
+ const size_t cookie_size)
+{
+ size_t sample_offset, actions_offset;
+ int cookie_offset;
+
+ sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
+
+ nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
+
+ actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
+ cookie_offset = put_userspace_action(ofproto, odp_actions, flow, cookie,
+ cookie_size);
- return odp_put_userspace_action(pid, cookie, odp_actions);
+ nl_msg_end_nested(odp_actions, actions_offset);
+ nl_msg_end_nested(odp_actions, sample_offset);
+ return cookie_offset;
}
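The nesting that compose_sample_action() emits is easier to see laid out; roughly, the resulting Netlink attribute tree is:

    /*   OVS_ACTION_ATTR_SAMPLE
     *     OVS_SAMPLE_ATTR_PROBABILITY   (u32: packets out of UINT32_MAX)
     *     OVS_SAMPLE_ATTR_ACTIONS       (nested)
     *       OVS_ACTION_ATTR_USERSPACE   (carries the user-action cookie)
     *
     * The returned cookie_offset lets fix_sflow_action() rewrite the nested
     * cookie in place after the rest of the actions have been composed. */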
static void
}
}
-/* Compose SAMPLE action for sFlow. */
+/* Compose SAMPLE action for sFlow bridge sampling. */
static size_t
compose_sflow_action(const struct ofproto_dpif *ofproto,
struct ofpbuf *odp_actions,
{
uint32_t probability;
union user_action_cookie cookie;
- size_t sample_offset, actions_offset;
- int cookie_offset;
if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
return 0;
}
- sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
-
- /* Number of packets out of UINT_MAX to sample. */
probability = dpif_sflow_get_probability(ofproto->sflow);
- nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
-
- actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
compose_sflow_cookie(ofproto, htons(0), odp_port,
odp_port == OVSP_NONE ? 0 : 1, &cookie);
- cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
- nl_msg_end_nested(odp_actions, actions_offset);
- nl_msg_end_nested(odp_actions, sample_offset);
- return cookie_offset;
+ return compose_sample_action(ofproto, odp_actions, flow, probability,
+ &cookie, sizeof cookie.sflow);
+}
+
+static void
+compose_flow_sample_cookie(uint16_t probability, uint32_t collector_set_id,
+ uint32_t obs_domain_id, uint32_t obs_point_id,
+ union user_action_cookie *cookie)
+{
+ cookie->type = USER_ACTION_COOKIE_FLOW_SAMPLE;
+ cookie->flow_sample.probability = probability;
+ cookie->flow_sample.collector_set_id = collector_set_id;
+ cookie->flow_sample.obs_domain_id = obs_domain_id;
+ cookie->flow_sample.obs_point_id = obs_point_id;
+}
+
+static void
+compose_ipfix_cookie(union user_action_cookie *cookie)
+{
+ cookie->type = USER_ACTION_COOKIE_IPFIX;
+}
+
+/* Compose SAMPLE action for IPFIX bridge sampling. */
+static void
+compose_ipfix_action(const struct ofproto_dpif *ofproto,
+ struct ofpbuf *odp_actions,
+ const struct flow *flow)
+{
+ uint32_t probability;
+ union user_action_cookie cookie;
+
+ if (!ofproto->ipfix || flow->in_port == OFPP_NONE) {
+ return;
+ }
+
+ probability = dpif_ipfix_get_bridge_exporter_probability(ofproto->ipfix);
+ compose_ipfix_cookie(&cookie);
+
+ compose_sample_action(ofproto, odp_actions, flow, probability,
+ &cookie, sizeof cookie.ipfix);
}
-/* SAMPLE action must be first action in any given list of actions.
- * At this point we do not have all information required to build it. So try to
- * build sample action as complete as possible. */
+/* The SAMPLE action for sFlow must be the first action in any given list of
+ * actions.  At this point we do not yet have all of the information required
+ * to build it, so we build the sample action as completely as we can. */
static void
-add_sflow_action(struct action_xlate_ctx *ctx)
+add_sflow_action(struct xlate_ctx *ctx)
{
ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
- ctx->odp_actions,
- &ctx->flow, OVSP_NONE);
+ &ctx->xout->odp_actions,
+ &ctx->xin->flow, OVSP_NONE);
ctx->sflow_odp_port = 0;
ctx->sflow_n_outputs = 0;
}
+/* The SAMPLE action for IPFIX must be first or second in any given list of
+ * actions, coming after the SAMPLE action for sFlow if one is present. */
+static void
+add_ipfix_action(struct xlate_ctx *ctx)
+{
+ compose_ipfix_action(ctx->ofproto, &ctx->xout->odp_actions,
+ &ctx->xin->flow);
+}
+
/* Fix SAMPLE action according to data collected while composing ODP actions.
 * We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS attribute, i.e.
 * the nested USERSPACE action's user-cookie, which is required for sFlow. */
static void
-fix_sflow_action(struct action_xlate_ctx *ctx)
+fix_sflow_action(struct xlate_ctx *ctx)
{
const struct flow *base = &ctx->base_flow;
union user_action_cookie *cookie;
return;
}
- cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
- sizeof(*cookie));
- assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
+ cookie = ofpbuf_at(&ctx->xout->odp_actions, ctx->user_cookie_offset,
+ sizeof cookie->sflow);
+ ovs_assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
compose_sflow_cookie(ctx->ofproto, base->vlan_tci,
ctx->sflow_odp_port, ctx->sflow_n_outputs, cookie);
}
static void
-compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
+compose_output_action__(struct xlate_ctx *ctx, uint16_t ofp_port,
bool check_stp)
{
const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
- uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
- ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
- uint8_t flow_nw_tos = ctx->flow.nw_tos;
- uint16_t out_port;
+ ovs_be16 flow_vlan_tci;
+ uint32_t flow_skb_mark;
+ uint8_t flow_nw_tos;
+ struct priority_to_dscp *pdscp;
+ uint32_t out_port, odp_port;
- if (ofport) {
- struct priority_to_dscp *pdscp;
+ /* If 'struct flow' gets additional metadata, we'll need to zero it out
+ * before traversing a patch port. */
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+
+ if (!ofport) {
+ xlate_report(ctx, "Nonexistent output port");
+ return;
+ } else if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD) {
+ xlate_report(ctx, "OFPPC_NO_FWD set, skipping output");
+ return;
+ } else if (check_stp && !stp_forward_in_state(ofport->stp_state)) {
+ xlate_report(ctx, "STP not in forwarding state, skipping output");
+ return;
+ }
+
+ if (netdev_vport_is_patch(ofport->up.netdev)) {
+ struct ofport_dpif *peer = ofport_get_peer(ofport);
+ struct flow old_flow = ctx->xin->flow;
+ const struct ofproto_dpif *peer_ofproto;
+ enum slow_path_reason special;
+ struct ofport_dpif *in_port;
- if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD
- || (check_stp && !stp_forward_in_state(ofport->stp_state))) {
+ if (!peer) {
+ xlate_report(ctx, "Nonexistent patch port peer");
return;
}
- pdscp = get_priority(ofport, ctx->flow.skb_priority);
- if (pdscp) {
- ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= pdscp->dscp;
+ peer_ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ if (peer_ofproto->backer != ctx->ofproto->backer) {
+ xlate_report(ctx, "Patch port peer on a different datapath");
+ return;
}
- } else {
- /* We may not have an ofport record for this port, but it doesn't hurt
- * to allow forwarding to it anyhow. Maybe such a port will appear
- * later and we're pre-populating the flow table. */
+
+ ctx->ofproto = ofproto_dpif_cast(peer->up.ofproto);
+ ctx->xin->flow.in_port = peer->up.ofp_port;
+ ctx->xin->flow.metadata = htonll(0);
+ memset(&ctx->xin->flow.tunnel, 0, sizeof ctx->xin->flow.tunnel);
+ memset(ctx->xin->flow.regs, 0, sizeof ctx->xin->flow.regs);
+
+ in_port = get_ofp_port(ctx->ofproto, ctx->xin->flow.in_port);
+ special = process_special(ctx->ofproto, &ctx->xin->flow, in_port,
+ ctx->xin->packet);
+ if (special) {
+ ctx->xout->slow = special;
+ } else if (!in_port || may_receive(in_port, ctx)) {
+ if (!in_port || stp_forward_in_state(in_port->stp_state)) {
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
+ } else {
+ /* Forwarding is disabled by STP. Let OFPP_NORMAL and the
+ * learning action look at the packet, then drop it. */
+ struct flow old_base_flow = ctx->base_flow;
+ size_t old_size = ctx->xout->odp_actions.size;
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, true);
+ ctx->base_flow = old_base_flow;
+ ctx->xout->odp_actions.size = old_size;
+ }
+ }
+
+ ctx->xin->flow = old_flow;
+ ctx->ofproto = ofproto_dpif_cast(ofport->up.ofproto);
+
+ if (ctx->xin->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
+ netdev_vport_inc_rx(peer->up.netdev, ctx->xin->resubmit_stats);
+ }
+
+ return;
+ }
+
+ flow_vlan_tci = ctx->xin->flow.vlan_tci;
+ flow_skb_mark = ctx->xin->flow.skb_mark;
+ flow_nw_tos = ctx->xin->flow.nw_tos;
+
+ pdscp = get_priority(ofport, ctx->xin->flow.skb_priority);
+ if (pdscp) {
+ ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
+ ctx->xin->flow.nw_tos |= pdscp->dscp;
}
- out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
- ctx->flow.vlan_tci);
- if (out_port != odp_port) {
- ctx->flow.vlan_tci = htons(0);
+ if (ofport->tnl_port) {
+ /* Save tunnel metadata so that changes made due to
+ * the Logical (tunnel) Port are not visible for any further
+ * matches, while explicit set actions on tunnel metadata are.
+ */
+ struct flow_tnl flow_tnl = ctx->xin->flow.tunnel;
+ odp_port = tnl_port_send(ofport->tnl_port, &ctx->xin->flow);
+ if (odp_port == OVSP_NONE) {
+ xlate_report(ctx, "Tunneling decided against output");
+ goto out; /* restore flow_nw_tos */
+ }
+ if (ctx->xin->flow.tunnel.ip_dst == ctx->orig_tunnel_ip_dst) {
+ xlate_report(ctx, "Not tunneling to our own address");
+ goto out; /* restore flow_nw_tos */
+ }
+ if (ctx->xin->resubmit_stats) {
+ netdev_vport_inc_tx(ofport->up.netdev, ctx->xin->resubmit_stats);
+ }
+ out_port = odp_port;
+ commit_odp_tunnel_action(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
+ ctx->xin->flow.tunnel = flow_tnl; /* Restore tunnel metadata */
+ } else {
+ uint16_t vlandev_port;
+ odp_port = ofport->odp_port;
+ vlandev_port = vsp_realdev_to_vlandev(ctx->ofproto, ofp_port,
+ ctx->xin->flow.vlan_tci);
+ if (vlandev_port == ofp_port) {
+ out_port = odp_port;
+ } else {
+ out_port = ofp_port_to_odp_port(ctx->ofproto, vlandev_port);
+ ctx->xin->flow.vlan_tci = htons(0);
+ }
+ ctx->xin->flow.skb_mark &= ~IPSEC_MARK;
}
- commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
- nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
+ commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
+ nl_msg_put_u32(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
ctx->sflow_odp_port = odp_port;
ctx->sflow_n_outputs++;
- ctx->nf_output_iface = ofp_port;
- ctx->flow.vlan_tci = flow_vlan_tci;
- ctx->flow.nw_tos = flow_nw_tos;
+ ctx->xout->nf_output_iface = ofp_port;
+
+ /* Restore flow */
+ ctx->xin->flow.vlan_tci = flow_vlan_tci;
+ ctx->xin->flow.skb_mark = flow_skb_mark;
+ out:
+ ctx->xin->flow.nw_tos = flow_nw_tos;
}
static void
-compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
+compose_output_action(struct xlate_ctx *ctx, uint16_t ofp_port)
{
compose_output_action__(ctx, ofp_port, true);
}
static void
-xlate_table_action(struct action_xlate_ctx *ctx,
- uint16_t in_port, uint8_t table_id)
+tag_the_flow(struct xlate_ctx *ctx, struct rule_dpif *rule)
+{
+ struct ofproto_dpif *ofproto = ctx->ofproto;
+ uint8_t table_id = ctx->table_id;
+
+ if (table_id > 0 && table_id < N_TABLES) {
+ struct table_dpif *table = &ofproto->tables[table_id];
+ if (table->other_table) {
+ ctx->xout->tags |= (rule && rule->tag
+ ? rule->tag
+ : rule_calculate_tag(&ctx->xin->flow,
+ &table->other_table->mask,
+ table->basis));
+ }
+ }
+}
+
+/* Common rule processing in one place to avoid duplicating code. */
+static struct rule_dpif *
+ctx_rule_hooks(struct xlate_ctx *ctx, struct rule_dpif *rule,
+ bool may_packet_in)
+{
+ if (ctx->xin->resubmit_hook) {
+ ctx->xin->resubmit_hook(ctx, rule);
+ }
+ if (rule == NULL && may_packet_in) {
+ /* XXX
+ * check if table configuration flags
+ * OFPTC_TABLE_MISS_CONTROLLER, default.
+ * OFPTC_TABLE_MISS_CONTINUE,
+ * OFPTC_TABLE_MISS_DROP
+ * When OF1.0, OFPTC_TABLE_MISS_CONTINUE is used. What to do?
+ */
+ rule = rule_dpif_miss_rule(ctx->ofproto, &ctx->xin->flow);
+ }
+ if (rule && ctx->xin->resubmit_stats) {
+ rule_credit_stats(rule, ctx->xin->resubmit_stats);
+ }
+ return rule;
+}
+
+static void
+xlate_table_action(struct xlate_ctx *ctx,
+ uint16_t in_port, uint8_t table_id, bool may_packet_in)
{
if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
- struct ofproto_dpif *ofproto = ctx->ofproto;
struct rule_dpif *rule;
- uint16_t old_in_port;
- uint8_t old_table_id;
+ uint16_t old_in_port = ctx->xin->flow.in_port;
+ uint8_t old_table_id = ctx->table_id;
- old_table_id = ctx->table_id;
ctx->table_id = table_id;
/* Look up a flow with 'in_port' as the input port. */
- old_in_port = ctx->flow.in_port;
- ctx->flow.in_port = in_port;
- rule = rule_dpif_lookup__(ofproto, &ctx->flow, table_id);
-
- /* Tag the flow. */
- if (table_id > 0 && table_id < N_TABLES) {
- struct table_dpif *table = &ofproto->tables[table_id];
- if (table->other_table) {
- ctx->tags |= (rule && rule->tag
- ? rule->tag
- : rule_calculate_tag(&ctx->flow,
- &table->other_table->wc,
- table->basis));
- }
- }
+ ctx->xin->flow.in_port = in_port;
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, table_id);
+
+ tag_the_flow(ctx, rule);
/* Restore the original input port. Otherwise OFPP_NORMAL and
* OFPP_IN_PORT will have surprising behavior. */
- ctx->flow.in_port = old_in_port;
+ ctx->xin->flow.in_port = old_in_port;
- if (ctx->resubmit_hook) {
- ctx->resubmit_hook(ctx, rule);
- }
+ rule = ctx_rule_hooks(ctx, rule, may_packet_in);
if (rule) {
struct rule_dpif *old_rule = ctx->rule;
- if (ctx->resubmit_stats) {
- rule_credit_stats(rule, ctx->resubmit_stats);
- }
-
ctx->recurse++;
ctx->rule = rule;
do_xlate_actions(rule->up.ofpacts, rule->up.ofpacts_len, ctx);
}
static void
-xlate_ofpact_resubmit(struct action_xlate_ctx *ctx,
+xlate_ofpact_resubmit(struct xlate_ctx *ctx,
const struct ofpact_resubmit *resubmit)
{
uint16_t in_port;
in_port = resubmit->in_port;
if (in_port == OFPP_IN_PORT) {
- in_port = ctx->flow.in_port;
+ in_port = ctx->xin->flow.in_port;
}
table_id = resubmit->table_id;
table_id = ctx->table_id;
}
- xlate_table_action(ctx, in_port, table_id);
+ xlate_table_action(ctx, in_port, table_id, false);
}
static void
-flood_packets(struct action_xlate_ctx *ctx, bool all)
+flood_packets(struct xlate_ctx *ctx, bool all)
{
struct ofport_dpif *ofport;
HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
uint16_t ofp_port = ofport->up.ofp_port;
- if (ofp_port == ctx->flow.in_port) {
+ if (ofp_port == ctx->xin->flow.in_port) {
continue;
}
}
}
- ctx->nf_output_iface = NF_OUT_FLOOD;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
static void
-execute_controller_action(struct action_xlate_ctx *ctx, int len,
+execute_controller_action(struct xlate_ctx *ctx, int len,
enum ofp_packet_in_reason reason,
uint16_t controller_id)
{
struct ofputil_packet_in pin;
struct ofpbuf *packet;
- ctx->slow |= SLOW_CONTROLLER;
- if (!ctx->packet) {
+ ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
+ ctx->xout->slow = SLOW_CONTROLLER;
+ if (!ctx->xin->packet) {
return;
}
- packet = ofpbuf_clone(ctx->packet);
+ packet = ofpbuf_clone(ctx->xin->packet);
if (packet->l2 && packet->l3) {
struct eth_header *eh;
+ uint16_t mpls_depth;
eth_pop_vlan(packet);
eh = packet->l2;
- /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
- * LLC frame. Calculating the Ethernet type of these frames is more
- * trouble than seems appropriate for a simple assertion. */
- assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
- || eh->eth_type == ctx->flow.dl_type);
+ memcpy(eh->eth_src, ctx->xin->flow.dl_src, sizeof eh->eth_src);
+ memcpy(eh->eth_dst, ctx->xin->flow.dl_dst, sizeof eh->eth_dst);
- memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
- memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
+ if (ctx->xin->flow.vlan_tci & htons(VLAN_CFI)) {
+ eth_push_vlan(packet, ctx->xin->flow.vlan_tci);
+ }
+
+ mpls_depth = eth_mpls_depth(packet);
- if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
- eth_push_vlan(packet, ctx->flow.vlan_tci);
+ if (mpls_depth < ctx->xin->flow.mpls_depth) {
+ push_mpls(packet, ctx->xin->flow.dl_type, ctx->xin->flow.mpls_lse);
+ } else if (mpls_depth > ctx->xin->flow.mpls_depth) {
+ pop_mpls(packet, ctx->xin->flow.dl_type);
+ } else if (mpls_depth) {
+ set_mpls_lse(packet, ctx->xin->flow.mpls_lse);
}
if (packet->l4) {
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
- ctx->flow.nw_tos, ctx->flow.nw_ttl);
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ packet_set_ipv4(packet, ctx->xin->flow.nw_src,
+ ctx->xin->flow.nw_dst, ctx->xin->flow.nw_tos,
+ ctx->xin->flow.nw_ttl);
}
if (packet->l7) {
- if (ctx->flow.nw_proto == IPPROTO_TCP) {
- packet_set_tcp_port(packet, ctx->flow.tp_src,
- ctx->flow.tp_dst);
- } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
- packet_set_udp_port(packet, ctx->flow.tp_src,
- ctx->flow.tp_dst);
+ if (ctx->xin->flow.nw_proto == IPPROTO_TCP) {
+ packet_set_tcp_port(packet, ctx->xin->flow.tp_src,
+ ctx->xin->flow.tp_dst);
+ } else if (ctx->xin->flow.nw_proto == IPPROTO_UDP) {
+ packet_set_udp_port(packet, ctx->xin->flow.tp_src,
+ ctx->xin->flow.tp_dst);
}
}
}
pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
pin.send_len = len;
- flow_get_metadata(&ctx->flow, &pin.fmd);
+ flow_get_metadata(&ctx->xin->flow, &pin.fmd);
connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
ofpbuf_delete(packet);
}
+static void
+execute_mpls_push_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
+{
+ ovs_assert(eth_type_mpls(eth_type));
+
+ if (ctx->base_flow.mpls_depth) {
+ ctx->xin->flow.mpls_lse &= ~htonl(MPLS_BOS_MASK);
+ ctx->xin->flow.mpls_depth++;
+ } else {
+ ovs_be32 label;
+ uint8_t tc, ttl;
+
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IPV6)) {
+ label = htonl(0x2); /* IPV6 Explicit Null. */
+ } else {
+ label = htonl(0x0); /* IPV4 Explicit Null. */
+ }
+ tc = (ctx->xin->flow.nw_tos & IP_DSCP_MASK) >> 2;
+ ttl = ctx->xin->flow.nw_ttl ? ctx->xin->flow.nw_ttl : 0x40;
+ ctx->xin->flow.mpls_lse = set_mpls_lse_values(ttl, tc, 1, label);
+ ctx->xin->flow.mpls_depth = 1;
+ }
+ ctx->xin->flow.dl_type = eth_type;
+}
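For reference, set_mpls_lse_values(ttl, tc, 1, label) above is assumed to pack the standard MPLS label stack entry layout (RFC 3032, with TC per RFC 5462):

    /* MPLS label stack entry, 32 bits, most significant bits first:
     *
     *     Label (20 bits) | TC (3 bits) | S (1 bit) | TTL (8 bits)
     *
     * Label 0x0 is "IPv4 Explicit NULL" and 0x2 is "IPv6 Explicit NULL",
     * which is why they are chosen when a first LSE is created; the S
     * ("bottom of stack") argument is 1 because that entry is the only one
     * on the new stack. */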
+
+static void
+execute_mpls_pop_action(struct xlate_ctx *ctx, ovs_be16 eth_type)
+{
+ ovs_assert(eth_type_mpls(ctx->xin->flow.dl_type));
+ ovs_assert(!eth_type_mpls(eth_type));
+
+ if (ctx->xin->flow.mpls_depth) {
+ ctx->xin->flow.mpls_depth--;
+ ctx->xin->flow.mpls_lse = htonl(0);
+ if (!ctx->xin->flow.mpls_depth) {
+ ctx->xin->flow.dl_type = eth_type;
+ }
+ }
+}
+
+static bool
+compose_dec_ttl(struct xlate_ctx *ctx, struct ofpact_cnt_ids *ids)
+{
+ if (ctx->xin->flow.dl_type != htons(ETH_TYPE_IP) &&
+ ctx->xin->flow.dl_type != htons(ETH_TYPE_IPV6)) {
+ return false;
+ }
+
+ if (ctx->xin->flow.nw_ttl > 1) {
+ ctx->xin->flow.nw_ttl--;
+ return false;
+ } else {
+ size_t i;
+
+ for (i = 0; i < ids->n_controllers; i++) {
+ execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL,
+ ids->cnt_ids[i]);
+ }
+
+ /* Stop processing for current table. */
+ return true;
+ }
+}
+
+static bool
+execute_set_mpls_ttl_action(struct xlate_ctx *ctx, uint8_t ttl)
+{
+ if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
+ return true;
+ }
+
+ set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
+ return false;
+}
+
static bool
-compose_dec_ttl(struct action_xlate_ctx *ctx)
+execute_dec_mpls_ttl_action(struct xlate_ctx *ctx)
{
- if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
- ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
+ uint8_t ttl = mpls_lse_to_ttl(ctx->xin->flow.mpls_lse);
+
+ if (!eth_type_mpls(ctx->xin->flow.dl_type)) {
return false;
}
- if (ctx->flow.nw_ttl > 1) {
- ctx->flow.nw_ttl--;
+ if (ttl > 1) {
+ ttl--;
+ set_mpls_lse_ttl(&ctx->xin->flow.mpls_lse, ttl);
return false;
} else {
execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
}
static void
-xlate_output_action(struct action_xlate_ctx *ctx,
- uint16_t port, uint16_t max_len)
+xlate_output_action(struct xlate_ctx *ctx,
+ uint16_t port, uint16_t max_len, bool may_packet_in)
{
- uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+ uint16_t prev_nf_output_iface = ctx->xout->nf_output_iface;
- ctx->nf_output_iface = NF_OUT_DROP;
+ ctx->xout->nf_output_iface = NF_OUT_DROP;
switch (port) {
case OFPP_IN_PORT:
- compose_output_action(ctx, ctx->flow.in_port);
+ compose_output_action(ctx, ctx->xin->flow.in_port);
break;
case OFPP_TABLE:
- xlate_table_action(ctx, ctx->flow.in_port, 0);
+ xlate_table_action(ctx, ctx->xin->flow.in_port, 0, may_packet_in);
break;
case OFPP_NORMAL:
xlate_normal(ctx);
break;
case OFPP_LOCAL:
default:
- if (port != ctx->flow.in_port) {
+ if (port != ctx->xin->flow.in_port) {
compose_output_action(ctx, port);
+ } else {
+ xlate_report(ctx, "skipping output to input port");
}
break;
}
if (prev_nf_output_iface == NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_FLOOD;
- } else if (ctx->nf_output_iface == NF_OUT_DROP) {
- ctx->nf_output_iface = prev_nf_output_iface;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
+ ctx->xout->nf_output_iface = prev_nf_output_iface;
} else if (prev_nf_output_iface != NF_OUT_DROP &&
- ctx->nf_output_iface != NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_MULTI;
+ ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->xout->nf_output_iface = NF_OUT_MULTI;
}
}
static void
-xlate_output_reg_action(struct action_xlate_ctx *ctx,
+xlate_output_reg_action(struct xlate_ctx *ctx,
const struct ofpact_output_reg *or)
{
- uint64_t port = mf_get_subfield(&or->src, &ctx->flow);
+ uint64_t port = mf_get_subfield(&or->src, &ctx->xin->flow);
if (port <= UINT16_MAX) {
- xlate_output_action(ctx, port, or->max_len);
+ xlate_output_action(ctx, port, or->max_len, false);
}
}
static void
-xlate_enqueue_action(struct action_xlate_ctx *ctx,
+xlate_enqueue_action(struct xlate_ctx *ctx,
const struct ofpact_enqueue *enqueue)
{
uint16_t ofp_port = enqueue->port;
int error;
/* Translate queue to priority. */
- error = dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &priority);
+ error = dpif_queue_to_priority(ctx->ofproto->backer->dpif,
+ queue_id, &priority);
if (error) {
/* Fall back to ordinary output action. */
- xlate_output_action(ctx, enqueue->port, 0);
+ xlate_output_action(ctx, enqueue->port, 0, false);
return;
}
/* Check output port. */
if (ofp_port == OFPP_IN_PORT) {
- ofp_port = ctx->flow.in_port;
- } else if (ofp_port == ctx->flow.in_port) {
+ ofp_port = ctx->xin->flow.in_port;
+ } else if (ofp_port == ctx->xin->flow.in_port) {
return;
}
/* Add datapath actions. */
- flow_priority = ctx->flow.skb_priority;
- ctx->flow.skb_priority = priority;
+ flow_priority = ctx->xin->flow.skb_priority;
+ ctx->xin->flow.skb_priority = priority;
compose_output_action(ctx, ofp_port);
- ctx->flow.skb_priority = flow_priority;
+ ctx->xin->flow.skb_priority = flow_priority;
/* Update NetFlow output port. */
- if (ctx->nf_output_iface == NF_OUT_DROP) {
- ctx->nf_output_iface = ofp_port;
- } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
- ctx->nf_output_iface = NF_OUT_MULTI;
+ if (ctx->xout->nf_output_iface == NF_OUT_DROP) {
+ ctx->xout->nf_output_iface = ofp_port;
+ } else if (ctx->xout->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->xout->nf_output_iface = NF_OUT_MULTI;
}
}
static void
-xlate_set_queue_action(struct action_xlate_ctx *ctx, uint32_t queue_id)
+xlate_set_queue_action(struct xlate_ctx *ctx, uint32_t queue_id)
{
uint32_t skb_priority;
- if (!dpif_queue_to_priority(ctx->ofproto->dpif, queue_id, &skb_priority)) {
- ctx->flow.skb_priority = skb_priority;
+ if (!dpif_queue_to_priority(ctx->ofproto->backer->dpif,
+ queue_id, &skb_priority)) {
+ ctx->xin->flow.skb_priority = skb_priority;
} else {
/* Couldn't translate queue to a priority. Nothing to do. A warning
* has already been logged. */
}
}
-struct xlate_reg_state {
- ovs_be16 vlan_tci;
- ovs_be64 tun_id;
-};
-
-static void
-xlate_autopath(struct action_xlate_ctx *ctx,
- const struct ofpact_autopath *ap)
-{
- uint16_t ofp_port = ap->port;
- struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
-
- if (!port || !port->bundle) {
- ofp_port = OFPP_NONE;
- } else if (port->bundle->bond) {
- /* Autopath does not support VLAN hashing. */
- struct ofport_dpif *slave = bond_choose_output_slave(
- port->bundle->bond, &ctx->flow, 0, &ctx->tags);
- if (slave) {
- ofp_port = slave->up.ofp_port;
- }
- }
- nxm_reg_load(&ap->dst, ofp_port, &ctx->flow);
-}
-
static bool
slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
{
}
static void
-xlate_bundle_action(struct action_xlate_ctx *ctx,
+xlate_bundle_action(struct xlate_ctx *ctx,
const struct ofpact_bundle *bundle)
{
uint16_t port;
- port = bundle_execute(bundle, &ctx->flow, slave_enabled_cb, ctx->ofproto);
+ port = bundle_execute(bundle, &ctx->xin->flow, slave_enabled_cb,
+ ctx->ofproto);
if (bundle->dst.field) {
- nxm_reg_load(&bundle->dst, port, &ctx->flow);
+ nxm_reg_load(&bundle->dst, port, &ctx->xin->flow);
} else {
- xlate_output_action(ctx, port, 0);
+ xlate_output_action(ctx, port, 0, false);
}
}
static void
-xlate_learn_action(struct action_xlate_ctx *ctx,
+xlate_learn_action(struct xlate_ctx *ctx,
const struct ofpact_learn *learn)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
int error;
ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
- learn_execute(learn, &ctx->flow, &fm, &ofpacts);
+ learn_execute(learn, &ctx->xin->flow, &fm, &ofpacts);
error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
if (error && !VLOG_DROP_WARN(&rl)) {
}
static void
-xlate_fin_timeout(struct action_xlate_ctx *ctx,
+xlate_fin_timeout(struct xlate_ctx *ctx,
const struct ofpact_fin_timeout *oft)
{
- if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
+ if (ctx->xin->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
struct rule_dpif *rule = ctx->rule;
reduce_timeout(oft->fin_idle_timeout, &rule->up.idle_timeout);
}
}
+static void
+xlate_sample_action(struct xlate_ctx *ctx,
+ const struct ofpact_sample *os)
+{
+ union user_action_cookie cookie;
+ /* Scale the probability from 16-bit to 32-bit while representing
+ * the same percentage. */
+ uint32_t probability = (os->probability << 16) | os->probability;
+
+ commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions);
+
+ compose_flow_sample_cookie(os->probability, os->collector_set_id,
+ os->obs_domain_id, os->obs_point_id, &cookie);
+ compose_sample_action(ctx->ofproto, &ctx->xout->odp_actions, &ctx->xin->flow,
+ probability, &cookie, sizeof cookie.flow_sample);
+}
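A worked example of the probability scaling above: duplicating the 16-bit value into both halves of the 32-bit word multiplies it by 0x10001, and since UINT32_MAX == UINT16_MAX * 0x10001, the fraction is preserved exactly.

    /* (p << 16) | p == p * 0x10001, so p / UINT16_MAX == result / UINT32_MAX:
     *
     *     p = 0xffff (100%)  ->  0xffffffff  (100% of UINT32_MAX)
     *     p = 0x8000 (~50%)  ->  0x80008000  (~50% of UINT32_MAX)
     *     p = 0x0000 (  0%)  ->  0x00000000  (  0% of UINT32_MAX)
     */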
+
static bool
-may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
+may_receive(const struct ofport_dpif *port, struct xlate_ctx *ctx)
{
- if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
+ if (port->up.pp.config & (eth_addr_equals(ctx->xin->flow.dl_dst,
+ eth_addr_stp)
? OFPUTIL_PC_NO_RECV_STP
: OFPUTIL_PC_NO_RECV)) {
return false;
return true;
}
+static bool
+tunnel_ecn_ok(struct xlate_ctx *ctx)
+{
+ if (is_ip_any(&ctx->base_flow)
+ && (ctx->xin->flow.tunnel.ip_tos & IP_ECN_MASK) == IP_ECN_CE) {
+ if ((ctx->base_flow.nw_tos & IP_ECN_MASK) == IP_ECN_NOT_ECT) {
+ VLOG_WARN_RL(&rl, "dropping tunnel packet marked ECN CE"
+ " but the packet is not ECN capable");
+ return false;
+ } else {
+ /* Set the ECN CE value in the tunneled packet. */
+ ctx->xin->flow.nw_tos |= IP_ECN_CE;
+ }
+ }
+
+ return true;
+}
+
static void
do_xlate_actions(const struct ofpact *ofpacts, size_t ofpacts_len,
- struct action_xlate_ctx *ctx)
+ struct xlate_ctx *ctx)
{
- const struct ofport_dpif *port;
bool was_evictable = true;
const struct ofpact *a;
- port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
- if (port && !may_receive(port, ctx)) {
- /* Drop this flow. */
- return;
- }
-
if (ctx->rule) {
/* Don't let the rule we're working on get evicted underneath us. */
was_evictable = ctx->rule->up.evictable;
ctx->rule->up.evictable = false;
}
+
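+    /* Restart point used by OFPACT_GOTO_TABLE below to continue translation
+     * in the next table without recursing (tail recursion removal). */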
+ do_xlate_actions_again:
OFPACT_FOR_EACH (a, ofpacts, ofpacts_len) {
struct ofpact_controller *controller;
+ const struct ofpact_metadata *metadata;
if (ctx->exit) {
break;
switch (a->type) {
case OFPACT_OUTPUT:
xlate_output_action(ctx, ofpact_get_OUTPUT(a)->port,
- ofpact_get_OUTPUT(a)->max_len);
+ ofpact_get_OUTPUT(a)->max_len, true);
break;
case OFPACT_CONTROLLER:
break;
case OFPACT_SET_VLAN_VID:
- ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
- ctx->flow.vlan_tci |= (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
- | htons(VLAN_CFI));
+ ctx->xin->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
+ ctx->xin->flow.vlan_tci |=
+ (htons(ofpact_get_SET_VLAN_VID(a)->vlan_vid)
+ | htons(VLAN_CFI));
break;
case OFPACT_SET_VLAN_PCP:
- ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
- ctx->flow.vlan_tci |= htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp
- << VLAN_PCP_SHIFT)
- | VLAN_CFI);
+ ctx->xin->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
+ ctx->xin->flow.vlan_tci |=
+ htons((ofpact_get_SET_VLAN_PCP(a)->vlan_pcp << VLAN_PCP_SHIFT)
+ | VLAN_CFI);
break;
case OFPACT_STRIP_VLAN:
- ctx->flow.vlan_tci = htons(0);
+ ctx->xin->flow.vlan_tci = htons(0);
+ break;
+
+ case OFPACT_PUSH_VLAN:
+ /* XXX 802.1AD(QinQ) */
+ ctx->xin->flow.vlan_tci = htons(VLAN_CFI);
break;
case OFPACT_SET_ETH_SRC:
- memcpy(ctx->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
+ memcpy(ctx->xin->flow.dl_src, ofpact_get_SET_ETH_SRC(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_ETH_DST:
- memcpy(ctx->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
+ memcpy(ctx->xin->flow.dl_dst, ofpact_get_SET_ETH_DST(a)->mac,
ETH_ADDR_LEN);
break;
case OFPACT_SET_IPV4_SRC:
- ctx->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_src = ofpact_get_SET_IPV4_SRC(a)->ipv4;
+ }
break;
case OFPACT_SET_IPV4_DST:
- ctx->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_dst = ofpact_get_SET_IPV4_DST(a)->ipv4;
+ }
break;
case OFPACT_SET_IPV4_DSCP:
/* OpenFlow 1.0 only supports IPv4. */
- if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
- ctx->flow.nw_tos &= ~IP_DSCP_MASK;
- ctx->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_IP)) {
+ ctx->xin->flow.nw_tos &= ~IP_DSCP_MASK;
+ ctx->xin->flow.nw_tos |= ofpact_get_SET_IPV4_DSCP(a)->dscp;
}
break;
case OFPACT_SET_L4_SRC_PORT:
- ctx->flow.tp_src = htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ if (is_ip_any(&ctx->xin->flow)) {
+ ctx->xin->flow.tp_src =
+ htons(ofpact_get_SET_L4_SRC_PORT(a)->port);
+ }
break;
case OFPACT_SET_L4_DST_PORT:
- ctx->flow.tp_dst = htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ if (is_ip_any(&ctx->xin->flow)) {
+ ctx->xin->flow.tp_dst =
+ htons(ofpact_get_SET_L4_DST_PORT(a)->port);
+ }
break;
case OFPACT_RESUBMIT:
break;
case OFPACT_SET_TUNNEL:
- ctx->flow.tun_id = htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
+ ctx->xin->flow.tunnel.tun_id =
+ htonll(ofpact_get_SET_TUNNEL(a)->tun_id);
break;
case OFPACT_SET_QUEUE:
break;
case OFPACT_POP_QUEUE:
- ctx->flow.skb_priority = ctx->orig_skb_priority;
+ ctx->xin->flow.skb_priority = ctx->orig_skb_priority;
break;
case OFPACT_REG_MOVE:
- nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->flow);
+ nxm_execute_reg_move(ofpact_get_REG_MOVE(a), &ctx->xin->flow);
break;
case OFPACT_REG_LOAD:
- nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->flow);
+ nxm_execute_reg_load(ofpact_get_REG_LOAD(a), &ctx->xin->flow);
+ break;
+
+ case OFPACT_STACK_PUSH:
+ nxm_execute_stack_push(ofpact_get_STACK_PUSH(a), &ctx->xin->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_STACK_POP:
+ nxm_execute_stack_pop(ofpact_get_STACK_POP(a), &ctx->xin->flow,
+ &ctx->stack);
+ break;
+
+ case OFPACT_PUSH_MPLS:
+ execute_mpls_push_action(ctx, ofpact_get_PUSH_MPLS(a)->ethertype);
+ break;
+
+ case OFPACT_POP_MPLS:
+ execute_mpls_pop_action(ctx, ofpact_get_POP_MPLS(a)->ethertype);
+ break;
+
+ case OFPACT_SET_MPLS_TTL:
+ if (execute_set_mpls_ttl_action(ctx,
+ ofpact_get_SET_MPLS_TTL(a)->ttl)) {
+ goto out;
+ }
+ break;
+
+ case OFPACT_DEC_MPLS_TTL:
+ if (execute_dec_mpls_ttl_action(ctx)) {
+ goto out;
+ }
break;
case OFPACT_DEC_TTL:
- if (compose_dec_ttl(ctx)) {
+ if (compose_dec_ttl(ctx, ofpact_get_DEC_TTL(a))) {
goto out;
}
break;
break;
case OFPACT_MULTIPATH:
- multipath_execute(ofpact_get_MULTIPATH(a), &ctx->flow);
- break;
-
- case OFPACT_AUTOPATH:
- xlate_autopath(ctx, ofpact_get_AUTOPATH(a));
+ multipath_execute(ofpact_get_MULTIPATH(a), &ctx->xin->flow);
break;
case OFPACT_BUNDLE:
xlate_bundle_action(ctx, ofpact_get_BUNDLE(a));
break;
- case OFPACT_OUTPUT_REG:
- xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
- break;
+ case OFPACT_OUTPUT_REG:
+ xlate_output_reg_action(ctx, ofpact_get_OUTPUT_REG(a));
+ break;
+
+ case OFPACT_LEARN:
+ ctx->xout->has_learn = true;
+ if (ctx->xin->may_learn) {
+ xlate_learn_action(ctx, ofpact_get_LEARN(a));
+ }
+ break;
+
+ case OFPACT_EXIT:
+ ctx->exit = true;
+ break;
+
+ case OFPACT_FIN_TIMEOUT:
+ ctx->xout->has_fin_timeout = true;
+ xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
+ break;
+
+ case OFPACT_CLEAR_ACTIONS:
+        /* XXX
+         * Nothing to do because write-actions is not supported for now.
+         * When write-actions is supported, clear-actions must also
+         * be supported at the same time.
+         */
+ break;
+
+ case OFPACT_WRITE_METADATA:
+ metadata = ofpact_get_WRITE_METADATA(a);
+ ctx->xin->flow.metadata &= ~metadata->mask;
+ ctx->xin->flow.metadata |= metadata->metadata & metadata->mask;
+ break;
+
+ case OFPACT_GOTO_TABLE: {
+ /* It is assumed that goto-table is the last action. */
+ struct ofpact_goto_table *ogt = ofpact_get_GOTO_TABLE(a);
+ struct rule_dpif *rule;
+
+ ovs_assert(ctx->table_id < ogt->table_id);
+
+ ctx->table_id = ogt->table_id;
+
+ /* Look up a flow from the new table. */
+ rule = rule_dpif_lookup__(ctx->ofproto, &ctx->xin->flow, ctx->table_id);
+
+ tag_the_flow(ctx, rule);
- case OFPACT_LEARN:
- ctx->has_learn = true;
- if (ctx->may_learn) {
- xlate_learn_action(ctx, ofpact_get_LEARN(a));
- }
- break;
+ rule = ctx_rule_hooks(ctx, rule, true);
- case OFPACT_EXIT:
- ctx->exit = true;
+ if (rule) {
+ if (ctx->rule) {
+ ctx->rule->up.evictable = was_evictable;
+ }
+ ctx->rule = rule;
+ was_evictable = rule->up.evictable;
+ rule->up.evictable = false;
+
+ /* Tail recursion removal. */
+ ofpacts = rule->up.ofpacts;
+ ofpacts_len = rule->up.ofpacts_len;
+ goto do_xlate_actions_again;
+ }
break;
+ }
- case OFPACT_FIN_TIMEOUT:
- ctx->has_fin_timeout = true;
- xlate_fin_timeout(ctx, ofpact_get_FIN_TIMEOUT(a));
+ case OFPACT_SAMPLE:
+ xlate_sample_action(ctx, ofpact_get_SAMPLE(a));
break;
}
}
out:
- /* We've let OFPP_NORMAL and the learning action look at the packet,
- * so drop it now if forwarding is disabled. */
- if (port && !stp_forward_in_state(port->stp_state)) {
- ofpbuf_clear(ctx->odp_actions);
- add_sflow_action(ctx);
- }
if (ctx->rule) {
ctx->rule->up.evictable = was_evictable;
}
}
static void
-action_xlate_ctx_init(struct action_xlate_ctx *ctx,
- struct ofproto_dpif *ofproto, const struct flow *flow,
- ovs_be16 initial_tci, struct rule_dpif *rule,
- uint8_t tcp_flags, const struct ofpbuf *packet)
+xlate_in_init(struct xlate_in *xin, struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ const struct initial_vals *initial_vals,
+ struct rule_dpif *rule, uint8_t tcp_flags,
+ const struct ofpbuf *packet)
+{
+ xin->ofproto = ofproto;
+ xin->flow = *flow;
+ xin->packet = packet;
+ xin->may_learn = packet != NULL;
+ xin->rule = rule;
+ xin->ofpacts = NULL;
+ xin->ofpacts_len = 0;
+ xin->tcp_flags = tcp_flags;
+ xin->resubmit_hook = NULL;
+ xin->report_hook = NULL;
+ xin->resubmit_stats = NULL;
+
+ if (initial_vals) {
+ xin->initial_vals = *initial_vals;
+ } else {
+ xin->initial_vals.vlan_tci = xin->flow.vlan_tci;
+ }
+}
+
+static void
+xlate_out_uninit(struct xlate_out *xout)
{
- ctx->ofproto = ofproto;
- ctx->flow = *flow;
- ctx->base_flow = ctx->flow;
- ctx->base_flow.tun_id = 0;
- ctx->base_flow.vlan_tci = initial_tci;
- ctx->rule = rule;
- ctx->packet = packet;
- ctx->may_learn = packet != NULL;
- ctx->tcp_flags = tcp_flags;
- ctx->resubmit_hook = NULL;
- ctx->report_hook = NULL;
- ctx->resubmit_stats = NULL;
+ if (xout) {
+ ofpbuf_uninit(&xout->odp_actions);
+ }
}
/* Translates the 'ofpacts_len' bytes of "struct ofpacts" starting at 'ofpacts'
* into datapath actions in 'odp_actions', using 'ctx'. */
static void
-xlate_actions(struct action_xlate_ctx *ctx,
- const struct ofpact *ofpacts, size_t ofpacts_len,
- struct ofpbuf *odp_actions)
+xlate_actions(struct xlate_in *xin, struct xlate_out *xout)
{
/* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
* that in the future we always keep a copy of the original flow for
static bool hit_resubmit_limit;
enum slow_path_reason special;
+ const struct ofpact *ofpacts;
+ struct ofport_dpif *in_port;
+ struct flow orig_flow;
+ struct xlate_ctx ctx;
+ size_t ofpacts_len;
COVERAGE_INC(ofproto_dpif_xlate);
- ofpbuf_clear(odp_actions);
- ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
-
- ctx->odp_actions = odp_actions;
- ctx->tags = 0;
- ctx->slow = 0;
- ctx->has_learn = false;
- ctx->has_normal = false;
- ctx->has_fin_timeout = false;
- ctx->nf_output_iface = NF_OUT_DROP;
- ctx->mirrors = 0;
- ctx->recurse = 0;
- ctx->max_resubmit_trigger = false;
- ctx->orig_skb_priority = ctx->flow.skb_priority;
- ctx->table_id = 0;
- ctx->exit = false;
-
- if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
+ /* Flow initialization rules:
+ * - 'base_flow' must match the kernel's view of the packet at the
+ * time that action processing starts. 'flow' represents any
+ * transformations we wish to make through actions.
+ * - By default 'base_flow' and 'flow' are the same since the input
+ * packet matches the output before any actions are applied.
+ * - When using VLAN splinters, 'base_flow''s VLAN is set to the value
+ * of the received packet as seen by the kernel. If we later output
+ * to another device without any modifications this will cause us to
+ * insert a new tag since the original one was stripped off by the
+ * VLAN device.
+     * - Tunnel metadata as received is retained in 'flow'. This allows
+     *   matching on tunnel metadata in later tables as well.
+     *   Since a kernel action for setting the tunnel metadata will only be
+     *   generated with actual tunnel output, changing the tunnel metadata
+     *   values in 'flow' (such as tun_id) will only take effect with a later
+     *   tunnel output action.
+ * - Tunnel 'base_flow' is completely cleared since that is what the
+ * kernel does. If we wish to maintain the original values an action
+ * needs to be generated. */
+
+ ctx.xin = xin;
+ ctx.xout = xout;
+
+ ctx.ofproto = xin->ofproto;
+ ctx.rule = xin->rule;
+
+ ctx.base_flow = ctx.xin->flow;
+ ctx.base_flow.vlan_tci = xin->initial_vals.vlan_tci;
+ memset(&ctx.base_flow.tunnel, 0, sizeof ctx.base_flow.tunnel);
+ ctx.orig_tunnel_ip_dst = ctx.xin->flow.tunnel.ip_dst;
+
+ ctx.xout->tags = 0;
+ ctx.xout->slow = 0;
+ ctx.xout->has_learn = false;
+ ctx.xout->has_normal = false;
+ ctx.xout->has_fin_timeout = false;
+ ctx.xout->nf_output_iface = NF_OUT_DROP;
+ ctx.xout->mirrors = 0;
+
+ ofpbuf_use_stub(&ctx.xout->odp_actions, ctx.xout->odp_actions_stub,
+ sizeof ctx.xout->odp_actions_stub);
+ ofpbuf_reserve(&ctx.xout->odp_actions, NL_A_U32_SIZE);
+
+ ctx.recurse = 0;
+ ctx.max_resubmit_trigger = false;
+ ctx.orig_skb_priority = ctx.xin->flow.skb_priority;
+ ctx.table_id = 0;
+ ctx.exit = false;
+
+ if (xin->ofpacts) {
+ ofpacts = xin->ofpacts;
+ ofpacts_len = xin->ofpacts_len;
+ } else if (xin->rule) {
+ ofpacts = xin->rule->up.ofpacts;
+ ofpacts_len = xin->rule->up.ofpacts_len;
+ } else {
+ NOT_REACHED();
+ }
+
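+    /* Scratch stack used by the stack_push and stack_pop actions. */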
+ ofpbuf_use_stub(&ctx.stack, ctx.init_stack, sizeof ctx.init_stack);
+
+ if (ctx.ofproto->has_mirrors || hit_resubmit_limit) {
/* Do this conditionally because the copy is expensive enough that it
- * shows up in profiles.
- *
- * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
- * believe that I wasn't using it without initializing it if I kept it
- * in a local variable. */
- ctx->orig_flow = ctx->flow;
+ * shows up in profiles. */
+ orig_flow = ctx.xin->flow;
}
- if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
- switch (ctx->ofproto->up.frag_handling) {
+ if (ctx.xin->flow.nw_frag & FLOW_NW_FRAG_ANY) {
+ switch (ctx.ofproto->up.frag_handling) {
case OFPC_FRAG_NORMAL:
/* We must pretend that transport ports are unavailable. */
- ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
- ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
+ ctx.xin->flow.tp_src = ctx.base_flow.tp_src = htons(0);
+ ctx.xin->flow.tp_dst = ctx.base_flow.tp_dst = htons(0);
break;
case OFPC_FRAG_DROP:
}
}
- special = process_special(ctx->ofproto, &ctx->flow, ctx->packet);
+ in_port = get_ofp_port(ctx.ofproto, ctx.xin->flow.in_port);
+ special = process_special(ctx.ofproto, &ctx.xin->flow, in_port,
+ ctx.xin->packet);
if (special) {
- ctx->slow |= special;
+ ctx.xout->slow = special;
} else {
static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
- ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
+ struct initial_vals initial_vals;
+ size_t sample_actions_len;
+ uint32_t local_odp_port;
+
+ initial_vals.vlan_tci = ctx.base_flow.vlan_tci;
+
+ add_sflow_action(&ctx);
+ add_ipfix_action(&ctx);
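+        /* Remember how much of 'odp_actions' is just the sFlow/IPFIX sample
+         * actions, so that the STP check below can truncate back to them. */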
+ sample_actions_len = ctx.xout->odp_actions.size;
- add_sflow_action(ctx);
- do_xlate_actions(ofpacts, ofpacts_len, ctx);
+ if (tunnel_ecn_ok(&ctx) && (!in_port || may_receive(in_port, &ctx))) {
+ do_xlate_actions(ofpacts, ofpacts_len, &ctx);
- if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
+ /* We've let OFPP_NORMAL and the learning action look at the
+ * packet, so drop it now if forwarding is disabled. */
+ if (in_port && !stp_forward_in_state(in_port->stp_state)) {
+ ctx.xout->odp_actions.size = sample_actions_len;
+ }
+ }
+
+ if (ctx.max_resubmit_trigger && !ctx.xin->resubmit_hook) {
if (!hit_resubmit_limit) {
/* We didn't record the original flow. Make sure we do from
* now on. */
} else if (!VLOG_DROP_ERR(&trace_rl)) {
struct ds ds = DS_EMPTY_INITIALIZER;
- ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
- initial_tci, &ds);
+ ofproto_trace(ctx.ofproto, &orig_flow, ctx.xin->packet,
+ &initial_vals, &ds);
VLOG_ERR("Trace triggered by excessive resubmit "
"recursion:\n%s", ds_cstr(&ds));
ds_destroy(&ds);
}
}
- if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
- ctx->odp_actions->data,
- ctx->odp_actions->size)) {
- ctx->slow |= SLOW_IN_BAND;
- if (ctx->packet
- && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
- ctx->packet)) {
- compose_output_action(ctx, OFPP_LOCAL);
- }
+ local_odp_port = ofp_port_to_odp_port(ctx.ofproto, OFPP_LOCAL);
+ if (!connmgr_must_output_local(ctx.ofproto->up.connmgr, &ctx.xin->flow,
+ local_odp_port,
+ ctx.xout->odp_actions.data,
+ ctx.xout->odp_actions.size)) {
+ compose_output_action(&ctx, OFPP_LOCAL);
}
- if (ctx->ofproto->has_mirrors) {
- add_mirror_actions(ctx, &ctx->orig_flow);
+ if (ctx.ofproto->has_mirrors) {
+ add_mirror_actions(&ctx, &orig_flow);
}
- fix_sflow_action(ctx);
+ fix_sflow_action(&ctx);
}
+
+ ofpbuf_uninit(&ctx.stack);
}
/* Translates the 'ofpacts_len' bytes of "struct ofpact"s starting at 'ofpacts'
* into datapath actions, using 'ctx', and discards the datapath actions. */
static void
-xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
- const struct ofpact *ofpacts,
- size_t ofpacts_len)
+xlate_actions_for_side_effects(struct xlate_in *xin)
{
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ struct xlate_out xout;
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(ctx, ofpacts, ofpacts_len, &odp_actions);
- ofpbuf_uninit(&odp_actions);
+ xlate_actions(xin, &xout);
+ xlate_out_uninit(&xout);
}
static void
-xlate_report(struct action_xlate_ctx *ctx, const char *s)
+xlate_report(struct xlate_ctx *ctx, const char *s)
{
- if (ctx->report_hook) {
- ctx->report_hook(ctx, s);
+ if (ctx->xin->report_hook) {
+ ctx->xin->report_hook(ctx, s);
}
}
\f
}
static void
-output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
+output_normal(struct xlate_ctx *ctx, const struct ofbundle *out_bundle,
uint16_t vlan)
{
struct ofport_dpif *port;
if (!out_bundle->bond) {
port = ofbundle_get_a_port(out_bundle);
} else {
- port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
- vid, &ctx->tags);
+ port = bond_choose_output_slave(out_bundle->bond, &ctx->xin->flow,
+ vid, &ctx->xout->tags);
if (!port) {
/* No slaves enabled, so drop packet. */
return;
}
}
- old_tci = ctx->flow.vlan_tci;
+ old_tci = ctx->xin->flow.vlan_tci;
tci = htons(vid);
if (tci || out_bundle->use_priority_tags) {
- tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
+ tci |= ctx->xin->flow.vlan_tci & htons(VLAN_PCP_MASK);
if (tci) {
tci |= htons(VLAN_CFI);
}
}
- ctx->flow.vlan_tci = tci;
+ ctx->xin->flow.vlan_tci = tci;
compose_output_action(ctx, port->up.ofp_port);
- ctx->flow.vlan_tci = old_tci;
+ ctx->xin->flow.vlan_tci = old_tci;
}
static int
}
static void
-add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
+add_mirror_actions(struct xlate_ctx *ctx, const struct flow *orig_flow)
{
struct ofproto_dpif *ofproto = ctx->ofproto;
mirror_mask_t mirrors;
size_t left;
in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
- ctx->packet != NULL, NULL);
+ ctx->xin->packet != NULL, NULL);
if (!in_bundle) {
return;
}
/* Drop frames on bundles reserved for mirroring. */
if (in_bundle->mirror_out) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
"%s, which is reserved exclusively for mirroring",
/* Check VLAN. */
vid = vlan_tci_to_vid(orig_flow->vlan_tci);
- if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
+ if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
return;
}
vlan = input_vid_to_vlan(in_bundle, vid);
/* Look at the output ports to check for destination selections. */
- NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
- ctx->odp_actions->size) {
+ NL_ATTR_FOR_EACH (a, left, ctx->xout->odp_actions.data,
+ ctx->xout->odp_actions.size) {
enum ovs_action_attr type = nl_attr_type(a);
struct ofport_dpif *ofport;
}
/* Restore the original packet before adding the mirror actions. */
- ctx->flow = *orig_flow;
+ ctx->xin->flow = *orig_flow;
while (mirrors) {
struct ofmirror *m;
m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
if (!vlan_is_mirrored(m, vlan)) {
- mirrors &= mirrors - 1;
+ mirrors = zero_rightmost_1bit(mirrors);
continue;
}
mirrors &= ~m->dup_mirrors;
- ctx->mirrors |= m->dup_mirrors;
+ ctx->xout->mirrors |= m->dup_mirrors;
if (m->out) {
output_normal(ctx, m->out, vlan);
} else if (vlan != m->out_vlan
return;
}
- for (; mirrors; mirrors &= mirrors - 1) {
+ for (; mirrors; mirrors = zero_rightmost_1bit(mirrors)) {
struct ofmirror *m;
m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
in_bundle->name, vlan);
mac->port.p = in_bundle;
- tag_set_add(&ofproto->revalidate_set,
+ tag_set_add(&ofproto->backer->revalidate_set,
mac_learning_changed(ofproto->ml, mac));
}
}
* so in one special case.
*/
static bool
-is_admissible(struct action_xlate_ctx *ctx, struct ofport_dpif *in_port,
+is_admissible(struct xlate_ctx *ctx, struct ofport_dpif *in_port,
uint16_t vlan)
{
struct ofproto_dpif *ofproto = ctx->ofproto;
- struct flow *flow = &ctx->flow;
+ struct flow *flow = &ctx->xin->flow;
struct ofbundle *in_bundle = in_port->bundle;
/* Drop frames for reserved multicast addresses
struct mac_entry *mac;
switch (bond_check_admissibility(in_bundle->bond, in_port,
- flow->dl_dst, &ctx->tags)) {
+ flow->dl_dst, &ctx->xout->tags)) {
case BV_ACCEPT:
break;
}
static void
-xlate_normal(struct action_xlate_ctx *ctx)
+xlate_normal(struct xlate_ctx *ctx)
{
struct ofport_dpif *in_port;
struct ofbundle *in_bundle;
uint16_t vlan;
uint16_t vid;
- ctx->has_normal = true;
+ ctx->xout->has_normal = true;
- in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
- ctx->packet != NULL, &in_port);
+ in_bundle = lookup_input_bundle(ctx->ofproto, ctx->xin->flow.in_port,
+ ctx->xin->packet != NULL, &in_port);
if (!in_bundle) {
xlate_report(ctx, "no input bundle, dropping");
return;
}
/* Drop malformed frames. */
- if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
- !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->flow.dl_type == htons(ETH_TYPE_VLAN) &&
+ !(ctx->xin->flow.vlan_tci & htons(VLAN_CFI))) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
"VLAN tag received on port %s",
/* Drop frames on bundles reserved for mirroring. */
if (in_bundle->mirror_out) {
- if (ctx->packet != NULL) {
+ if (ctx->xin->packet != NULL) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
"%s, which is reserved exclusively for mirroring",
}
/* Check VLAN. */
- vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
- if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
+ vid = vlan_tci_to_vid(ctx->xin->flow.vlan_tci);
+ if (!input_vid_is_valid(vid, in_bundle, ctx->xin->packet != NULL)) {
xlate_report(ctx, "disallowed VLAN VID for this input port, dropping");
return;
}
}
/* Learn source MAC. */
- if (ctx->may_learn) {
- update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
+ if (ctx->xin->may_learn) {
+ update_learning_table(ctx->ofproto, &ctx->xin->flow, vlan, in_bundle);
}
/* Determine output bundle. */
- mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
- &ctx->tags);
+ mac = mac_learning_lookup(ctx->ofproto->ml, ctx->xin->flow.dl_dst, vlan,
+ &ctx->xout->tags);
if (mac) {
if (mac->port.p != in_bundle) {
xlate_report(ctx, "forwarding to learned port");
output_normal(ctx, bundle, vlan);
}
}
- ctx->nf_output_iface = NF_OUT_FLOOD;
+ ctx->xout->nf_output_iface = NF_OUT_FLOOD;
}
}
\f
* a few more, but not all of the facets or even all of the facets that
* resubmit to the table modified by MAC learning). */
-/* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
+/* Calculates the tag to use for 'flow' and mask 'mask' when it is inserted
* into an OpenFlow table with the given 'basis'. */
static tag_type
-rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
+rule_calculate_tag(const struct flow *flow, const struct minimask *mask,
uint32_t secret)
{
- if (flow_wildcards_is_catchall(wc)) {
+ if (minimask_is_catchall(mask)) {
return 0;
} else {
- struct flow tag_flow = *flow;
- flow_zero_wildcards(&tag_flow, wc);
- return tag_create_deterministic(flow_hash(&tag_flow, secret));
+ uint32_t hash = flow_hash_in_minimask(flow, mask, secret);
+ return tag_create_deterministic(hash);
}
}
if (table->catchall_table != catchall || table->other_table != other) {
table->catchall_table = catchall;
table->other_table = other;
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
table_update_taggable(ofproto, rule->up.table_id);
- if (!ofproto->need_revalidate) {
+ if (!ofproto->backer->need_revalidate) {
struct table_dpif *table = &ofproto->tables[rule->up.table_id];
if (table->other_table && rule->tag) {
- tag_set_add(&ofproto->revalidate_set, rule->tag);
+ tag_set_add(&ofproto->backer->revalidate_set, rule->tag);
} else {
- ofproto->need_revalidate = REV_FLOW_TABLE;
+ ofproto->backer->need_revalidate = REV_FLOW_TABLE;
}
}
}
enum ofp_config_flags frag_handling)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
-
if (frag_handling != OFPC_FRAG_REASM) {
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
return true;
} else {
return false;
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- enum ofperr error;
-
- if (flow->in_port >= ofproto->max_ports && flow->in_port < OFPP_MAX) {
- return OFPERR_NXBRC_BAD_IN_PORT;
- }
-
- error = ofpacts_check(ofpacts, ofpacts_len, flow, ofproto->max_ports);
- if (!error) {
- struct odputil_keybuf keybuf;
- struct dpif_flow_stats stats;
+ struct initial_vals initial_vals;
+ struct odputil_keybuf keybuf;
+ struct dpif_flow_stats stats;
+ struct xlate_out xout;
+ struct xlate_in xin;
+ struct ofpbuf key;
- struct ofpbuf key;
- struct action_xlate_ctx ctx;
- uint64_t odp_actions_stub[1024 / 8];
- struct ofpbuf odp_actions;
+ ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
+ odp_flow_key_from_flow(&key, flow,
+ ofp_port_to_odp_port(ofproto, flow->in_port));
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow);
+ dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- dpif_flow_stats_extract(flow, packet, &stats);
+ initial_vals.vlan_tci = flow->vlan_tci;
+ xlate_in_init(&xin, ofproto, flow, &initial_vals, NULL, stats.tcp_flags,
+ packet);
+ xin.resubmit_stats = &stats;
+ xin.ofpacts_len = ofpacts_len;
+ xin.ofpacts = ofpacts;
- action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
- packet_get_tcp_flags(packet, flow), packet);
- ctx.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
+ dpif_execute(ofproto->backer->dpif, key.data, key.size,
+ xout.odp_actions.data, xout.odp_actions.size, packet);
+ xlate_out_uninit(&xout);
- ofpbuf_use_stub(&odp_actions,
- odp_actions_stub, sizeof odp_actions_stub);
- xlate_actions(&ctx, ofpacts, ofpacts_len, &odp_actions);
- dpif_execute(ofproto->dpif, key.data, key.size,
- odp_actions.data, odp_actions.size, packet);
- ofpbuf_uninit(&odp_actions);
- }
- return error;
+ return 0;
}
\f
/* NetFlow. */
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
+ dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
}
static void
if (subfacet->path == SF_FAST_PATH) {
struct dpif_flow_stats stats;
- subfacet_reinstall(subfacet, &stats);
+ subfacet_install(subfacet, &facet->xout.odp_actions, &stats);
subfacet_update_stats(subfacet, &stats);
}
}
unixctl_command_reply_error(conn, "no such bridge");
return;
}
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
} else {
HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
- mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
+ mac_learning_flush(ofproto->ml, &ofproto->backer->revalidate_set);
}
}
}
struct trace_ctx {
- struct action_xlate_ctx ctx;
+ struct xlate_out xout;
+ struct xlate_in xin;
struct flow flow;
struct ds *result;
};
static void
trace_format_flow(struct ds *result, int level, const char *title,
- struct trace_ctx *trace)
+ struct trace_ctx *trace)
{
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
- if (flow_equal(&trace->ctx.flow, &trace->flow)) {
+ if (flow_equal(&trace->xin.flow, &trace->flow)) {
ds_put_cstr(result, "unchanged");
} else {
- flow_format(result, &trace->ctx.flow);
- trace->flow = trace->ctx.flow;
+ flow_format(result, &trace->xin.flow);
+ trace->flow = trace->xin.flow;
}
ds_put_char(result, '\n');
}
trace_format_odp(struct ds *result, int level, const char *title,
struct trace_ctx *trace)
{
- struct ofpbuf *odp_actions = trace->ctx.odp_actions;
+ struct ofpbuf *odp_actions = &trace->xout.odp_actions;
ds_put_char_multiple(result, '\t', level);
ds_put_format(result, "%s: ", title);
}
static void
-trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
+trace_resubmit(struct xlate_ctx *ctx, struct rule_dpif *rule)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
+ struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
struct ds *result = trace->result;
ds_put_char(result, '\n');
}
static void
-trace_report(struct action_xlate_ctx *ctx, const char *s)
+trace_report(struct xlate_ctx *ctx, const char *s)
{
- struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
+ struct trace_ctx *trace = CONTAINER_OF(ctx->xin, struct trace_ctx, xin);
struct ds *result = trace->result;
ds_put_char_multiple(result, '\t', ctx->recurse);
ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
void *aux OVS_UNUSED)
{
- const char *dpname = argv[1];
+ const struct dpif_backer *backer;
struct ofproto_dpif *ofproto;
struct ofpbuf odp_key;
struct ofpbuf *packet;
- ovs_be16 initial_tci;
+ struct initial_vals initial_vals;
struct ds result;
struct flow flow;
char *s;
packet = NULL;
- ofpbuf_init(&odp_key, 0);
+ backer = NULL;
ds_init(&result);
+ ofpbuf_init(&odp_key, 0);
- ofproto = ofproto_dpif_lookup(dpname);
- if (!ofproto) {
- unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
- "for help)");
- goto exit;
+ /* Handle "-generate" or a hex string as the last argument. */
+ if (!strcmp(argv[argc - 1], "-generate")) {
+ packet = ofpbuf_new(0);
+ argc--;
+ } else {
+ const char *error = eth_from_hex(argv[argc - 1], &packet);
+ if (!error) {
+ argc--;
+ } else if (argc == 4) {
+            /* The 3-argument form must end in "-generate" or a hex string. */
+ unixctl_command_reply_error(conn, error);
+ goto exit;
+ }
}
- if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
- /* ofproto/trace dpname flow [-generate] */
- const char *flow_s = argv[2];
- const char *generate_s = argv[3];
-
- /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
- * flow. We guess which type it is based on whether 'flow_s' contains
- * an '(', since a datapath flow always contains '(') but an
- * OpenFlow-like flow should not (in fact it's allowed but I believe
- * that's not documented anywhere).
- *
- * An alternative would be to try to parse 'flow_s' both ways, but then
- * it would be tricky giving a sensible error message. After all, do
- * you just say "syntax error" or do you present both error messages?
- * Both choices seem lousy. */
- if (strchr(flow_s, '(')) {
- int error;
- /* Convert string to datapath key. */
- ofpbuf_init(&odp_key, 0);
- error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
- if (error) {
- unixctl_command_reply_error(conn, "Bad flow syntax");
- goto exit;
+    /* Parse the flow and determine whether a datapath or
+     * bridge is specified. If odp_flow_key_from_string()
+     * returns 0, the flow is an odp_flow. If
+     * parse_ofp_exact_flow() returns 0, the flow is a br_flow. */
+ if (!odp_flow_key_from_string(argv[argc - 1], NULL, &odp_key)) {
+ /* If the odp_flow is the second argument,
+ * the datapath name is the first argument. */
+ if (argc == 3) {
+ const char *dp_type;
+ if (!strncmp(argv[1], "ovs-", 4)) {
+ dp_type = argv[1] + 4;
+ } else {
+ dp_type = argv[1];
}
-
- /* Convert odp_key to flow. */
- error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
- odp_key.size, &flow,
- &initial_tci, NULL);
- if (error == ODP_FIT_ERROR) {
- unixctl_command_reply_error(conn, "Invalid flow");
+ backer = shash_find_data(&all_dpif_backers, dp_type);
+ if (!backer) {
+ unixctl_command_reply_error(conn, "Cannot find datapath "
+ "of this name");
goto exit;
}
} else {
- char *error_s;
-
- error_s = parse_ofp_exact_flow(&flow, argv[2]);
- if (error_s) {
- unixctl_command_reply_error(conn, error_s);
- free(error_s);
+ /* No datapath name specified, so there should be only one
+ * datapath. */
+ struct shash_node *node;
+ if (shash_count(&all_dpif_backers) != 1) {
+ unixctl_command_reply_error(conn, "Must specify datapath "
+ "name, there is more than one type of datapath");
goto exit;
}
-
- initial_tci = flow.vlan_tci;
- vsp_adjust_flow(ofproto, &flow);
+ node = shash_first(&all_dpif_backers);
+ backer = node->data;
}
- /* Generate a packet, if requested. */
- if (generate_s) {
- packet = ofpbuf_new(0);
- flow_compose(packet, &flow);
+        /* Use ofproto_receive() to parse the datapath key, obtain the flow,
+         * and identify the corresponding ofproto_dpif. */
+ if (ofproto_receive(backer, NULL, odp_key.data,
+ odp_key.size, &flow, NULL, &ofproto, NULL,
+ &initial_vals)) {
+ unixctl_command_reply_error(conn, "Invalid datapath flow");
+ goto exit;
}
- } else if (argc == 6) {
- /* ofproto/trace dpname priority tun_id in_port packet */
- const char *priority_s = argv[2];
- const char *tun_id_s = argv[3];
- const char *in_port_s = argv[4];
- const char *packet_s = argv[5];
- uint16_t in_port = ofp_port_to_odp_port(atoi(in_port_s));
- ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
- uint32_t priority = atoi(priority_s);
- const char *msg;
-
- msg = eth_from_hex(packet_s, &packet);
- if (msg) {
- unixctl_command_reply_error(conn, msg);
+ ds_put_format(&result, "Bridge: %s\n", ofproto->up.name);
+ } else if (!parse_ofp_exact_flow(&flow, argv[argc - 1])) {
+ if (argc != 3) {
+ unixctl_command_reply_error(conn, "Must specify bridge name");
goto exit;
}
- ds_put_cstr(&result, "Packet: ");
- s = ofp_packet_to_string(packet->data, packet->size);
- ds_put_cstr(&result, s);
- free(s);
-
- flow_extract(packet, priority, tun_id, in_port, &flow);
- initial_tci = flow.vlan_tci;
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "Unknown bridge name");
+ goto exit;
+ }
+ initial_vals.vlan_tci = flow.vlan_tci;
} else {
- unixctl_command_reply_error(conn, "Bad command syntax");
+ unixctl_command_reply_error(conn, "Bad flow syntax");
goto exit;
}
- ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
+ /* Generate a packet, if requested. */
+ if (packet) {
+ if (!packet->size) {
+ flow_compose(packet, &flow);
+ } else {
+ ds_put_cstr(&result, "Packet: ");
+ s = ofp_packet_to_string(packet->data, packet->size);
+ ds_put_cstr(&result, s);
+ free(s);
+
+ /* Use the metadata from the flow and the packet argument
+ * to reconstruct the flow. */
+ flow_extract(packet, flow.skb_priority, flow.skb_mark, NULL,
+ flow.in_port, &flow);
+ initial_vals.vlan_tci = flow.vlan_tci;
+ }
+ }
+
+ ofproto_trace(ofproto, &flow, packet, &initial_vals, &result);
unixctl_command_reply(conn, ds_cstr(&result));
exit:
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct ofpbuf *packet, ovs_be16 initial_tci,
- struct ds *ds)
+ const struct ofpbuf *packet,
+ const struct initial_vals *initial_vals, struct ds *ds)
{
struct rule_dpif *rule;
trace.flow = *flow;
ofpbuf_use_stub(&odp_actions,
odp_actions_stub, sizeof odp_actions_stub);
- action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
- rule, tcp_flags, packet);
- trace.ctx.resubmit_hook = trace_resubmit;
- trace.ctx.report_hook = trace_report;
- xlate_actions(&trace.ctx, rule->up.ofpacts, rule->up.ofpacts_len,
- &odp_actions);
+ xlate_in_init(&trace.xin, ofproto, flow, initial_vals, rule, tcp_flags,
+ packet);
+ trace.xin.resubmit_hook = trace_resubmit;
+ trace.xin.report_hook = trace_report;
+ xlate_actions(&trace.xin, &trace.xout);
ds_put_char(ds, '\n');
trace_format_flow(ds, 0, "Final flow", &trace);
ds_put_cstr(ds, "Datapath actions: ");
- format_odp_actions(ds, odp_actions.data, odp_actions.size);
- ofpbuf_uninit(&odp_actions);
-
- if (trace.ctx.slow) {
- enum slow_path_reason slow;
+ format_odp_actions(ds, trace.xout.odp_actions.data,
+ trace.xout.odp_actions.size);
+ if (trace.xout.slow) {
ds_put_cstr(ds, "\nThis flow is handled by the userspace "
"slow path because it:");
- for (slow = trace.ctx.slow; slow; ) {
- enum slow_path_reason bit = rightmost_1bit(slow);
-
- switch (bit) {
- case SLOW_CFM:
- ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
- break;
- case SLOW_LACP:
- ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
- break;
- case SLOW_STP:
- ds_put_cstr(ds, "\n\t- Consists of STP packets.");
- break;
- case SLOW_IN_BAND:
- ds_put_cstr(ds, "\n\t- Needs in-band special case "
- "processing.");
- if (!packet) {
- ds_put_cstr(ds, "\n\t (The datapath actions are "
- "incomplete--for complete actions, "
- "please supply a packet.)");
- }
- break;
- case SLOW_CONTROLLER:
- ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
- "to the OpenFlow controller.");
- break;
- case SLOW_MATCH:
- ds_put_cstr(ds, "\n\t- Needs more specific matching "
- "than the datapath supports.");
- break;
- }
-
- slow &= ~bit;
- }
-
- if (slow & ~SLOW_MATCH) {
- ds_put_cstr(ds, "\nThe datapath actions above do not reflect "
- "the special slow-path processing.");
+ switch (trace.xout.slow) {
+ case SLOW_CFM:
+ ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
+ break;
+ case SLOW_LACP:
+ ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
+ break;
+ case SLOW_STP:
+ ds_put_cstr(ds, "\n\t- Consists of STP packets.");
+ break;
+ case SLOW_BFD:
+ ds_put_cstr(ds, "\n\t- Consists of BFD packets.");
+ break;
+ case SLOW_CONTROLLER:
+ ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
+ "to the OpenFlow controller.");
+ break;
+ case __SLOW_MAX:
+ NOT_REACHED();
}
}
+
+ xlate_out_uninit(&trace.xout);
}
}
}
}
if (errors) {
- ofproto->need_revalidate = REV_INCONSISTENCY;
+ ofproto->backer->need_revalidate = REV_INCONSISTENCY;
}
if (errors) {
ds_destroy(&reply);
}
+/* Store the current ofprotos in 'ofproto_shash'. Returns a sorted list
+ * of the 'ofproto_shash' nodes. It is the responsibility of the caller
+ * to destroy 'ofproto_shash' and free the returned value. */
+static const struct shash_node **
+get_ofprotos(struct shash *ofproto_shash)
+{
+ const struct ofproto_dpif *ofproto;
+
+ HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
+ char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
+ shash_add_nocopy(ofproto_shash, name, ofproto);
+ }
+
+ return shash_sort(ofproto_shash);
+}
+
+static void
+ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
+ const char *argv[] OVS_UNUSED,
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ struct shash ofproto_shash;
+ const struct shash_node **sorted_ofprotos;
+ int i;
+
+ shash_init(&ofproto_shash);
+ sorted_ofprotos = get_ofprotos(&ofproto_shash);
+ for (i = 0; i < shash_count(&ofproto_shash); i++) {
+ const struct shash_node *node = sorted_ofprotos[i];
+ ds_put_format(&ds, "%s\n", node->name);
+ }
+
+ shash_destroy(&ofproto_shash);
+ free(sorted_ofprotos);
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
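+/* Appends to 'ds' a summary of 'ofproto': its datapath, lookup and subfacet
+ * statistics, and a description of each of its ports. */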
+static void
+show_dp_format(const struct ofproto_dpif *ofproto, struct ds *ds)
+{
+ const struct shash_node **ports;
+ int i;
+ struct avg_subfacet_rates lifetime;
+ unsigned long long int minutes;
+ const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+
+ minutes = (time_msec() - ofproto->created) / min_ms;
+
+ if (minutes > 0) {
+ lifetime.add_rate = (double)ofproto->total_subfacet_add_count
+ / minutes;
+ lifetime.del_rate = (double)ofproto->total_subfacet_del_count
+ / minutes;
+    } else {
+ lifetime.add_rate = 0.0;
+ lifetime.del_rate = 0.0;
+ }
+
+ ds_put_format(ds, "%s (%s):\n", ofproto->up.name,
+ dpif_name(ofproto->backer->dpif));
+ ds_put_format(ds,
+ "\tlookups: hit:%"PRIu64" missed:%"PRIu64"\n",
+ ofproto->n_hit, ofproto->n_missed);
+ ds_put_format(ds, "\tflows: cur: %zu, avg: %5.3f, max: %d,"
+ " life span: %llu(ms)\n",
+ hmap_count(&ofproto->subfacets),
+ avg_subfacet_count(ofproto),
+ ofproto->max_n_subfacet,
+ avg_subfacet_life_span(ofproto));
+ if (minutes >= 60) {
+ show_dp_rates(ds, "\t\thourly avg:", &ofproto->hourly);
+ }
+ if (minutes >= 60 * 24) {
+ show_dp_rates(ds, "\t\tdaily avg:", &ofproto->daily);
+ }
+ show_dp_rates(ds, "\t\toverall avg:", &lifetime);
+
+ ports = shash_sort(&ofproto->up.port_by_name);
+ for (i = 0; i < shash_count(&ofproto->up.port_by_name); i++) {
+ const struct shash_node *node = ports[i];
+ struct ofport *ofport = node->data;
+ const char *name = netdev_get_name(ofport->netdev);
+ const char *type = netdev_get_type(ofport->netdev);
+ uint32_t odp_port;
+
+ ds_put_format(ds, "\t%s %u/", name, ofport->ofp_port);
+
+ odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
+ if (odp_port != OVSP_NONE) {
+ ds_put_format(ds, "%"PRIu32":", odp_port);
+ } else {
+ ds_put_cstr(ds, "none:");
+ }
+
+ if (strcmp(type, "system")) {
+ struct netdev *netdev;
+ int error;
+
+ ds_put_format(ds, " (%s", type);
+
+ error = netdev_open(name, type, &netdev);
+ if (!error) {
+ struct smap config;
+
+ smap_init(&config);
+ error = netdev_get_config(netdev, &config);
+ if (!error) {
+ const struct smap_node **nodes;
+ size_t i;
+
+ nodes = smap_sort(&config);
+ for (i = 0; i < smap_count(&config); i++) {
+ const struct smap_node *node = nodes[i];
+ ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
+ node->key, node->value);
+ }
+ free(nodes);
+ }
+ smap_destroy(&config);
+
+ netdev_close(netdev);
+ }
+ ds_put_char(ds, ')');
+ }
+ ds_put_char(ds, '\n');
+ }
+ free(ports);
+}
+
+static void
+ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc,
+ const char *argv[], void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct ofproto_dpif *ofproto;
+
+ if (argc > 1) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ ofproto = ofproto_dpif_lookup(argv[i]);
+ if (!ofproto) {
+ ds_put_format(&ds, "Unknown bridge %s (use dpif/dump-dps "
+ "for help)", argv[i]);
+ unixctl_command_reply_error(conn, ds_cstr(&ds));
+ return;
+ }
+ show_dp_format(ofproto, &ds);
+ }
+ } else {
+ struct shash ofproto_shash;
+ const struct shash_node **sorted_ofprotos;
+ int i;
+
+ shash_init(&ofproto_shash);
+ sorted_ofprotos = get_ofprotos(&ofproto_shash);
+ for (i = 0; i < shash_count(&ofproto_shash); i++) {
+ const struct shash_node *node = sorted_ofprotos[i];
+ show_dp_format(node->data, &ds);
+ }
+
+ shash_destroy(&ofproto_shash);
+ free(sorted_ofprotos);
+ }
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
+static void
+ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[],
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ const struct ofproto_dpif *ofproto;
+ struct subfacet *subfacet;
+
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+
+ update_stats(ofproto->backer);
+
+ HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
+ struct facet *facet = subfacet->facet;
+
+ odp_flow_key_format(subfacet->key, subfacet->key_len, &ds);
+
+ ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
+ subfacet->dp_packet_count, subfacet->dp_byte_count);
+ if (subfacet->used) {
+ ds_put_format(&ds, "%.3fs",
+ (time_msec() - subfacet->used) / 1000.0);
+ } else {
+ ds_put_format(&ds, "never");
+ }
+ if (subfacet->facet->tcp_flags) {
+ ds_put_cstr(&ds, ", flags:");
+ packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
+ }
+
+ ds_put_cstr(&ds, ", actions:");
+ if (facet->xout.slow) {
+ uint64_t slow_path_stub[128 / 8];
+ const struct nlattr *actions;
+ size_t actions_len;
+
+ compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
+ slow_path_stub, sizeof slow_path_stub,
+ &actions, &actions_len);
+ format_odp_actions(&ds, actions, actions_len);
+ } else {
+ format_odp_actions(&ds, facet->xout.odp_actions.data,
+ facet->xout.odp_actions.size);
+ }
+ ds_put_char(&ds, '\n');
+ }
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
+static void
+ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
+ int argc OVS_UNUSED, const char *argv[],
+ void *aux OVS_UNUSED)
+{
+ struct ds ds = DS_EMPTY_INITIALIZER;
+ struct ofproto_dpif *ofproto;
+
+ ofproto = ofproto_dpif_lookup(argv[1]);
+ if (!ofproto) {
+ unixctl_command_reply_error(conn, "no such bridge");
+ return;
+ }
+
+ flush(&ofproto->up);
+
+ unixctl_command_reply(conn, ds_cstr(&ds));
+ ds_destroy(&ds);
+}
+
static void
ofproto_dpif_unixctl_init(void)
{
unixctl_command_register(
"ofproto/trace",
- "bridge {tun_id in_port packet | odp_flow [-generate]}",
- 2, 5, ofproto_unixctl_trace, NULL);
+ "[dp_name]|bridge odp_flow|br_flow [-generate|packet]",
+ 1, 3, ofproto_unixctl_trace, NULL);
unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
ofproto_unixctl_fdb_flush, NULL);
unixctl_command_register("fdb/show", "bridge", 1, 1,
ofproto_dpif_unclog, NULL);
unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
ofproto_dpif_self_check, NULL);
+ unixctl_command_register("dpif/dump-dps", "", 0, 0,
+ ofproto_unixctl_dpif_dump_dps, NULL);
+ unixctl_command_register("dpif/show", "[bridge]", 0, INT_MAX,
+ ofproto_unixctl_dpif_show, NULL);
+ unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
+ ofproto_unixctl_dpif_dump_flows, NULL);
+ unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
+ ofproto_unixctl_dpif_del_flows, NULL);
}
\f
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
return 0;
}
- ofproto->need_revalidate = REV_RECONFIGURE;
+ ofproto->backer->need_revalidate = REV_RECONFIGURE;
if (ofport->realdev_ofp_port) {
vsp_remove(ofport);
return hash_2words(realdev_ofp_port, vid);
}
-/* Returns the ODP port number of the Linux VLAN device that corresponds to
- * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
- * 'ofproto'. For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
- * it would return the port number of eth0.9.
+/* Returns the OFP port number of the Linux VLAN device that corresponds to
+ * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
+ * 'struct ofport_dpif'. For example, given 'realdev_ofp_port' of eth0 and
+ * 'vlan_tci' 9, it would return the port number of eth0.9.
*
- * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
- * function just returns its 'realdev_odp_port' argument. */
-static uint32_t
+ * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
+ * function just returns its 'realdev_ofp_port' argument. */
+static uint16_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
- uint32_t realdev_odp_port, ovs_be16 vlan_tci)
+ uint16_t realdev_ofp_port, ovs_be16 vlan_tci)
{
if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
- uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
int vid = vlan_tci_to_vid(vlan_tci);
const struct vlan_splinter *vsp;
&ofproto->realdev_vid_map) {
if (vsp->realdev_ofp_port == realdev_ofp_port
&& vsp->vid == vid) {
- return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
+ return vsp->vlandev_ofp_port;
}
}
}
- return realdev_odp_port;
+ return realdev_ofp_port;
}
static struct vlan_splinter *
VLOG_ERR("duplicate vlan device record");
}
}
-\f
+
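+/* Returns the datapath port number corresponding to OpenFlow port number
+ * 'ofp_port' within 'ofproto', or OVSP_NONE if there is no such port. */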
+static uint32_t
+ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, uint16_t ofp_port)
+{
+ const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
+ return ofport ? ofport->odp_port : OVSP_NONE;
+}
+
+static struct ofport_dpif *
+odp_port_to_ofport(const struct dpif_backer *backer, uint32_t odp_port)
+{
+ struct ofport_dpif *port;
+
+ HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node,
+ hash_int(odp_port, 0),
+ &backer->odp_to_ofport_map) {
+ if (port->odp_port == odp_port) {
+ return port;
+ }
+ }
+
+ return NULL;
+}
+
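+/* Returns the OpenFlow port number corresponding to datapath port number
+ * 'odp_port', or OFPP_NONE if the port does not belong to 'ofproto'. */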
+static uint16_t
+odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, uint32_t odp_port)
+{
+ struct ofport_dpif *port;
+
+ port = odp_port_to_ofport(ofproto->backer, odp_port);
+ if (port && &ofproto->up == port->up.ofproto) {
+ return port->up.ofp_port;
+ } else {
+ return OFPP_NONE;
+ }
+}
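+
+/* Returns the average life span of the subfacets that have been deleted from
+ * 'ofproto' so far, or 0 if none have been deleted yet. */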
+static unsigned long long int
+avg_subfacet_life_span(const struct ofproto_dpif *ofproto)
+{
+ unsigned long long int dc;
+ unsigned long long int avg;
+
+ dc = ofproto->total_subfacet_del_count + ofproto->subfacet_del_count;
+ avg = dc ? ofproto->total_subfacet_life_span / dc : 0;
+
+ return avg;
+}
+
+static double
+avg_subfacet_count(const struct ofproto_dpif *ofproto)
+{
+ double avg_c = 0.0;
+
+ if (ofproto->n_update_stats) {
+ avg_c = (double)ofproto->total_subfacet_count
+ / ofproto->n_update_stats;
+ }
+
+ return avg_c;
+}
+
+static void
+show_dp_rates(struct ds *ds, const char *heading,
+ const struct avg_subfacet_rates *rates)
+{
+ ds_put_format(ds, "%s add rate: %5.3f/min, del rate: %5.3f/min\n",
+ heading, rates->add_rate, rates->del_rate);
+}
+
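+/* Records the current number of subfacets if it is a new maximum. */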
+static void
+update_max_subfacet_count(struct ofproto_dpif *ofproto)
+{
+ ofproto->max_n_subfacet = MAX(ofproto->max_n_subfacet,
+ hmap_count(&ofproto->subfacets));
+}
+
+/* Compute exponentially weighted moving average, adding 'new' as the newest,
+ * most heavily weighted element. 'base' designates the rate of decay: after
+ * 'base' further updates, 'new''s weight in the EWMA decays to about 1/e
+ * (about .37). */
+static void
+exp_mavg(double *avg, int base, double new)
+{
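+    /* With 'base' of 60, for example, 'new' contributes 1/60 of the result
+     * and the previous average decays by a factor of 59/60 per update. */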
+ *avg = (*avg * (base - 1) + new) / base;
+}
+
+static void
+update_moving_averages(struct ofproto_dpif *ofproto)
+{
+ const int min_ms = 60 * 1000; /* milliseconds in one minute. */
+
+ /* Update hourly averages on the minute boundaries. */
+ if (time_msec() - ofproto->last_minute >= min_ms) {
+ exp_mavg(&ofproto->hourly.add_rate, 60, ofproto->subfacet_add_count);
+ exp_mavg(&ofproto->hourly.del_rate, 60, ofproto->subfacet_del_count);
+
+ /* Update daily averages on the hour boundaries. */
+ if ((ofproto->last_minute - ofproto->created) / min_ms % 60 == 59) {
+ exp_mavg(&ofproto->daily.add_rate, 24, ofproto->hourly.add_rate);
+ exp_mavg(&ofproto->daily.del_rate, 24, ofproto->hourly.del_rate);
+ }
+
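+        /* Fold this minute's counts into the lifetime totals and reset them
+         * for the next minute. */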
+ ofproto->total_subfacet_add_count += ofproto->subfacet_add_count;
+ ofproto->total_subfacet_del_count += ofproto->subfacet_del_count;
+ ofproto->subfacet_add_count = 0;
+ ofproto->subfacet_del_count = 0;
+ ofproto->last_minute += min_ms;
+ }
+}
+
const struct ofproto_class ofproto_dpif_class = {
+ init,
enumerate_types,
enumerate_names,
del,
+ port_open_type,
+ type_run,
+ type_run_fast,
+ type_wait,
alloc,
construct,
destruct,
set_netflow,
get_netflow_ids,
set_sflow,
+ set_ipfix,
set_cfm,
- get_cfm_fault,
- get_cfm_opup,
- get_cfm_remote_mpids,
- get_cfm_health,
+ get_cfm_status,
+ set_bfd,
+ get_bfd_status,
set_stp,
get_stp_status,
set_stp_port,
set_flood_vlans,
is_mirror_output_bundle,
forward_bpdu_changed,
- set_mac_idle_time,
+ set_mac_table_config,
set_realdev,
};