}
static void
-dp_netdev_output_port(void *dp_, struct ofpbuf *packet, uint32_t out_port)
+dp_netdev_output_port(void *dp_, struct ofpbuf *packet,
+ const struct flow *flow OVS_UNUSED, odp_port_t out_port)
{
struct dp_netdev *dp = dp_;
- struct dp_netdev_port *p = dp->ports[out_port];
+ struct dp_netdev_port *p = dp->ports[odp_to_u32(out_port)];
if (p) {
netdev_send(p->netdev, packet);
}
static void
dp_netdev_action_userspace(void *dp, struct ofpbuf *packet,
const struct flow *key,
- const struct nlattr *userdata)
+ const struct nlattr *a)
{
+ const struct nlattr *userdata;
+
+ userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
dp_netdev_output_userspace(dp, packet, DPIF_UC_ACTION, key, userdata);
}
#include "flow.h"
#include "netdev.h"
#include "netlink.h"
+#include "odp-execute.h"
#include "odp-util.h"
#include "ofp-errors.h"
#include "ofp-print.h"
COVERAGE_DEFINE(dpif_flow_del);
COVERAGE_DEFINE(dpif_execute);
COVERAGE_DEFINE(dpif_purge);
+COVERAGE_DEFINE(dpif_execute_with_help);
static const struct dpif_class *base_dpif_classes[] = {
#ifdef LINUX_DATAPATH
return dump->error == EOF ? 0 : dump->error;
}
+struct dpif_execute_helper_aux {
+ struct dpif *dpif;
+ int error;
+};
+
+static void
+dpif_execute_helper_execute__(void *aux_, struct ofpbuf *packet,
+ const struct flow *flow,
+ const struct nlattr *actions, size_t actions_len)
+{
+ struct dpif_execute_helper_aux *aux = aux_;
+ struct dpif_execute execute;
+ struct odputil_keybuf key_stub;
+ struct ofpbuf key;
+ int error;
+
+ ofpbuf_use_stub(&key, &key_stub, sizeof key_stub);
+ odp_flow_key_from_flow(&key, flow, flow->in_port.odp_port);
+
+ execute.key = key.data;
+ execute.key_len = key.size;
+ execute.actions = actions;
+ execute.actions_len = actions_len;
+ execute.packet = packet;
+ execute.needs_help = false;
+
+ error = aux->dpif->dpif_class->execute(aux->dpif, &execute);
+ if (error) {
+ aux->error = error;
+ }
+}
+
+static void
+dpif_execute_helper_output_cb(void *aux, struct ofpbuf *packet,
+ const struct flow *flow, odp_port_t out_port)
+{
+ uint64_t actions_stub[DIV_ROUND_UP(NL_A_U32_SIZE, 8)];
+ struct ofpbuf actions;
+
+ ofpbuf_use_stack(&actions, actions_stub, sizeof actions_stub);
+ nl_msg_put_u32(&actions, OVS_ACTION_ATTR_OUTPUT, odp_to_u32(out_port));
+
+ dpif_execute_helper_execute__(aux, packet, flow,
+ actions.data, actions.size);
+}
+
+static void
+dpif_execute_helper_userspace_cb(void *aux, struct ofpbuf *packet,
+ const struct flow *flow,
+ const struct nlattr *action)
+{
+ dpif_execute_helper_execute__(aux, packet, flow,
+ action, NLA_ALIGN(action->nla_len));
+}
+
+/* Executes 'execute' by performing most of the actions in userspace and
+ * passing the fully constructed packets to 'dpif' for output and userspace
+ * actions.
+ *
+ * This helps with actions that a given 'dpif' doesn't implement directly. */
+static int
+dpif_execute_with_help(struct dpif *dpif, const struct dpif_execute *execute)
+{
+ struct dpif_execute_helper_aux aux;
+ enum odp_key_fitness fit;
+ struct ofpbuf *packet;
+ struct flow flow;
+
+ COVERAGE_INC(dpif_execute_with_help);
+
+ fit = odp_flow_key_to_flow(execute->key, execute->key_len, &flow);
+ if (fit == ODP_FIT_ERROR) {
+ return EINVAL;
+ }
+
+ aux.dpif = dpif;
+ aux.error = 0;
+
+ packet = ofpbuf_clone_with_headroom(execute->packet, VLAN_HEADER_LEN);
+ odp_execute_actions(&aux, packet, &flow,
+ execute->actions, execute->actions_len,
+ dpif_execute_helper_output_cb,
+ dpif_execute_helper_userspace_cb);
+ ofpbuf_delete(packet);
+
+ return aux.error;
+}
+
static int
dpif_execute__(struct dpif *dpif, const struct dpif_execute *execute)
{
COVERAGE_INC(dpif_execute);
if (execute->actions_len > 0) {
- error = dpif->dpif_class->execute(dpif, execute);
+ error = (execute->needs_help
+ ? dpif_execute_with_help(dpif, execute)
+ : dpif->dpif_class->execute(dpif, execute));
} else {
error = 0;
}
* it contains some metadata that cannot be recovered from 'packet', such as
* tunnel and in_port.)
*
+ * Some dpif providers do not implement every action. The Linux kernel
+ * datapath, in particular, does not implement ARP field modification. If
+ * 'needs_help' is true, the dpif layer executes in userspace all of the
+ * actions that it can, and for OVS_ACTION_ATTR_OUTPUT and
+ * OVS_ACTION_ATTR_USERSPACE actions it passes the packet through to the dpif
+ * implementation.
+ *
* Returns 0 if successful, otherwise a positive errno value. */
int
dpif_execute(struct dpif *dpif,
const struct nlattr *key, size_t key_len,
const struct nlattr *actions, size_t actions_len,
- const struct ofpbuf *buf)
+ const struct ofpbuf *buf,
+ bool needs_help)
{
struct dpif_execute execute;
execute.actions = actions;
execute.actions_len = actions_len;
execute.packet = buf;
+ execute.needs_help = needs_help;
return dpif_execute__(dpif, &execute);
}
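(A hedged usage sketch, not part of this patch: the translation layer later in
this series records SLOW_ACTION when it commits an action the datapath cannot
execute itself, and a caller forwards that bit as 'needs_help'.  'xout' is
assumed to be a struct xlate_out filled in by xlate_actions(); 'key'/'key_len'
are the ODP flow key as in the prototype above.)

    error = dpif_execute(dpif, key, key_len,
                         xout.odp_actions.data, xout.odp_actions.size,
                         packet,
                         (xout.slow & SLOW_ACTION) != 0);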
void
dpif_operate(struct dpif *dpif, struct dpif_op **ops, size_t n_ops)
{
- size_t i;
-
if (dpif->dpif_class->operate) {
- dpif->dpif_class->operate(dpif, ops, n_ops);
+ while (n_ops > 0) {
+ size_t chunk;
+
+ /* Count 'chunk', the number of ops that can be executed without
+ * needing any help. Ops that need help should be rare, so we
+ * expect this to ordinarily be 'n_ops', that is, all the ops. */
+ for (chunk = 0; chunk < n_ops; chunk++) {
+ struct dpif_op *op = ops[chunk];
+
+ if (op->type == DPIF_OP_EXECUTE && op->u.execute.needs_help) {
+ break;
+ }
+ }
+
+ if (chunk) {
+ /* Execute a chunk full of ops that the dpif provider can
+ * handle itself, without help. */
+ size_t i;
+
+ dpif->dpif_class->operate(dpif, ops, chunk);
+
+ for (i = 0; i < chunk; i++) {
+ struct dpif_op *op = ops[i];
+
+ switch (op->type) {
+ case DPIF_OP_FLOW_PUT:
+ log_flow_put_message(dpif, &op->u.flow_put, op->error);
+ break;
+
+ case DPIF_OP_FLOW_DEL:
+ log_flow_del_message(dpif, &op->u.flow_del, op->error);
+ break;
+
+ case DPIF_OP_EXECUTE:
+ log_execute_message(dpif, &op->u.execute, op->error);
+ break;
+ }
+ }
+
+ ops += chunk;
+ n_ops -= chunk;
+ } else {
+ /* Help the dpif provider to execute one op. */
+ struct dpif_op *op = ops[0];
+
+ op->error = dpif_execute__(dpif, &op->u.execute);
+ ops++;
+ n_ops--;
+ }
+ }
+ } else {
+ size_t i;
for (i = 0; i < n_ops; i++) {
struct dpif_op *op = ops[i];
switch (op->type) {
case DPIF_OP_FLOW_PUT:
- log_flow_put_message(dpif, &op->u.flow_put, op->error);
+ op->error = dpif_flow_put__(dpif, &op->u.flow_put);
break;
case DPIF_OP_FLOW_DEL:
- log_flow_del_message(dpif, &op->u.flow_del, op->error);
+ op->error = dpif_flow_del__(dpif, &op->u.flow_del);
break;
case DPIF_OP_EXECUTE:
- log_execute_message(dpif, &op->u.execute, op->error);
+ op->error = dpif_execute__(dpif, &op->u.execute);
break;
- }
- }
- return;
- }
-
- for (i = 0; i < n_ops; i++) {
- struct dpif_op *op = ops[i];
-
- switch (op->type) {
- case DPIF_OP_FLOW_PUT:
- op->error = dpif_flow_put__(dpif, &op->u.flow_put);
- break;
-
- case DPIF_OP_FLOW_DEL:
- op->error = dpif_flow_del__(dpif, &op->u.flow_del);
- break;
- case DPIF_OP_EXECUTE:
- op->error = dpif_execute__(dpif, &op->u.execute);
- break;
-
- default:
- NOT_REACHED();
+ default:
+ NOT_REACHED();
+ }
}
}
}
-
/* Returns a string that represents 'type', for use in log messages. */
const char *
dpif_upcall_type_to_string(enum dpif_upcall_type type)
int dpif_execute(struct dpif *,
const struct nlattr *key, size_t key_len,
const struct nlattr *actions, size_t actions_len,
- const struct ofpbuf *);
+ const struct ofpbuf *,
+ bool needs_help);
\f
/* Operation batching interface.
*
};
struct dpif_execute {
+ /* Raw support for execute passed along to the provider. */
const struct nlattr *key; /* Partial flow key (only for metadata). */
size_t key_len; /* Length of 'key' in bytes. */
const struct nlattr *actions; /* Actions to execute on packet. */
size_t actions_len; /* Length of 'actions' in bytes. */
const struct ofpbuf *packet; /* Packet to execute. */
+
+ /* Some dpif providers do not implement every action. The Linux kernel
+ * datapath, in particular, does not implement ARP field modification.
+ *
+ * If this member is set to true, the dpif layer executes in userspace all
+ * of the actions that it can, and for OVS_ACTION_ATTR_OUTPUT and
+ * OVS_ACTION_ATTR_USERSPACE actions it passes the packet through to the
+ * dpif implementation. */
+ bool needs_help;
};
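(Sketch of how a batched caller interacts with the chunking logic in
dpif_operate(); the op structs and values here are hypothetical.)

    struct dpif_op put_op, execute_op, del_op;   /* initialized elsewhere */
    struct dpif_op *ops[3] = { &put_op, &execute_op, &del_op };

    execute_op.type = DPIF_OP_EXECUTE;
    execute_op.u.execute.needs_help = true;      /* e.g. ARP modification */

    /* dpif_operate() sends ops[0] to the provider as one batch, stops at
     * ops[1] because it needs help and runs it via dpif_execute_with_help(),
     * then batches ops[2] back through the provider. */
    dpif_operate(dpif, ops, 3);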
struct dpif_op {
MFM_NONE,
MFS_DECIMAL,
MFP_ARP,
- false,
+ true,
NXM_OF_ARP_OP, "NXM_OF_ARP_OP",
OXM_OF_ARP_OP, "OXM_OF_ARP_OP",
OFPUTIL_P_ANY,
MFM_FULLY,
MFS_IPV4,
MFP_ARP,
- false,
+ true,
NXM_OF_ARP_SPA, "NXM_OF_ARP_SPA",
OXM_OF_ARP_SPA, "OXM_OF_ARP_SPA",
OFPUTIL_P_ANY,
MFM_FULLY,
MFS_IPV4,
MFP_ARP,
- false,
+ true,
NXM_OF_ARP_TPA, "NXM_OF_ARP_TPA",
OXM_OF_ARP_TPA, "OXM_OF_ARP_TPA",
OFPUTIL_P_ANY,
MFM_FULLY,
MFS_ETHERNET,
MFP_ARP,
- false,
+ true,
NXM_NX_ARP_SHA, "NXM_NX_ARP_SHA",
OXM_OF_ARP_SHA, "OXM_OF_ARP_SHA",
OFPUTIL_P_NXM_OXM_ANY,
MFM_FULLY,
MFS_ETHERNET,
MFP_ARP,
- false,
+ true,
NXM_NX_ARP_THA, "NXM_NX_ARP_THA",
OXM_OF_ARP_THA, "OXM_OF_ARP_THA",
OFPUTIL_P_NXM_OXM_ANY,
break;
case MFF_IPV6_LABEL:
- value->be32 &= ~htonl(IPV6_LABEL_MASK);
+ value->be32 &= htonl(IPV6_LABEL_MASK);
break;
case MFF_IP_DSCP:
#include "ofp-print.h"
#include "ofpbuf.h"
#include "packets.h"
+#include "pcap-file.h"
#include "poll-loop.h"
#include "shash.h"
#include "sset.h"
#include "stream.h"
#include "unaligned.h"
+#include "timeval.h"
#include "unixctl.h"
#include "vlog.h"
struct dummy_stream *streams OVS_GUARDED;
size_t n_streams OVS_GUARDED;
+ FILE *tx_pcap, *rx_pcap OVS_GUARDED;
+
struct list rxes OVS_GUARDED; /* List of child "netdev_rx_dummy"s. */
};
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
ovs_mutex_lock(&netdev->mutex);
+
if (netdev->ifindex >= 0) {
smap_add_format(args, "ifindex", "%d", netdev->ifindex);
}
+
if (netdev->pstream) {
smap_add(args, "pstream", pstream_get_name(netdev->pstream));
}
- ovs_mutex_unlock(&netdev->mutex);
+ ovs_mutex_unlock(&netdev->mutex);
return 0;
}
{
struct netdev_dummy *netdev = netdev_dummy_cast(netdev_);
const char *pstream;
+ const char *pcap;
ovs_mutex_lock(&netdev->mutex);
netdev->ifindex = smap_get_int(args, "ifindex", -EOPNOTSUPP);
}
}
}
+
+ if (netdev->rx_pcap) {
+ fclose(netdev->rx_pcap);
+ }
+ if (netdev->tx_pcap && netdev->tx_pcap != netdev->rx_pcap) {
+ fclose(netdev->tx_pcap);
+ }
+ netdev->rx_pcap = netdev->tx_pcap = NULL;
+ pcap = smap_get(args, "pcap");
+ if (pcap) {
+ netdev->rx_pcap = netdev->tx_pcap = pcap_open(pcap, "ab");
+ } else {
+ const char *rx_pcap = smap_get(args, "rx_pcap");
+ const char *tx_pcap = smap_get(args, "tx_pcap");
+
+ if (rx_pcap) {
+ netdev->rx_pcap = pcap_open(rx_pcap, "ab");
+ }
+ if (tx_pcap) {
+ netdev->tx_pcap = pcap_open(tx_pcap, "ab");
+ }
+ }
+
ovs_mutex_unlock(&netdev->mutex);
return 0;
dev->stats.tx_packets++;
dev->stats.tx_bytes += size;
+ if (dev->tx_pcap) {
+ struct ofpbuf packet;
+
+ ofpbuf_use_const(&packet, buffer, size);
+ pcap_write(dev->tx_pcap, &packet);
+ fflush(dev->tx_pcap);
+ }
+
for (i = 0; i < dev->n_streams; i++) {
struct dummy_stream *s = &dev->streams[i];
static void
netdev_dummy_queue_packet(struct netdev_dummy *dummy, struct ofpbuf *packet)
+ OVS_REQUIRES(dummy->mutex)
{
struct netdev_rx_dummy *rx, *prev;
+ if (dummy->rx_pcap) {
+ pcap_write(dummy->rx_pcap, packet);
+ fflush(dummy->rx_pcap);
+ }
prev = NULL;
LIST_FOR_EACH (rx, node, &dummy->rxes) {
if (rx->recv_queue_len < NETDEV_DUMMY_MAX_QUEUE) {
#include "ofpbuf.h"
#include "odp-util.h"
#include "packets.h"
+#include "unaligned.h"
#include "util.h"
static void
ovs_assert(fitness != ODP_FIT_ERROR);
}
+static void
+set_arp(struct ofpbuf *packet, const struct ovs_key_arp *arp_key)
+{
+ struct arp_eth_header *arp = packet->l3;
+
+ arp->ar_op = arp_key->arp_op;
+ memcpy(arp->ar_sha, arp_key->arp_sha, ETH_ADDR_LEN);
+ put_16aligned_be32(&arp->ar_spa, arp_key->arp_sip);
+ memcpy(arp->ar_tha, arp_key->arp_tha, ETH_ADDR_LEN);
+ put_16aligned_be32(&arp->ar_tpa, arp_key->arp_tip);
+}
+
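(Why put_16aligned_be32() here: the ARP payload follows a 14-byte Ethernet
header, so its 32-bit IP fields are only 16-bit aligned in the packet.  A
simplified sketch of the helper's idea, recalled from lib/unaligned.h rather
than taken from this patch:)

    /* Store a network-byte-order 32-bit value at a 16-bit-aligned address as
     * two 16-bit halves, avoiding unaligned-access traps on strict-alignment
     * CPUs.  Which half holds which bytes depends on host byte order because
     * 'value' is already big-endian. */
    typedef struct { ovs_be16 hi, lo; } aligned_be32_sketch;

    static void
    put_be32_sketch(aligned_be32_sketch *x, ovs_be32 value)
    {
    #ifdef WORDS_BIGENDIAN
        x->hi = value >> 16;
        x->lo = value;
    #else
        x->hi = value;
        x->lo = value >> 16;
    #endif
    }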
static void
odp_execute_set_action(struct ofpbuf *packet, const struct nlattr *a,
struct flow *flow)
set_mpls_lse(packet, nl_attr_get_be32(a));
break;
+ case OVS_KEY_ATTR_ARP:
+ set_arp(packet, nl_attr_get_unspec(a, sizeof(struct ovs_key_arp)));
+ break;
+
case OVS_KEY_ATTR_UNSPEC:
case OVS_KEY_ATTR_ENCAP:
case OVS_KEY_ATTR_ETHERTYPE:
case OVS_KEY_ATTR_VLAN:
case OVS_KEY_ATTR_ICMP:
case OVS_KEY_ATTR_ICMPV6:
- case OVS_KEY_ATTR_ARP:
case OVS_KEY_ATTR_ND:
case __OVS_KEY_ATTR_MAX:
default:
odp_execute_sample(void *dp, struct ofpbuf *packet, struct flow *key,
const struct nlattr *action,
void (*output)(void *dp, struct ofpbuf *packet,
- uint32_t out_port),
+ const struct flow *key,
+ odp_port_t out_port),
void (*userspace)(void *dp, struct ofpbuf *packet,
const struct flow *key,
- const struct nlattr *a))
+ const struct nlattr *action))
{
const struct nlattr *subactions = NULL;
const struct nlattr *a;
odp_execute_actions(void *dp, struct ofpbuf *packet, struct flow *key,
const struct nlattr *actions, size_t actions_len,
void (*output)(void *dp, struct ofpbuf *packet,
- uint32_t out_port),
+ const struct flow *key,
+ odp_port_t out_port),
void (*userspace)(void *dp, struct ofpbuf *packet,
const struct flow *key,
- const struct nlattr *a))
+ const struct nlattr *action))
{
const struct nlattr *a;
unsigned int left;
switch ((enum ovs_action_attr) type) {
case OVS_ACTION_ATTR_OUTPUT:
if (output) {
- output(dp, packet, nl_attr_get_u32(a));
+ output(dp, packet, key, u32_to_odp(nl_attr_get_u32(a)));
}
break;
case OVS_ACTION_ATTR_USERSPACE: {
- if (userspace) {
- const struct nlattr *userdata;
- userdata = nl_attr_find_nested(a, OVS_USERSPACE_ATTR_USERDATA);
- userspace(dp, packet, key, userdata);
- }
+ userspace(dp, packet, key, a);
break;
}
#include <stddef.h>
#include <stdint.h>
+#include "openvswitch/types.h"
struct flow;
struct nlattr;
odp_execute_actions(void *dp, struct ofpbuf *packet, struct flow *key,
const struct nlattr *actions, size_t actions_len,
void (*output)(void *dp, struct ofpbuf *packet,
- uint32_t out_port),
+ const struct flow *key,
+ odp_port_t out_port),
void (*userspace)(void *dp, struct ofpbuf *packet,
const struct flow *key,
- const struct nlattr *a));
+ const struct nlattr *action));
#endif
}
static const char *
-slow_path_reason_to_string(enum slow_path_reason reason)
+slow_path_reason_to_string(uint32_t reason)
{
- switch (reason) {
- case SLOW_CFM:
- return "cfm";
- case SLOW_LACP:
- return "lacp";
- case SLOW_STP:
- return "stp";
- case SLOW_BFD:
- return "bfd";
- case SLOW_CONTROLLER:
- return "controller";
- case __SLOW_MAX:
- default:
- return NULL;
+ switch ((enum slow_path_reason) reason) {
+#define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
+ SLOW_PATH_REASONS
+#undef SPR
}
+
+ return NULL;
}
-static enum slow_path_reason
-string_to_slow_path_reason(const char *string)
+const char *
+slow_path_reason_to_explanation(enum slow_path_reason reason)
{
- enum slow_path_reason i;
-
- for (i = 1; i < __SLOW_MAX; i++) {
- if (!strcmp(string, slow_path_reason_to_string(i))) {
- return i;
- }
+ switch (reason) {
+#define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
+ SLOW_PATH_REASONS
+#undef SPR
}
- return 0;
+ return "<unknown>";
}
static int
cookie.sflow.output);
} else if (userdata_len == sizeof cookie.slow_path
&& cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
- const char *reason;
- reason = slow_path_reason_to_string(cookie.slow_path.reason);
- reason = reason ? reason : "";
- ds_put_format(ds, ",slow_path(%s)", reason);
+ ds_put_cstr(ds, ",slow_path(");
+ format_flags(ds, slow_path_reason_to_string,
+ cookie.slow_path.reason, ',');
+ ds_put_format(ds, ")");
} else if (userdata_len == sizeof cookie.flow_sample
&& cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
ds_put_format(ds, ",flow_sample(probability=%"PRIu16
odp_put_userspace_action(pid, &cookie, sizeof cookie.sflow,
actions);
return n;
- } else if (sscanf(s, "userspace(pid=%lli,slow_path(%n", &pid, &n) > 0
+ } else if (sscanf(s, "userspace(pid=%lli,slow_path%n", &pid, &n) > 0
&& n > 0) {
union user_action_cookie cookie;
- char reason[32];
-
- if (s[n] == ')' && s[n + 1] == ')') {
- reason[0] = '\0';
- n += 2;
- } else if (sscanf(s + n, "%31[^)]))", reason) > 0) {
- n += strlen(reason) + 2;
- } else {
- return -EINVAL;
- }
+ int res;
cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
cookie.slow_path.unused = 0;
- cookie.slow_path.reason = string_to_slow_path_reason(reason);
+ cookie.slow_path.reason = 0;
- if (reason[0] && !cookie.slow_path.reason) {
+ res = parse_flags(&s[n], slow_path_reason_to_string,
+ &cookie.slow_path.reason);
+ if (res < 0) {
+ return res;
+ }
+ n += res;
+ if (s[n] != ')') {
return -EINVAL;
}
+ n++;
odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path,
actions);
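(Illustrative action strings that the reworked formatter and parser round-trip
now that 'reason' is a bit mask; the flag names come from the SPR table
introduced later in this patch:)

    userspace(pid=6633,slow_path(action))
    userspace(pid=6633,slow_path(cfm,lacp))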
&ipv6_key, sizeof(ipv6_key));
}
-static void
+static enum slow_path_reason
+commit_set_arp_action(const struct flow *flow, struct flow *base,
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+ struct ovs_key_arp arp_key;
+
+ if (base->nw_src == flow->nw_src &&
+ base->nw_dst == flow->nw_dst &&
+ base->nw_proto == flow->nw_proto &&
+ eth_addr_equals(base->arp_sha, flow->arp_sha) &&
+ eth_addr_equals(base->arp_tha, flow->arp_tha)) {
+ return 0;
+ }
+
+ memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+ memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+ memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+ memset(&wc->masks.arp_sha, 0xff, sizeof wc->masks.arp_sha);
+ memset(&wc->masks.arp_tha, 0xff, sizeof wc->masks.arp_tha);
+
+ base->nw_src = flow->nw_src;
+ base->nw_dst = flow->nw_dst;
+ base->nw_proto = flow->nw_proto;
+ memcpy(base->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+ memcpy(base->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+ arp_key.arp_sip = base->nw_src;
+ arp_key.arp_tip = base->nw_dst;
+ arp_key.arp_op = htons(base->nw_proto);
+ memcpy(arp_key.arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+ memcpy(arp_key.arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+ commit_set_action(odp_actions, OVS_KEY_ATTR_ARP, &arp_key, sizeof arp_key);
+
+ return SLOW_ACTION;
+}
+
+static enum slow_path_reason
commit_set_nw_action(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
- /* Check if flow really have an IP header. */
+ /* Check if 'flow' really has an L3 header. */
if (!flow->nw_proto) {
- return;
+ return 0;
}
- if (base->dl_type == htons(ETH_TYPE_IP)) {
+ switch (ntohs(base->dl_type)) {
+ case ETH_TYPE_IP:
commit_set_ipv4_action(flow, base, odp_actions, wc);
- } else if (base->dl_type == htons(ETH_TYPE_IPV6)) {
+ break;
+
+ case ETH_TYPE_IPV6:
commit_set_ipv6_action(flow, base, odp_actions, wc);
+ break;
+
+ case ETH_TYPE_ARP:
+ return commit_set_arp_action(flow, base, odp_actions, wc);
}
+
+ return 0;
}
static void
odp_put_pkt_mark_action(base->pkt_mark, odp_actions);
}
+
/* If any of the flow key data that ODP actions can modify are different in
* 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
* key from 'base' into 'flow', and then changes 'base' the same way. Does not
* commit set_tunnel actions. Users should call commit_odp_tunnel_action()
* in addition to this function if needed. Sets fields in 'wc' that are
- * used as part of the action. */
-void
+ * used as part of the action.
+ *
+ * Returns a reason to force processing the flow's packets into the userspace
+ * slow path, if there is one, otherwise 0. */
+enum slow_path_reason
commit_odp_actions(const struct flow *flow, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc,
int *mpls_depth_delta)
{
+ enum slow_path_reason slow;
+
commit_set_ether_addr_action(flow, base, odp_actions, wc);
commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
- commit_set_nw_action(flow, base, odp_actions, wc);
+ slow = commit_set_nw_action(flow, base, odp_actions, wc);
commit_set_port_action(flow, base, odp_actions, wc);
/* Committing MPLS actions should occur after committing nw and port
* actions. This is because committing MPLS actions may alter a packet so
commit_mpls_action(flow, base, odp_actions, wc, mpls_depth_delta);
commit_set_priority_action(flow, base, odp_actions, wc);
commit_set_pkt_mark_action(flow, base, odp_actions, wc);
+
+ return slow;
}
struct ofpbuf;
struct simap;
+#define SLOW_PATH_REASONS \
+ /* These reasons are mutually exclusive. */ \
+ SPR(SLOW_CFM, "cfm", "Consists of CFM packets") \
+ SPR(SLOW_BFD, "bfd", "Consists of BFD packets") \
+ SPR(SLOW_LACP, "lacp", "Consists of LACP packets") \
+ SPR(SLOW_STP, "stp", "Consists of STP packets") \
+ SPR(SLOW_CONTROLLER, "controller", \
+ "Sends \"packet-in\" messages to the OpenFlow controller") \
+ SPR(SLOW_ACTION, "action", \
+ "Uses action(s) not supported by datapath")
+
+/* Indexes for slow-path reasons. Client code uses "enum slow_path_reason"
+ * values instead of these; these are just a way to construct those. */
+enum {
+#define SPR(ENUM, STRING, EXPLANATION) ENUM##_INDEX,
+ SLOW_PATH_REASONS
+#undef SPR
+};
+
+/* Reasons why a subfacet might not be fast-pathable.
+ *
+ * Each reason is a separate bit to allow reasons to be combined. */
+enum slow_path_reason {
+#define SPR(ENUM, STRING, EXPLANATION) ENUM = 1 << ENUM##_INDEX,
+ SLOW_PATH_REASONS
+#undef SPR
+};
+
+const char *slow_path_reason_to_explanation(enum slow_path_reason);
+
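(A minimal standalone sketch of how the SPR X-macro expands, abbreviated to
two reasons; everything below is illustrative, not OVS code:)

    #include <stdint.h>
    #include <stdio.h>

    #define REASONS \
        SPR(SLOW_CFM,    "cfm",    "Consists of CFM packets") \
        SPR(SLOW_ACTION, "action", "Uses action(s) not supported by datapath")

    enum {                            /* SLOW_CFM_INDEX=0, SLOW_ACTION_INDEX=1 */
    #define SPR(ENUM, STRING, EXPLANATION) ENUM##_INDEX,
        REASONS
    #undef SPR
    };

    enum reason {                     /* SLOW_CFM=1<<0, SLOW_ACTION=1<<1 */
    #define SPR(ENUM, STRING, EXPLANATION) ENUM = 1 << ENUM##_INDEX,
        REASONS
    #undef SPR
    };

    static const char *
    to_string(uint32_t reason)        /* returns NULL unless exactly one bit */
    {
        switch ((enum reason) reason) {
    #define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
            REASONS
    #undef SPR
        }
        return NULL;
    }

    int
    main(void)
    {
        printf("%s\n", to_string(SLOW_ACTION));   /* prints "action" */
        return 0;
    }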
#define ODPP_LOCAL ODP_PORT_C(OVSP_LOCAL)
#define ODPP_NONE ODP_PORT_C(UINT32_MAX)
void commit_odp_tunnel_action(const struct flow *, struct flow *base,
struct ofpbuf *odp_actions);
-void commit_odp_actions(const struct flow *, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc,
- int *mpls_depth_delta);
+enum slow_path_reason commit_odp_actions(const struct flow *,
+ struct flow *base,
+ struct ofpbuf *odp_actions,
+ struct flow_wildcards *wc,
+ int *mpls_depth_delta);
\f
/* ofproto-dpif interface.
*
void odp_put_pkt_mark_action(const uint32_t pkt_mark,
struct ofpbuf *odp_actions);
-/* Reasons why a subfacet might not be fast-pathable. */
-enum slow_path_reason {
- SLOW_CFM = 1, /* CFM packets need per-packet processing. */
- SLOW_LACP, /* LACP packets need per-packet processing. */
- SLOW_STP, /* STP packets need per-packet processing. */
- SLOW_BFD, /* BFD packets need per-packet processing. */
- SLOW_CONTROLLER, /* Packets must go to OpenFlow controller. */
- __SLOW_MAX
-};
-
#endif /* odp-util.h */
/*
- * Copyright (c) 2009, 2010, 2012 Nicira, Inc.
+ * Copyright (c) 2009, 2010, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include "pcap-file.h"
#include <errno.h>
#include <inttypes.h>
+#include <stdlib.h>
#include <string.h>
+#include <sys/stat.h>
#include "compiler.h"
#include "ofpbuf.h"
#include "vlog.h"
FILE *
pcap_open(const char *file_name, const char *mode)
{
+ struct stat s;
FILE *file;
+ int error;
- ovs_assert(!strcmp(mode, "rb") || !strcmp(mode, "wb"));
+ ovs_assert(!strcmp(mode, "rb") ||
+ !strcmp(mode, "wb") ||
+ !strcmp(mode, "ab"));
file = fopen(file_name, mode);
if (file == NULL) {
- VLOG_WARN("%s: failed to open pcap file for %s",
- file_name, mode[0] == 'r' ? "reading" : "writing");
+ VLOG_WARN("%s: failed to open pcap file for %s (%s)", file_name,
+ (mode[0] == 'r' ? "reading"
+ : mode[0] == 'w' ? "writing"
+ : "appending"),
+ ovs_strerror(errno));
return NULL;
}
- if (mode[0] == 'r') {
- if (!pcap_read_header(file)) {
+ switch (mode[0]) {
+ case 'r':
+ error = pcap_read_header(file);
+ if (error) {
+ errno = error;
fclose(file);
return NULL;
}
- } else {
+ break;
+
+ case 'w':
pcap_write_header(file);
+ break;
+
+ case 'a':
+ if (!fstat(fileno(file), &s) && !s.st_size) {
+ pcap_write_header(file);
+ }
+ break;
+
+ default:
+ NOT_REACHED();
}
return file;
}
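(Usage sketch for the new append mode, with a hypothetical file name: "ab"
writes the pcap file header only when the file starts out empty, so repeated
opens keep appending valid records:)

    FILE *f = pcap_open("dummy-rx.pcap", "ab");
    if (f) {
        pcap_write(f, &packet);       /* 'packet' is a struct ofpbuf */
        fflush(f);
        fclose(f);
    }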
/* Read header. */
if (fread(&prh, sizeof prh, 1, file) != 1) {
- int error = ferror(file) ? errno : EOF;
- VLOG_WARN("failed to read pcap record header: %s",
- ovs_retval_to_string(error));
- return error;
+ if (ferror(file)) {
+ int error = errno;
+ VLOG_WARN("failed to read pcap record header: %s",
+ ovs_retval_to_string(error));
+ return error;
+ } else {
+ return EOF;
+ }
}
/* Calculate length. */
*
* See the large comment in rconn.h for more information. */
struct rconn {
+ struct ovs_mutex mutex;
+
enum state state;
time_t state_entered;
return rconn->allowed_versions;
}
-static unsigned int elapsed_in_this_state(const struct rconn *);
-static unsigned int timeout(const struct rconn *);
-static bool timed_out(const struct rconn *);
-static void state_transition(struct rconn *, enum state);
-static void rconn_set_target__(struct rconn *,
- const char *target, const char *name);
-static int try_send(struct rconn *);
-static void reconnect(struct rconn *);
-static void report_error(struct rconn *, int error);
-static void disconnect(struct rconn *, int error);
-static void flush_queue(struct rconn *);
-static void close_monitor(struct rconn *, size_t idx, int retval);
+static unsigned int elapsed_in_this_state(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex);
+static unsigned int timeout(const struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static bool timed_out(const struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static void state_transition(struct rconn *rc, enum state)
+ OVS_REQUIRES(rc->mutex);
+static void rconn_set_target__(struct rconn *rc,
+ const char *target, const char *name)
+ OVS_REQUIRES(rc->mutex);
+static int rconn_send__(struct rconn *rc, struct ofpbuf *,
+ struct rconn_packet_counter *)
+ OVS_REQUIRES(rc->mutex);
+static int try_send(struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static void reconnect(struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static void report_error(struct rconn *rc, int error) OVS_REQUIRES(rc->mutex);
+static void rconn_disconnect__(struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static void disconnect(struct rconn *rc, int error) OVS_REQUIRES(rc->mutex);
+static void flush_queue(struct rconn *rc) OVS_REQUIRES(rc->mutex);
+static void close_monitor(struct rconn *rc, size_t idx, int retval)
+ OVS_REQUIRES(rc->mutex);
static void copy_to_monitor(struct rconn *, const struct ofpbuf *);
static bool is_connected_state(enum state);
static bool is_admitted_msg(const struct ofpbuf *);
-static bool rconn_logging_connection_attempts__(const struct rconn *);
+static bool rconn_logging_connection_attempts__(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex);
+static int rconn_get_version__(const struct rconn *rconn)
+ OVS_REQUIRES(rconn->mutex);
+
+/* The following prototypes duplicate those in rconn.h, but there we weren't
+ * able to add the OVS_EXCLUDED annotations because the definition of struct
+ * rconn was not visible. */
+
+void rconn_set_max_backoff(struct rconn *rc, int max_backoff)
+ OVS_EXCLUDED(rc->mutex);
+void rconn_connect(struct rconn *rc, const char *target, const char *name)
+ OVS_EXCLUDED(rc->mutex);
+void rconn_connect_unreliably(struct rconn *rc,
+ struct vconn *vconn, const char *name)
+ OVS_EXCLUDED(rc->mutex);
+void rconn_reconnect(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+void rconn_disconnect(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+void rconn_run(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+void rconn_run_wait(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+struct ofpbuf *rconn_recv(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+void rconn_recv_wait(struct rconn *rc) OVS_EXCLUDED(rc->mutex);
+int rconn_send(struct rconn *rc, struct ofpbuf *b,
+ struct rconn_packet_counter *counter)
+ OVS_EXCLUDED(rc->mutex);
+int rconn_send_with_limit(struct rconn *rc, struct ofpbuf *b,
+ struct rconn_packet_counter *counter,
+ int queue_limit)
+ OVS_EXCLUDED(rc->mutex);
+void rconn_add_monitor(struct rconn *rc, struct vconn *vconn)
+ OVS_EXCLUDED(rc->mutex);
+void rconn_set_name(struct rconn *rc, const char *new_name)
+ OVS_EXCLUDED(rc->mutex);
+bool rconn_is_admitted(const struct rconn *rconn) OVS_EXCLUDED(rconn->mutex);
+int rconn_failure_duration(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex);
+ovs_be16 rconn_get_local_port(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex);
+int rconn_get_version(const struct rconn *rconn) OVS_EXCLUDED(rconn->mutex);
+unsigned int rconn_count_txqlen(const struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex);
+
/* Creates and returns a new rconn.
*
{
struct rconn *rc = xzalloc(sizeof *rc);
+ ovs_mutex_init(&rc->mutex);
+
rc->state = S_VOID;
rc->state_entered = time_now();
void
rconn_set_max_backoff(struct rconn *rc, int max_backoff)
+ OVS_EXCLUDED(rc->mutex)
{
+ ovs_mutex_lock(&rc->mutex);
rc->max_backoff = MAX(1, max_backoff);
if (rc->state == S_BACKOFF && rc->backoff > max_backoff) {
rc->backoff = max_backoff;
rc->backoff_deadline = time_now() + max_backoff;
}
}
+ ovs_mutex_unlock(&rc->mutex);
}
int
* but it need not be acceptable to vconn_open(). */
void
rconn_connect(struct rconn *rc, const char *target, const char *name)
+ OVS_EXCLUDED(rc->mutex)
{
- rconn_disconnect(rc);
+ ovs_mutex_lock(&rc->mutex);
+ rconn_disconnect__(rc);
rconn_set_target__(rc, target, name);
rc->reliable = true;
reconnect(rc);
+ ovs_mutex_unlock(&rc->mutex);
}
/* Drops any existing connection on 'rc', then configures 'rc' to use
void
rconn_connect_unreliably(struct rconn *rc,
struct vconn *vconn, const char *name)
+ OVS_EXCLUDED(rc->mutex)
{
ovs_assert(vconn != NULL);
- rconn_disconnect(rc);
+
+ ovs_mutex_lock(&rc->mutex);
+ rconn_disconnect__(rc);
rconn_set_target__(rc, vconn_get_name(vconn), name);
rc->reliable = false;
rc->vconn = vconn;
rc->last_connected = time_now();
state_transition(rc, S_ACTIVE);
+ ovs_mutex_unlock(&rc->mutex);
}
/* If 'rc' is connected, forces it to drop the connection and reconnect. */
void
rconn_reconnect(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
+ ovs_mutex_lock(&rc->mutex);
if (rc->state & (S_ACTIVE | S_IDLE)) {
VLOG_INFO("%s: disconnecting", rc->name);
disconnect(rc, 0);
}
+ ovs_mutex_unlock(&rc->mutex);
}
-void
-rconn_disconnect(struct rconn *rc)
+static void
+rconn_disconnect__(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (rc->state != S_VOID) {
if (rc->vconn) {
}
}
+void
+rconn_disconnect(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
+{
+ ovs_mutex_lock(&rc->mutex);
+ rconn_disconnect__(rc);
+ ovs_mutex_unlock(&rc->mutex);
+}
+
/* Disconnects 'rc' and frees the underlying storage. */
void
rconn_destroy(struct rconn *rc)
if (rc) {
size_t i;
+ ovs_mutex_lock(&rc->mutex);
free(rc->name);
free(rc->target);
vconn_close(rc->vconn);
for (i = 0; i < rc->n_monitors; i++) {
vconn_close(rc->monitors[i]);
}
+ ovs_mutex_unlock(&rc->mutex);
+ ovs_mutex_destroy(&rc->mutex);
+
free(rc);
}
}
static unsigned int
timeout_VOID(const struct rconn *rc OVS_UNUSED)
+ OVS_REQUIRES(rc->mutex)
{
return UINT_MAX;
}
static void
run_VOID(struct rconn *rc OVS_UNUSED)
+ OVS_REQUIRES(rc->mutex)
{
/* Nothing to do. */
}
static void
reconnect(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
int retval;
static unsigned int
timeout_BACKOFF(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return rc->backoff;
}
static void
run_BACKOFF(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (timed_out(rc)) {
reconnect(rc);
static unsigned int
timeout_CONNECTING(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return MAX(1, rc->backoff);
}
static void
run_CONNECTING(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
int retval = vconn_connect(rc->vconn);
if (!retval) {
static void
do_tx_work(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (list_is_empty(&rc->txq)) {
return;
static unsigned int
timeout_ACTIVE(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (rc->probe_interval) {
unsigned int base = MAX(rc->last_activity, rc->state_entered);
static void
run_ACTIVE(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (timed_out(rc)) {
unsigned int base = MAX(rc->last_activity, rc->state_entered);
VLOG_DBG("%s: idle %u seconds, sending inactivity probe",
rc->name, (unsigned int) (time_now() - base));
- version = rconn_get_version(rc);
+ version = rconn_get_version__(rc);
ovs_assert(version >= 0 && version <= 0xff);
/* Ordering is important here: rconn_send() can transition to BACKOFF,
* and we don't want to transition back to IDLE if so, because then we
* can end up queuing a packet with vconn == NULL and then *boom*. */
state_transition(rc, S_IDLE);
- rconn_send(rc, make_echo_request(version), NULL);
+ rconn_send__(rc, make_echo_request(version), NULL);
return;
}
static unsigned int
timeout_IDLE(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return rc->probe_interval;
}
static void
run_IDLE(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (timed_out(rc)) {
VLOG_ERR("%s: no response to inactivity probe after %u "
* connected, attempts to send packets in the send queue, if any. */
void
rconn_run(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
int old_state;
size_t i;
+ ovs_mutex_lock(&rc->mutex);
if (rc->vconn) {
vconn_run(rc->vconn);
}
NOT_REACHED();
}
} while (rc->state != old_state);
+ ovs_mutex_unlock(&rc->mutex);
}
/* Causes the next call to poll_block() to wake up when rconn_run() should be
* called on 'rc'. */
void
rconn_run_wait(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
unsigned int timeo;
size_t i;
+ ovs_mutex_lock(&rc->mutex);
if (rc->vconn) {
vconn_run_wait(rc->vconn);
if ((rc->state & (S_ACTIVE | S_IDLE)) && !list_is_empty(&rc->txq)) {
long long int expires = sat_add(rc->state_entered, timeo);
poll_timer_wait_until(expires * 1000);
}
+ ovs_mutex_unlock(&rc->mutex);
}
/* Attempts to receive a packet from 'rc'. If successful, returns the packet;
* the packet (with ofpbuf_delete()). */
struct ofpbuf *
rconn_recv(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
+ struct ofpbuf *buffer = NULL;
+
+ ovs_mutex_lock(&rc->mutex);
if (rc->state & (S_ACTIVE | S_IDLE)) {
- struct ofpbuf *buffer;
int error = vconn_recv(rc->vconn, &buffer);
if (!error) {
copy_to_monitor(rc, buffer);
if (rc->state == S_IDLE) {
state_transition(rc, S_ACTIVE);
}
- return buffer;
} else if (error != EAGAIN) {
report_error(rc, error);
disconnect(rc, error);
}
}
- return NULL;
+ ovs_mutex_unlock(&rc->mutex);
+
+ return buffer;
}
/* Causes the next call to poll_block() to wake up when a packet may be ready
* to be received by vconn_recv() on 'rc'. */
void
rconn_recv_wait(struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
+ ovs_mutex_lock(&rc->mutex);
if (rc->vconn) {
vconn_wait(rc->vconn, WAIT_RECV);
}
+ ovs_mutex_unlock(&rc->mutex);
}
-/* Sends 'b' on 'rc'. Returns 0 if successful, or ENOTCONN if 'rc' is not
- * currently connected. Takes ownership of 'b'.
- *
- * If 'counter' is non-null, then 'counter' will be incremented while the
- * packet is in flight, then decremented when it has been sent (or discarded
- * due to disconnection). Because 'b' may be sent (or discarded) before this
- * function returns, the caller may not be able to observe any change in
- * 'counter'.
- *
- * There is no rconn_send_wait() function: an rconn has a send queue that it
- * takes care of sending if you call rconn_run(), which will have the side
- * effect of waking up poll_block(). */
-int
-rconn_send(struct rconn *rc, struct ofpbuf *b,
+static int
+rconn_send__(struct rconn *rc, struct ofpbuf *b,
struct rconn_packet_counter *counter)
+ OVS_REQUIRES(rc->mutex)
{
if (rconn_is_connected(rc)) {
COVERAGE_INC(rconn_queued);
}
}
+/* Sends 'b' on 'rc'. Returns 0 if successful, or ENOTCONN if 'rc' is not
+ * currently connected. Takes ownership of 'b'.
+ *
+ * If 'counter' is non-null, then 'counter' will be incremented while the
+ * packet is in flight, then decremented when it has been sent (or discarded
+ * due to disconnection). Because 'b' may be sent (or discarded) before this
+ * function returns, the caller may not be able to observe any change in
+ * 'counter'.
+ *
+ * There is no rconn_send_wait() function: an rconn has a send queue that it
+ * takes care of sending if you call rconn_run(), which will have the side
+ * effect of waking up poll_block(). */
+int
+rconn_send(struct rconn *rc, struct ofpbuf *b,
+ struct rconn_packet_counter *counter)
+ OVS_EXCLUDED(rc->mutex)
+{
+ int error;
+
+ ovs_mutex_lock(&rc->mutex);
+ error = rconn_send__(rc, b, counter);
+ ovs_mutex_unlock(&rc->mutex);
+
+ return error;
+}
+
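(The locking pattern this commit applies throughout rconn, sketched with a
hypothetical function 'frob': the public entry point is annotated
OVS_EXCLUDED and takes the mutex, while the '__' variant is annotated
OVS_REQUIRES so internal callers that already hold the lock can compose it
without recursive locking:)

    static int
    frob__(struct rconn *rc) OVS_REQUIRES(rc->mutex)
    {
        /* ...touch rc's OVS_GUARDED state freely... */
        return 0;
    }

    int
    frob(struct rconn *rc) OVS_EXCLUDED(rc->mutex)
    {
        int error;

        ovs_mutex_lock(&rc->mutex);
        error = frob__(rc);
        ovs_mutex_unlock(&rc->mutex);
        return error;
    }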
/* Sends 'b' on 'rc'. Increments 'counter' while the packet is in flight; it
* will be decremented when it has been sent (or discarded due to
* disconnection). Returns 0 if successful, EAGAIN if 'counter->n' is already
int
rconn_send_with_limit(struct rconn *rc, struct ofpbuf *b,
struct rconn_packet_counter *counter, int queue_limit)
+ OVS_EXCLUDED(rc->mutex)
{
- if (counter->n_packets < queue_limit) {
- return rconn_send(rc, b, counter);
+ int error;
+
+ ovs_mutex_lock(&rc->mutex);
+ if (rconn_packet_counter_n_packets(counter) < queue_limit) {
+ error = rconn_send__(rc, b, counter);
} else {
COVERAGE_INC(rconn_overflow);
ofpbuf_delete(b);
- return EAGAIN;
+ error = EAGAIN;
}
+ ovs_mutex_unlock(&rc->mutex);
+
+ return error;
}
/* Returns the total number of packets successfully sent on the underlying
* and received on 'rconn' will be copied. 'rc' takes ownership of 'vconn'. */
void
rconn_add_monitor(struct rconn *rc, struct vconn *vconn)
+ OVS_EXCLUDED(rc->mutex)
{
+ ovs_mutex_lock(&rc->mutex);
if (rc->n_monitors < ARRAY_SIZE(rc->monitors)) {
VLOG_INFO("new monitor connection from %s", vconn_get_name(vconn));
rc->monitors[rc->n_monitors++] = vconn;
vconn_get_name(vconn));
vconn_close(vconn);
}
+ ovs_mutex_unlock(&rc->mutex);
}
/* Returns 'rc''s name. This is a name for human consumption, appropriate for
/* Sets 'rc''s name to 'new_name'. */
void
rconn_set_name(struct rconn *rc, const char *new_name)
+ OVS_EXCLUDED(rc->mutex)
{
+ ovs_mutex_lock(&rc->mutex);
free(rc->name);
rc->name = xstrdup(new_name);
+ ovs_mutex_unlock(&rc->mutex);
}
/* Returns 'rc''s target. This is intended to be a string that may be passed
return is_connected_state(rconn->state);
}
+static bool
+rconn_is_admitted__(const struct rconn *rconn)
+ OVS_REQUIRES(rconn->mutex)
+{
+ return (rconn_is_connected(rconn)
+ && rconn->last_admitted >= rconn->last_connected);
+}
+
/* Returns true if 'rconn' is connected and thought to have been accepted by
* the peer's admission-control policy. */
bool
rconn_is_admitted(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex)
{
- return (rconn_is_connected(rconn)
- && rconn->last_admitted >= rconn->last_connected);
+ bool admitted;
+
+ ovs_mutex_lock(&rconn->mutex);
+ admitted = rconn_is_admitted__(rconn);
+ ovs_mutex_unlock(&rconn->mutex);
+
+ return admitted;
}
/* Returns 0 if 'rconn' is currently connected and considered to have been
* seconds since 'rconn' was last in such a state. */
int
rconn_failure_duration(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex)
{
- return rconn_is_admitted(rconn) ? 0 : time_now() - rconn->last_admitted;
+ int duration;
+
+ ovs_mutex_lock(&rconn->mutex);
+ duration = (rconn_is_admitted__(rconn)
+ ? 0
+ : time_now() - rconn->last_admitted);
+ ovs_mutex_unlock(&rconn->mutex);
+
+ return duration;
}
/* Returns the IP address of the peer, or 0 if the peer's IP address is not
* connection does not contain a port or if the port is not known. */
ovs_be16
rconn_get_local_port(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex)
+{
+ ovs_be16 port;
+
+ ovs_mutex_lock(&rconn->mutex);
+ port = rconn->vconn ? vconn_get_local_port(rconn->vconn) : 0;
+ ovs_mutex_unlock(&rconn->mutex);
+
+ return port;
+}
+
+static int
+rconn_get_version__(const struct rconn *rconn)
+ OVS_REQUIRES(rconn->mutex)
{
- return rconn->vconn ? vconn_get_local_port(rconn->vconn) : 0;
+ return rconn->vconn ? vconn_get_version(rconn->vconn) : -1;
}
/* Returns the OpenFlow version negotiated with the peer, or -1 if there is
* currently no connection or if version negotiation is not yet complete. */
int
rconn_get_version(const struct rconn *rconn)
+ OVS_EXCLUDED(rconn->mutex)
{
- return rconn->vconn ? vconn_get_version(rconn->vconn) : -1;
+ int version;
+
+ ovs_mutex_lock(&rconn->mutex);
+ version = rconn_get_version__(rconn);
+ ovs_mutex_unlock(&rconn->mutex);
+
+ return version;
}
/* Returns the total number of packets successfully received by the underlying
/* Returns the number of messages queued for transmission on 'rc'. */
unsigned int
rconn_count_txqlen(const struct rconn *rc)
+ OVS_EXCLUDED(rc->mutex)
{
- return list_size(&rc->txq);
+ unsigned int len;
+
+ ovs_mutex_lock(&rc->mutex);
+ len = list_size(&rc->txq);
+ ovs_mutex_unlock(&rc->mutex);
+
+ return len;
}
\f
struct rconn_packet_counter *
rconn_packet_counter_create(void)
{
struct rconn_packet_counter *c = xzalloc(sizeof *c);
+ ovs_mutex_init(&c->mutex);
+ ovs_mutex_lock(&c->mutex);
c->ref_cnt = 1;
+ ovs_mutex_unlock(&c->mutex);
return c;
}
rconn_packet_counter_destroy(struct rconn_packet_counter *c)
{
if (c) {
+ bool dead;
+
+ ovs_mutex_lock(&c->mutex);
ovs_assert(c->ref_cnt > 0);
- if (!--c->ref_cnt && !c->n_packets) {
+ dead = !--c->ref_cnt && !c->n_packets;
+ ovs_mutex_unlock(&c->mutex);
+
+ if (dead) {
+ ovs_mutex_destroy(&c->mutex);
free(c);
}
}
void
rconn_packet_counter_inc(struct rconn_packet_counter *c, unsigned int n_bytes)
{
+ ovs_mutex_lock(&c->mutex);
c->n_packets++;
c->n_bytes += n_bytes;
+ ovs_mutex_unlock(&c->mutex);
}
void
rconn_packet_counter_dec(struct rconn_packet_counter *c, unsigned int n_bytes)
{
- ovs_assert(c->n_packets > 0);
- ovs_assert(c->n_bytes >= n_bytes);
+ bool dead = false;
- c->n_bytes -= n_bytes;
+ ovs_mutex_lock(&c->mutex);
+ ovs_assert(c->n_packets > 0);
+ ovs_assert(c->n_packets == 1
+ ? c->n_bytes == n_bytes
+ : c->n_bytes > n_bytes);
c->n_packets--;
- if (!c->n_packets) {
- ovs_assert(!c->n_bytes);
- if (!c->ref_cnt) {
- free(c);
- }
+ c->n_bytes -= n_bytes;
+ dead = !c->n_packets && !c->ref_cnt;
+ ovs_mutex_unlock(&c->mutex);
+
+ if (dead) {
+ ovs_mutex_destroy(&c->mutex);
+ free(c);
}
}
+
+unsigned int
+rconn_packet_counter_n_packets(const struct rconn_packet_counter *c)
+{
+ unsigned int n;
+
+ ovs_mutex_lock(&c->mutex);
+ n = c->n_packets;
+ ovs_mutex_unlock(&c->mutex);
+
+ return n;
+}
+
+unsigned int
+rconn_packet_counter_n_bytes(const struct rconn_packet_counter *c)
+{
+ unsigned int n;
+
+ ovs_mutex_lock(&c->mutex);
+ n = c->n_bytes;
+ ovs_mutex_unlock(&c->mutex);
+
+ return n;
+}
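(Sketch of the counter's shared-ownership rule as now implemented: storage is
released by whichever of the final rconn_packet_counter_dec() or
rconn_packet_counter_destroy() runs second, once both ref_cnt and n_packets
reach zero:)

    struct rconn_packet_counter *c = rconn_packet_counter_create();
    rconn_packet_counter_inc(c, 64);     /* a 64-byte packet is queued */
    rconn_packet_counter_destroy(c);     /* ref_cnt -> 0; 'c' stays alive */
    rconn_packet_counter_dec(c, 64);     /* n_packets -> 0; 'c' is freed */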
\f
/* Set rc->target and rc->name to 'target' and 'name', respectively. If 'name'
* is null, 'target' is used.
* the target also likely changes these values. */
static void
rconn_set_target__(struct rconn *rc, const char *target, const char *name)
+ OVS_REQUIRES(rc->mutex)
{
free(rc->name);
rc->name = xstrdup(name ? name : target);
* otherwise a positive errno value. */
static int
try_send(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
struct ofpbuf *msg = ofpbuf_from_list(rc->txq.next);
unsigned int n_bytes = msg->size;
* normally. */
static void
report_error(struct rconn *rc, int error)
+ OVS_REQUIRES(rc->mutex)
{
if (error == EOF) {
/* If 'rc' isn't reliable, then we don't really expect this connection
*/
static void
disconnect(struct rconn *rc, int error)
+ OVS_REQUIRES(rc->mutex)
{
rc->last_error = error;
if (rc->reliable) {
state_transition(rc, S_BACKOFF);
} else {
rc->last_disconnected = time_now();
- rconn_disconnect(rc);
+ rconn_disconnect__(rc);
}
}
* counts. */
static void
flush_queue(struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
if (list_is_empty(&rc->txq)) {
return;
static unsigned int
elapsed_in_this_state(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return time_now() - rc->state_entered;
}
static unsigned int
timeout(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
switch (rc->state) {
#define STATE(NAME, VALUE) case S_##NAME: return timeout_##NAME(rc);
static bool
timed_out(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return time_now() >= sat_add(rc->state_entered, timeout(rc));
}
static void
state_transition(struct rconn *rc, enum state state)
+ OVS_REQUIRES(rc->mutex)
{
rc->seqno += (rc->state == S_ACTIVE) != (state == S_ACTIVE);
if (is_connected_state(state) && !is_connected_state(rc->state)) {
static void
close_monitor(struct rconn *rc, size_t idx, int retval)
+ OVS_REQUIRES(rc->mutex)
{
VLOG_DBG("%s: closing monitor connection to %s: %s",
rconn_get_name(rc), vconn_get_name(rc->monitors[idx]),
static void
copy_to_monitor(struct rconn *rc, const struct ofpbuf *b)
+ OVS_REQUIRES(rc->mutex)
{
struct ofpbuf *clone = NULL;
int retval;
* successfully connected in too long. */
static bool
rconn_logging_connection_attempts__(const struct rconn *rc)
+ OVS_REQUIRES(rc->mutex)
{
return rc->backoff < rc->max_backoff;
}
/*
- * Copyright (c) 2008, 2009, 2010, 2011, 2012 Nicira, Inc.
+ * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
#include <stdint.h>
#include <time.h>
#include "openvswitch/types.h"
+#include "ovs-thread.h"
/* A wrapper around vconn that provides queuing and optionally reliability.
*
* An rconn optionally provides reliable communication, in this sense: the
* rconn will re-connect, with exponential backoff, when the underlying vconn
* disconnects.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * Fully thread-safe.
*/
struct vconn;
/* Counts packets and bytes queued into an rconn by a given source. */
struct rconn_packet_counter {
- unsigned int n_packets; /* Number of packets queued. */
- unsigned int n_bytes; /* Number of bytes queued. */
- int ref_cnt; /* Number of owners. */
+ struct ovs_mutex mutex;
+ unsigned int n_packets OVS_GUARDED; /* Number of packets queued. */
+ unsigned int n_bytes OVS_GUARDED; /* Number of bytes queued. */
+ int ref_cnt OVS_GUARDED; /* Number of owners. */
};
struct rconn_packet_counter *rconn_packet_counter_create(void);
void rconn_packet_counter_inc(struct rconn_packet_counter *, unsigned n_bytes);
void rconn_packet_counter_dec(struct rconn_packet_counter *, unsigned n_bytes);
+unsigned int rconn_packet_counter_n_packets(
+ const struct rconn_packet_counter *);
+unsigned int rconn_packet_counter_n_bytes(const struct rconn_packet_counter *);
+
#endif /* rconn.h */
int retval;
time_init();
+ coverage_clear();
+ coverage_run();
if (*last_wakeup) {
log_poll_interval(*last_wakeup);
}
- coverage_clear();
- coverage_run();
start = time_msec();
timeout_when = MIN(timeout_when, deadline);
VLOG_DEFINE_THIS_MODULE(connmgr);
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
-/* An OpenFlow connection. */
+/* An OpenFlow connection.
+ *
+ *
+ * Thread-safety
+ * =============
+ *
+ * 'ofproto_mutex' must be held whenever an ofconn is created or destroyed or,
+ * more or less equivalently, whenever an ofconn is added to or removed from a
+ * connmgr. 'ofproto_mutex' doesn't protect the data inside the ofconn, except
+ * as specifically noted below. */
struct ofconn {
/* Configuration that persists from one connection to the next. */
uint32_t master_async_config[OAM_N_TYPES]; /* master, other */
uint32_t slave_async_config[OAM_N_TYPES]; /* slave */
- /* Flow monitors. */
- struct hmap monitors; /* Contains "struct ofmonitor"s. */
- struct list updates; /* List of "struct ofpbuf"s. */
- bool sent_abbrev_update; /* Does 'updates' contain NXFME_ABBREV? */
- struct rconn_packet_counter *monitor_counter;
- uint64_t monitor_paused;
+/* Flow monitors (e.g. NXST_FLOW_MONITOR). */
+
+ /* Configuration. Contains "struct ofmonitor"s. */
+ struct hmap monitors OVS_GUARDED_BY(ofproto_mutex);
+
+ /* Flow control.
+ *
+ * When too many flow monitor notifications back up in the transmit buffer,
+ * we pause the transmission of further notifications. These members track
+ * the flow control state.
+ *
+ * When notifications are flowing, 'monitor_paused' is 0. When
+ * notifications are paused, 'monitor_paused' is the value of
+ * 'monitor_seqno' at the point we paused.
+ *
+ * 'monitor_counter' counts the OpenFlow messages and bytes currently in
+ * flight. This value growing too large triggers pausing. */
+ uint64_t monitor_paused OVS_GUARDED_BY(ofproto_mutex);
+ struct rconn_packet_counter *monitor_counter OVS_GUARDED_BY(ofproto_mutex);
+
+ /* State of monitors for a single ongoing flow_mod.
+ *
+ * 'updates' is a list of "struct ofpbuf"s that contain
+ * NXST_FLOW_MONITOR_REPLY messages representing the changes made by the
+ * current flow_mod.
+ *
+ * When 'updates' is nonempty, 'sent_abbrev_update' is true if 'updates'
+ * contains an update event of type NXFME_ABBREV and false otherwise. */
+ struct list updates OVS_GUARDED_BY(ofproto_mutex);
+ bool sent_abbrev_update OVS_GUARDED_BY(ofproto_mutex);
};
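(The pause/resume protocol in brief; a condensed sketch of the logic in
ofmonitor_flush() and ofmonitor_run() below, with 'monitor_seqno' as described
in the comment above:)

    /* Pause when too many notification bytes are still in flight... */
    if (!ofconn->monitor_paused
        && rconn_packet_counter_n_bytes(ofconn->monitor_counter) > 128 * 1024) {
        ofconn->monitor_paused = monitor_seqno;  /* and send a "paused" msg */
    }

    /* ...resume once the transmit queue fully drains. */
    if (ofconn->monitor_paused
        && !rconn_packet_counter_n_packets(ofconn->monitor_counter)) {
        ofmonitor_resume(ofconn);                /* replays missed changes,
                                                    sets monitor_paused = 0 */
    }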
static struct ofconn *ofconn_create(struct connmgr *, struct rconn *,
- enum ofconn_type, bool enable_async_msgs);
-static void ofconn_destroy(struct ofconn *);
-static void ofconn_flush(struct ofconn *);
+ enum ofconn_type, bool enable_async_msgs)
+ OVS_REQUIRES(ofproto_mutex);
+static void ofconn_destroy(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
+static void ofconn_flush(struct ofconn *) OVS_REQUIRES(ofproto_mutex);
static void ofconn_reconfigure(struct ofconn *,
const struct ofproto_controller *);
return;
}
+ ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH_SAFE (ofconn, next_ofconn, node, &mgr->all_conns) {
ofconn_destroy(ofconn);
}
+ ovs_mutex_unlock(&ofproto_mutex);
+
hmap_destroy(&mgr->controllers);
HMAP_FOR_EACH_SAFE (ofservice, next_ofservice, node, &mgr->services) {
rconn_connect_unreliably(rconn, vconn, name);
free(name);
+ ovs_mutex_lock(&ofproto_mutex);
ofconn = ofconn_create(mgr, rconn, OFCONN_SERVICE,
ofservice->enable_async_msgs);
+ ovs_mutex_unlock(&ofproto_mutex);
+
ofconn_set_rate_limit(ofconn, ofservice->rate_limit,
ofservice->burst_limit);
} else if (retval != EAGAIN) {
/* OpenFlow configuration. */
static void add_controller(struct connmgr *, const char *target, uint8_t dscp,
- uint32_t allowed_versions);
+ uint32_t allowed_versions)
+ OVS_REQUIRES(ofproto_mutex);
static struct ofconn *find_controller_by_target(struct connmgr *,
const char *target);
static void update_fail_open(struct connmgr *);
connmgr_set_controllers(struct connmgr *mgr,
const struct ofproto_controller *controllers,
size_t n_controllers, uint32_t allowed_versions)
+ OVS_EXCLUDED(ofproto_mutex)
{
bool had_controllers = connmgr_has_controllers(mgr);
struct shash new_controllers;
struct ofservice *ofservice, *next_ofservice;
size_t i;
+ /* Required to add and remove ofconns. This could probably be narrowed to
+ * cover a smaller amount of code, if that yielded some benefit. */
+ ovs_mutex_lock(&ofproto_mutex);
+
/* Create newly configured controllers and services.
* Create a name to ofproto_controller mapping in 'new_controllers'. */
shash_init(&new_controllers);
if (had_controllers != connmgr_has_controllers(mgr)) {
ofproto_flush_flows(mgr->ofproto);
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
/* Drops the connections between 'mgr' and all of its primary and secondary
static void
add_controller(struct connmgr *mgr, const char *target, uint8_t dscp,
uint32_t allowed_versions)
+ OVS_REQUIRES(ofproto_mutex)
{
char *name = ofconn_make_name(mgr, target);
struct ofconn *ofconn;
* connection to the next. */
static void
ofconn_flush(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *monitor, *next_monitor;
int i;
static void
ofconn_destroy(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
ofconn_flush(ofconn);
static bool
ofconn_may_recv(const struct ofconn *ofconn)
{
- int count = ofconn->reply_counter->n_packets;
+ int count = rconn_packet_counter_n_packets(ofconn->reply_counter);
return (!ofconn->blocked || ofconn->retry) && count < OFCONN_REPLY_MAX;
}
}
}
+ ovs_mutex_lock(&ofproto_mutex);
if (!rconn_is_alive(ofconn->rconn)) {
ofconn_destroy(ofconn);
} else if (!rconn_is_connected(ofconn->rconn)) {
ofconn_flush(ofconn);
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
static void
enum ofperr
ofmonitor_create(const struct ofputil_flow_monitor_request *request,
struct ofconn *ofconn, struct ofmonitor **monitorp)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *m;
struct ofmonitor *
ofmonitor_lookup(struct ofconn *ofconn, uint32_t id)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofmonitor *m;
void
ofmonitor_destroy(struct ofmonitor *m)
+ OVS_REQUIRES(ofproto_mutex)
{
if (m) {
minimatch_destroy(&m->match);
void
ofmonitor_flush(struct connmgr *mgr)
+ OVS_REQUIRES(ofproto_mutex)
{
struct ofconn *ofconn;
struct ofpbuf *msg, *next;
LIST_FOR_EACH_SAFE (msg, next, list_node, &ofconn->updates) {
+ unsigned int n_bytes;
+
list_remove(&msg->list_node);
ofconn_send(ofconn, msg, ofconn->monitor_counter);
- if (!ofconn->monitor_paused
- && ofconn->monitor_counter->n_bytes > 128 * 1024) {
+ n_bytes = rconn_packet_counter_n_bytes(ofconn->monitor_counter);
+ if (!ofconn->monitor_paused && n_bytes > 128 * 1024) {
struct ofpbuf *pause;
COVERAGE_INC(ofmonitor_pause);
static void
ofmonitor_resume(struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
{
struct rule_collection rules;
struct ofpbuf *resumed;
ofconn->monitor_paused = 0;
}
+static bool
+ofmonitor_may_resume(const struct ofconn *ofconn)
+ OVS_REQUIRES(ofproto_mutex)
+{
+ return (ofconn->monitor_paused != 0
+ && !rconn_packet_counter_n_packets(ofconn->monitor_counter));
+}
+
static void
ofmonitor_run(struct connmgr *mgr)
{
struct ofconn *ofconn;
+ ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- if (ofconn->monitor_paused && !ofconn->monitor_counter->n_packets) {
+ if (ofmonitor_may_resume(ofconn)) {
COVERAGE_INC(ofmonitor_resume);
ofmonitor_resume(ofconn);
}
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
static void
{
struct ofconn *ofconn;
+ ovs_mutex_lock(&ofproto_mutex);
LIST_FOR_EACH (ofconn, node, &mgr->all_conns) {
- if (ofconn->monitor_paused && !ofconn->monitor_counter->n_packets) {
+ if (ofmonitor_may_resume(ofconn)) {
poll_immediate_wake();
}
}
+ ovs_mutex_unlock(&ofproto_mutex);
}
struct ofputil_flow_monitor_request;
enum ofperr ofmonitor_create(const struct ofputil_flow_monitor_request *,
- struct ofconn *, struct ofmonitor **);
-struct ofmonitor *ofmonitor_lookup(struct ofconn *, uint32_t id);
-void ofmonitor_destroy(struct ofmonitor *);
+ struct ofconn *, struct ofmonitor **)
+ OVS_REQUIRES(ofproto_mutex);
+struct ofmonitor *ofmonitor_lookup(struct ofconn *, uint32_t id)
+ OVS_REQUIRES(ofproto_mutex);
+void ofmonitor_destroy(struct ofmonitor *)
+ OVS_REQUIRES(ofproto_mutex);
void ofmonitor_report(struct connmgr *, struct rule *,
enum nx_flow_update_event, enum ofp_flow_removed_reason,
const struct ofconn *abbrev_ofconn, ovs_be32 abbrev_xid)
OVS_REQUIRES(ofproto_mutex);
-void ofmonitor_flush(struct connmgr *);
+void ofmonitor_flush(struct connmgr *) OVS_REQUIRES(ofproto_mutex);
struct rule_collection;
void ofmonitor_collect_resume_rules(struct ofmonitor *, uint64_t seqno,
- struct rule_collection *);
+ struct rule_collection *)
+ OVS_REQUIRES(ofproto_mutex);
void ofmonitor_compose_refresh_updates(struct rule_collection *rules,
- struct list *msgs);
+ struct list *msgs)
+ OVS_REQUIRES(ofproto_mutex);
#endif /* connmgr.h */
void
ofproto_dpif_monitor_run_fast(void)
{
- struct mport *mport;
- static uint32_t buf_stub[128 / 4];
+ uint32_t stub[512 / 4];
struct ofpbuf packet;
+ struct mport *mport;
+ ofpbuf_use_stub(&packet, stub, sizeof stub);
ovs_rwlock_rdlock(&monitor_rwlock);
HMAP_FOR_EACH (mport, hmap_node, &monitor_hmap) {
if (mport->cfm && cfm_should_send_ccm(mport->cfm)) {
- ofpbuf_use_stub(&packet, buf_stub, sizeof buf_stub);
+ ofpbuf_clear(&packet);
cfm_compose_ccm(mport->cfm, &packet, mport->hw_addr);
ofproto_dpif_send_packet(mport->ofport, &packet);
}
if (mport->bfd && bfd_should_send_packet(mport->bfd)) {
- ofpbuf_use_stub(&packet, buf_stub, sizeof buf_stub);
+ ofpbuf_clear(&packet);
bfd_put_packet(mport->bfd, &packet, mport->hw_addr);
ofproto_dpif_send_packet(mport->ofport, &packet);
}
}
ovs_rwlock_unlock(&monitor_rwlock);
+ ofpbuf_uninit(&packet);
}
/* Executes bfd_run(), cfm_run() on all mports. */
#include "ofpbuf.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-sflow.h"
-#include "ofproto-dpif.h"
#include "packets.h"
#include "poll-loop.h"
#include "vlog.h"
ovs_mutex_unlock(&handler->mutex);
handle_upcalls(handler->udpif, &misses);
+
+ coverage_clear();
}
}
\f
* all the packets in each miss. */
fail_open = false;
HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
- struct flow_wildcards wc;
- struct rule_dpif *rule;
struct xlate_in xin;
- flow_wildcards_init_catchall(&wc);
- rule_dpif_lookup(miss->ofproto, &miss->flow, &wc, &rule);
- if (rule_dpif_fail_open(rule)) {
- fail_open = true;
- }
- rule_dpif_credit_stats(rule, &miss->stats);
- xlate_in_init(&xin, miss->ofproto, &miss->flow, rule,
+ xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL,
miss->stats.tcp_flags, NULL);
xin.may_learn = true;
xin.resubmit_stats = &miss->stats;
xlate_actions(&xin, &miss->xout);
- flow_wildcards_or(&miss->xout.wc, &miss->xout.wc, &wc);
- rule_dpif_unref(rule);
+ fail_open = fail_open || miss->xout.fail_open;
}
/* Now handle the packets individually in order of arrival. In the common
struct ofpbuf *packet = upcall->dpif_upcall.packet;
if (miss->xout.slow) {
- struct rule_dpif *rule;
struct xlate_in xin;
- rule_dpif_lookup(miss->ofproto, &miss->flow, NULL, &rule);
- xlate_in_init(&xin, miss->ofproto, &miss->flow, rule, 0, packet);
+ xlate_in_init(&xin, miss->ofproto, &miss->flow, NULL, 0, packet);
xlate_actions_for_side_effects(&xin);
- rule_dpif_unref(rule);
}
if (miss->xout.odp_actions.size) {
op->u.execute.packet = packet;
op->u.execute.actions = miss->xout.odp_actions.data;
op->u.execute.actions_len = miss->xout.odp_actions.size;
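+ /* A SLOW_ACTION translation produced actions that the datapath cannot
+ * execute on its own, so ask the dpif layer to run them through
+ * dpif_execute_with_help(). */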
+ op->u.execute.needs_help = (miss->xout.slow & SLOW_ACTION) != 0;
}
}
special = process_special(ctx, &ctx->xin->flow, peer,
ctx->xin->packet);
if (special) {
- ctx->xout->slow = special;
+ ctx->xout->slow |= special;
} else if (may_receive(peer, ctx)) {
if (xport_stp_forward_state(peer)) {
xlate_table_action(ctx, flow->in_port.ofp_port, 0, true);
}
if (out_port != ODPP_NONE) {
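+ /* commit_odp_actions() now reports a slow-path reason when a flow
+ * change (such as an ARP field modification) has no datapath action,
+ * so accumulate its result into 'slow'. */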
- commit_odp_actions(flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc,
- &ctx->mpls_depth_delta);
+ ctx->xout->slow |= commit_odp_actions(flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
nl_msg_put_odp_port(&ctx->xout->odp_actions, OVS_ACTION_ATTR_OUTPUT,
out_port);
struct ofpbuf *packet;
struct flow key;
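+
+ /* 'slow' is now a bitmask that may hold several reasons at once, so
+ * accumulate SLOW_CONTROLLER rather than asserting exclusivity. */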
- ovs_assert(!ctx->xout->slow || ctx->xout->slow == SLOW_CONTROLLER);
- ctx->xout->slow = SLOW_CONTROLLER;
+ ctx->xout->slow |= SLOW_CONTROLLER;
if (!ctx->xin->packet) {
return;
}
key.pkt_mark = 0;
memset(&key.tunnel, 0, sizeof key.tunnel);
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc,
- &ctx->mpls_depth_delta);
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
odp_execute_actions(NULL, packet, &key, ctx->xout->odp_actions.data,
ctx->xout->odp_actions.size, NULL, NULL);
* the same percentage. */
uint32_t probability = (os->probability << 16) | os->probability;
- commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
- &ctx->xout->odp_actions, &ctx->xout->wc,
- &ctx->mpls_depth_delta);
+ ctx->xout->slow |= commit_odp_actions(&ctx->xin->flow, &ctx->base_flow,
+ &ctx->xout->odp_actions,
+ &ctx->xout->wc,
+ &ctx->mpls_depth_delta);
compose_flow_sample_cookie(os->probability, os->collector_set_id,
os->obs_domain_id, os->obs_point_id, &cookie);
{
struct flow_wildcards *wc = &xout->wc;
struct flow *flow = &xin->flow;
+ struct rule_dpif *rule = NULL;
struct rule_actions *actions = NULL;
enum slow_path_reason special;
ctx.exit = false;
ctx.mpls_depth_delta = 0;
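+
+ /* If the caller supplied neither explicit actions nor a rule, look the
+ * rule up here; the reference taken by rule_dpif_lookup() is released
+ * at 'out' below. */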
+ if (!xin->ofpacts && !ctx.rule) {
+ rule_dpif_lookup(ctx.xbridge->ofproto, flow, wc, &rule);
+ if (ctx.xin->resubmit_stats) {
+ rule_dpif_credit_stats(rule, ctx.xin->resubmit_stats);
+ }
+ ctx.rule = rule;
+ }
+ xout->fail_open = ctx.rule && rule_dpif_fail_open(ctx.rule);
+
if (xin->ofpacts) {
ofpacts = xin->ofpacts;
ofpacts_len = xin->ofpacts_len;
- } else if (xin->rule) {
- actions = rule_dpif_get_actions(xin->rule);
+ } else if (ctx.rule) {
+ actions = rule_dpif_get_actions(ctx.rule);
ofpacts = actions->ofpacts;
ofpacts_len = actions->ofpacts_len;
} else {
in_port = get_ofp_port(ctx.xbridge, flow->in_port.ofp_port);
special = process_special(&ctx, flow, in_port, ctx.xin->packet);
if (special) {
- ctx.xout->slow = special;
+ ctx.xout->slow |= special;
} else {
size_t sample_actions_len;
out:
rule_actions_unref(actions);
+ rule_dpif_unref(rule);
}
/* Sends 'packet' out 'ofport'.
int
xlate_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
{
- uint64_t odp_actions_stub[1024 / 8];
struct xport *xport;
- struct ofpbuf key, odp_actions;
- struct dpif_flow_stats stats;
- struct odputil_keybuf keybuf;
struct ofpact_output output;
- struct xlate_out xout;
- struct xlate_in xin;
struct flow flow;
union flow_in_port in_port_;
int error;
- ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
ofpact_init(&output.ofpact, OFPACT_OUTPUT, sizeof output);
/* Use OFPP_NONE as the in_port to avoid special packet processing. */
in_port_.ofp_port = OFPP_NONE;
ovs_rwlock_rdlock(&xlate_rwlock);
xport = xport_lookup(ofport);
if (!xport) {
- error = EINVAL;
ovs_rwlock_unlock(&xlate_rwlock);
- goto out;
+ return EINVAL;
}
-
- odp_flow_key_from_flow(&key, &flow, ofp_port_to_odp_port(xport->xbridge, OFPP_LOCAL));
- dpif_flow_stats_extract(&flow, packet, time_msec(), &stats);
output.port = xport->ofp_port;
output.max_len = 0;
- xlate_in_init(&xin, xport->xbridge->ofproto, &flow, NULL, 0, packet);
- xin.ofpacts_len = sizeof output;
- xin.ofpacts = &output.ofpact;
- xin.resubmit_stats = &stats;
- /* Calls xlate_actions__ directly, since the rdlock is acquired. */
- xlate_actions__(&xin, &xout);
- error = dpif_execute(xport->xbridge->dpif,
- key.data, key.size,
- xout.odp_actions.data, xout.odp_actions.size,
- packet);
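+ /* ofproto_dpif_execute_actions() now takes care of stats extraction,
+ * translation, and execution (including any slow-path help). */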
+ error = ofproto_dpif_execute_actions(xport->xbridge->ofproto, &flow, NULL,
+ &output.ofpact, sizeof output,
+ packet);
ovs_rwlock_unlock(&xlate_rwlock);
-
-out:
- xlate_out_uninit(&xout);
return error;
}
struct flow_wildcards wc;
enum slow_path_reason slow; /* 0 if fast path may be used. */
+ bool fail_open; /* Initial rule is fail open? */
bool has_learn; /* Actions include NXAST_LEARN? */
bool has_normal; /* Actions output to OFPP_NORMAL? */
bool has_fin_timeout; /* Actions include NXAST_FIN_TIMEOUT? */
* not if we are just revalidating. */
bool may_learn;
- /* The rule initiating translation or NULL. */
+ /* The rule initiating translation or NULL. If both 'rule' and 'ofpacts'
+ * are NULL, xlate_actions() will do the initial rule lookup itself. */
struct rule_dpif *rule;
/* The actions to translate. If 'rule' is not NULL, these may be NULL. */
}
}
-/* Executes, within 'ofproto', the 'n_actions' actions in 'actions' on
- * 'packet', which arrived on 'in_port'. */
-static bool
-execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
- const struct nlattr *odp_actions, size_t actions_len,
- struct ofpbuf *packet)
+/* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
+ * Exactly one of 'rule' and 'ofpacts' must be nonnull, and 'flow' must
+ * reflect the data in 'packet'. */
+int
+ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
+ const struct flow *flow,
+ struct rule_dpif *rule,
+ const struct ofpact *ofpacts, size_t ofpacts_len,
+ struct ofpbuf *packet)
{
struct odputil_keybuf keybuf;
+ struct dpif_flow_stats stats;
+ struct xlate_out xout;
+ struct xlate_in xin;
+ ofp_port_t in_port;
struct ofpbuf key;
int error;
+ ovs_assert((rule != NULL) != (ofpacts != NULL));
+
+ dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
+ if (rule) {
+ rule_dpif_credit_stats(rule, &stats);
+ }
+
+ xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
+ xin.ofpacts = ofpacts;
+ xin.ofpacts_len = ofpacts_len;
+ xin.resubmit_stats = &stats;
+ xlate_actions(&xin, &xout);
+
ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto, flow->in_port.ofp_port));
+ in_port = flow->in_port.ofp_port;
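+ /* The flow key needs a real datapath port, so map OFPP_NONE (used by
+ * callers such as xlate_send_packet()) to the local port. */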
+ if (in_port == OFPP_NONE) {
+ in_port = OFPP_LOCAL;
+ }
+ odp_flow_key_from_flow(&key, flow, ofp_port_to_odp_port(ofproto, in_port));
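+ /* The final argument asks dpif_execute() for userspace help when
+ * translation flagged the actions with SLOW_ACTION. */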
error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
- odp_actions, actions_len, packet);
- return !error;
+ xout.odp_actions.data, xout.odp_actions.size, packet,
+ (xout.slow & SLOW_ACTION) != 0);
+ xlate_out_uninit(&xout);
+
+ return error;
}
/* Remove 'facet' from its ofproto and free up the associated memory:
struct xlate_out xout;
struct xlate_in xin;
-
- struct rule_dpif *rule;
bool ok;
/* Check the datapath actions for consistency. */
- rule_dpif_lookup(facet->ofproto, &facet->flow, NULL, &rule);
- xlate_in_init(&xin, facet->ofproto, &facet->flow, rule, 0, NULL);
+ xlate_in_init(&xin, facet->ofproto, &facet->flow, NULL, 0, NULL);
xlate_actions(&xin, &xout);
- rule_dpif_unref(rule);
ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
&& facet->xout.slow == xout.slow;
struct dpif_flow_stats *stats, bool may_learn)
{
struct ofport_dpif *in_port;
- struct rule_dpif *rule;
struct xlate_in xin;
in_port = get_ofp_port(ofproto, flow->in_port.ofp_port);
netdev_vport_inc_rx(in_port->up.netdev, stats);
}
- rule_dpif_lookup(ofproto, flow, NULL, &rule);
- rule_dpif_credit_stats(rule, stats);
- xlate_in_init(&xin, ofproto, flow, rule, stats->tcp_flags, NULL);
+ xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
xin.resubmit_stats = stats;
xin.may_learn = may_learn;
xlate_actions_for_side_effects(&xin);
- rule_dpif_unref(rule);
}
static void
struct ofpbuf *packet)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
- struct dpif_flow_stats stats;
- struct xlate_out xout;
- struct xlate_in xin;
- dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
- rule_dpif_credit_stats(rule, &stats);
-
- xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
- xin.resubmit_stats = &stats;
- xlate_actions(&xin, &xout);
-
- execute_odp_actions(ofproto, flow, xout.odp_actions.data,
- xout.odp_actions.size, packet);
-
- xlate_out_uninit(&xout);
+ ofproto_dpif_execute_actions(ofproto, flow, rule, NULL, 0, packet);
}
static enum ofperr
const struct ofpact *ofpacts, size_t ofpacts_len)
{
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
- struct odputil_keybuf keybuf;
- struct dpif_flow_stats stats;
- struct xlate_out xout;
- struct xlate_in xin;
- struct ofpbuf key;
-
-
- ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
- odp_flow_key_from_flow(&key, flow,
- ofp_port_to_odp_port(ofproto,
- flow->in_port.ofp_port));
-
- dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
-
- xlate_in_init(&xin, ofproto, flow, NULL, stats.tcp_flags, packet);
- xin.resubmit_stats = &stats;
- xin.ofpacts_len = ofpacts_len;
- xin.ofpacts = ofpacts;
-
- xlate_actions(&xin, &xout);
- dpif_execute(ofproto->backer->dpif, key.data, key.size,
- xout.odp_actions.data, xout.odp_actions.size, packet);
- xlate_out_uninit(&xout);
+ ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
+ ofpacts_len, packet);
return 0;
}
\f
trace.xout.odp_actions.size);
if (trace.xout.slow) {
+ enum slow_path_reason slow;
+
ds_put_cstr(ds, "\nThis flow is handled by the userspace "
"slow path because it:");
- switch (trace.xout.slow) {
- case SLOW_CFM:
- ds_put_cstr(ds, "\n\t- Consists of CFM packets.");
- break;
- case SLOW_LACP:
- ds_put_cstr(ds, "\n\t- Consists of LACP packets.");
- break;
- case SLOW_STP:
- ds_put_cstr(ds, "\n\t- Consists of STP packets.");
- break;
- case SLOW_BFD:
- ds_put_cstr(ds, "\n\t- Consists of BFD packets.");
- break;
- case SLOW_CONTROLLER:
- ds_put_cstr(ds, "\n\t- Sends \"packet-in\" messages "
- "to the OpenFlow controller.");
- break;
- case __SLOW_MAX:
- NOT_REACHED();
+
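+ /* 'slow' may contain several reasons at once; explain each set bit. */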
+ slow = trace.xout.slow;
+ while (slow) {
+ enum slow_path_reason bit = rightmost_1bit(slow);
+
+ ds_put_format(ds, "\n\t- %s.",
+ slow_path_reason_to_explanation(bit));
+
+ slow &= ~bit;
}
}
ovs_be16 vlan_tci);
bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
+int ofproto_dpif_execute_actions(struct ofproto_dpif *, const struct flow *,
+ struct rule_dpif *, const struct ofpact *,
+ size_t ofpacts_len, struct ofpbuf *);
void ofproto_dpif_send_packet_in(struct ofproto_dpif *,
struct ofputil_packet_in *pin);
int ofproto_dpif_send_packet(const struct ofport_dpif *, struct ofpbuf *);
return 0;
error:
- ovs_mutex_unlock(&ofproto_mutex);
-
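+ /* ofmonitor_destroy() requires ofproto_mutex, so hold the mutex until
+ * all of the partially constructed monitors have been destroyed. */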
for (i = 0; i < n_monitors; i++) {
ofmonitor_destroy(monitors[i]);
}
free(monitors);
+ ovs_mutex_unlock(&ofproto_mutex);
+
return error;
}
])
AT_CLEANUP
+# ofp_table_mod.config is actually "reserved for future use" in OF1.3.
AT_SETUP([OFPT_TABLE_MOD - OF1.3])
AT_KEYWORDS([ofp-print])
AT_CHECK([ovs-ofctl ofp-print "\
cookie=0xb dl_src=50:55:55:55:55:55 dl_type=0x8847 actions=load:1000->OXM_OF_MPLS_LABEL[[]],controller
cookie=0xd dl_src=60:66:66:66:66:66 actions=pop_mpls:0x0800,controller
cookie=0xc dl_src=70:77:77:77:77:77 actions=push_mpls:0x8848,load:1000->OXM_OF_MPLS_LABEL[[]],load:7->OXM_OF_MPLS_TC[[]],controller
+cookie=0xd dl_src=80:88:88:88:88:88 arp actions=load:2->OXM_OF_ARP_OP[[]],controller,load:0xc0a88001->OXM_OF_ARP_SPA[[]],controller,load:0x404444444441->OXM_OF_ARP_THA[[]],controller
])
AT_CHECK([ovs-ofctl add-flows br0 flows.txt])
udp,metadata=0,in_port=0,dl_vlan=80,dl_vlan_pcp=0,dl_src=80:81:81:81:81:81,dl_dst=82:82:82:82:82:82,nw_src=83.83.83.83,nw_dst=84.84.84.84,nw_tos=0,nw_ecn=0,nw_ttl=0,tp_src=85,tp_dst=86 udp_csum:43a1
])
-ovs-appctl time/stop
+dnl Modified ARP controller action.
+AT_CHECK([ovs-ofctl monitor br0 65534 -P nxm --detach --pidfile 2> ofctl_monitor.log])
+
+for i in 1 2 3; do
+ ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=80:88:88:88:88:88,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),arp(sip=192.168.0.1,tip=192.168.0.2,op=1,sha=50:54:00:00:00:05,tha=00:00:00:00:00:00)'
+done
+
+OVS_WAIT_UNTIL([ovs-appctl -t ovs-ofctl exit])
+AT_CHECK([cat ofctl_monitor.log], [0], [dnl
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+NXT_PACKET_IN (xid=0x0): cookie=0xd total_len=60 in_port=1 (via action) data_len=60 (unbuffered)
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+])
+
AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
dnl Checksum SCTP.
cookie=0xa, n_packets=3, n_bytes=180, dl_src=41:44:44:44:44:42 actions=push_mpls:0x8847,load:0xa->OXM_OF_MPLS_LABEL[[]],load:0x3->OXM_OF_MPLS_TC[[]],pop_mpls:0x0800,CONTROLLER:65535
cookie=0xb, n_packets=3, n_bytes=180, mpls,dl_src=50:55:55:55:55:55 actions=load:0x3e8->OXM_OF_MPLS_LABEL[[]],CONTROLLER:65535
cookie=0xc, n_packets=3, n_bytes=180, dl_src=70:77:77:77:77:77 actions=push_mpls:0x8848,load:0x3e8->OXM_OF_MPLS_LABEL[[]],load:0x7->OXM_OF_MPLS_TC[[]],CONTROLLER:65535
+ cookie=0xd, n_packets=3, n_bytes=180, arp,dl_src=80:88:88:88:88:88 actions=load:0x2->NXM_OF_ARP_OP[[]],CONTROLLER:65535,load:0xc0a88001->NXM_OF_ARP_SPA[[]],CONTROLLER:65535,load:0x404444444441->NXM_NX_ARP_THA[[]],CONTROLLER:65535
cookie=0xd, n_packets=3, n_bytes=186, dl_src=60:66:66:66:66:66 actions=pop_mpls:0x0800,CONTROLLER:65535
n_packets=3, n_bytes=180, dl_src=10:11:11:11:11:11 actions=CONTROLLER:65535
NXST_FLOW reply:
OVS_VSWITCHD_STOP
AT_CLEANUP
+AT_SETUP([ofproto-dpif - ARP modification slow-path])
+OVS_VSWITCHD_START
+ADD_OF_PORTS([br0], [1], [2])
+
+ovs-vsctl -- set Interface p2 type=dummy options:pcap=p2.pcap
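+# The dummy netdev's pcap option records the packets sent on p2 to
+# p2.pcap, which "ovs-ofctl parse-pcap" reads back below.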
+ovs-ofctl add-flow br0 'in_port=1,arp actions=load:2->OXM_OF_ARP_OP[[]],2,load:0xc0a88001->OXM_OF_ARP_SPA[[]],2,load:0x404444444441->OXM_OF_ARP_THA[[]],2'
+
+# Inject some packets that should take the ARP-modification slow path.
+for i in 1 2 3; do
+ ovs-appctl netdev-dummy/receive p1 'in_port(1),eth(src=80:88:88:88:88:88,dst=ff:ff:ff:ff:ff:ff),eth_type(0x0806),arp(sip=192.168.0.1,tip=192.168.0.2,op=1,sha=50:54:00:00:00:05,tha=00:00:00:00:00:00)'
+done
+AT_CHECK([ovs-appctl time/warp 5000], [0], [ignore])
+
+# Check the packets that were output.
+AT_CHECK([ovs-ofctl parse-pcap p2.pcap], [0], [dnl
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.0.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=00:00:00:00:00:00
+arp,metadata=0,in_port=0,vlan_tci=0x0000,dl_src=80:88:88:88:88:88,dl_dst=ff:ff:ff:ff:ff:ff,arp_spa=192.168.128.1,arp_tpa=192.168.0.2,arp_op=2,arp_sha=50:54:00:00:00:05,arp_tha=40:44:44:44:44:41
+])
+
+# Check that each of the input packets actually passed through the slow path.
+AT_CHECK([ovs-appctl coverage/show], [0], [stdout])
+AT_CHECK([sed -n 's/[[ ]]\{2,\}/ /g
+s/^dpif_execute_with_help.*total: //p' stdout], [0], [3
+])
+
+OVS_VSWITCHD_STOP
+AT_CLEANUP
+
AT_SETUP([ofproto-dpif - VLAN handling])
OVS_VSWITCHD_START(
[set Bridge br0 fail-mode=standalone -- \
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "packets.h"
+#include "pcap-file.h"
#include "poll-loop.h"
#include "random.h"
#include "stream-ssl.h"
ds_destroy(&in);
}
+/* "parse-pcap PCAP": read packets from PCAP and print their flows. */
+static void
+ofctl_parse_pcap(int argc OVS_UNUSED, char *argv[])
+{
+ FILE *pcap;
+
+ pcap = pcap_open(argv[1], "rb");
+ if (!pcap) {
+ ovs_fatal(errno, "%s: open failed", argv[1]);
+ }
+
+ for (;;) {
+ struct ofpbuf *packet;
+ struct flow flow;
+ int error;
+
+ error = pcap_read(pcap, &packet);
+ if (error == EOF) {
+ break;
+ } else if (error) {
+ ovs_fatal(error, "%s: read failed", argv[1]);
+ }
+
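+ /* A pcap file carries no metadata, so extract the flow with zero
+ * skb_priority and pkt_mark and without tunnel or in_port info. */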
+ flow_extract(packet, 0, 0, NULL, NULL, &flow);
+ flow_print(stdout, &flow);
+ putchar('\n');
+ ofpbuf_delete(packet);
+ }
+}
+
/* "check-vlan VLAN_TCI VLAN_TCI_MASK": converts the specified vlan_tci and
* mask values to and from various formats and prints the results. */
static void
{ "parse-ofp11-match", 0, 0, ofctl_parse_ofp11_match },
{ "parse-ofp11-actions", 0, 0, ofctl_parse_ofp11_actions },
{ "parse-ofp11-instructions", 0, 0, ofctl_parse_ofp11_instructions },
+ { "parse-pcap", 1, 1, ofctl_parse_pcap },
{ "check-vlan", 2, 2, ofctl_check_vlan },
{ "print-error", 1, 1, ofctl_print_error },
{ "encode-error-reply", 2, 2, ofctl_encode_error_reply },