static int parse_odp_key_mask_attr(const char *, const struct simap *port_names,
struct ofpbuf *, struct ofpbuf *);
static void format_odp_key_attr(const struct nlattr *a,
- const struct nlattr *ma, struct ds *ds,
+ const struct nlattr *ma,
+ const struct hmap *portno_names, struct ds *ds,
bool verbose);
/* Returns one the following for the action with the given OVS_ACTION_ATTR_*
}
+/* Returns the short flag name (e.g. "cfm") for 'reason', a single SLOW_*
+ * slow-path reason bit, or NULL if the value is not a known reason.  The
+ * parameter is uint32_t rather than the enum so this function can be used
+ * directly as a format_flags()/parse_flags() callback. */
static const char *
-slow_path_reason_to_string(enum slow_path_reason reason)
+slow_path_reason_to_string(uint32_t reason)
{
-    switch (reason) {
-    case SLOW_CFM:
-        return "cfm";
-    case SLOW_LACP:
-        return "lacp";
-    case SLOW_STP:
-        return "stp";
-    case SLOW_BFD:
-        return "bfd";
-    case SLOW_CONTROLLER:
-        return "controller";
-    case __SLOW_MAX:
-    default:
-        return NULL;
+    /* Cast back to the enum so the SPR-generated cases type-check; the
+     * x-macro table replaces the hand-written case list so new reasons only
+     * need to be added in one place (SLOW_PATH_REASONS, defined in the
+     * header -- NOTE(review): confirm it enumerates every enum value). */
+    switch ((enum slow_path_reason) reason) {
+#define SPR(ENUM, STRING, EXPLANATION) case ENUM: return STRING;
+        SLOW_PATH_REASONS
+#undef SPR
    }
+
+    return NULL;
}
-static enum slow_path_reason
-string_to_slow_path_reason(const char *string)
+/* Returns the human-readable EXPLANATION text for 'reason', a single SLOW_*
+ * slow-path reason, or "<unknown>" if 'reason' is not one of the values
+ * generated from the SLOW_PATH_REASONS x-macro table.  (The old
+ * string_to_slow_path_reason() lookup is superseded by parse_flags() with
+ * slow_path_reason_to_string() as the callback.) */
+const char *
+slow_path_reason_to_explanation(enum slow_path_reason reason)
{
-    enum slow_path_reason i;
-
-    for (i = 1; i < __SLOW_MAX; i++) {
-        if (!strcmp(string, slow_path_reason_to_string(i))) {
-            return i;
-        }
+    switch (reason) {
+#define SPR(ENUM, STRING, EXPLANATION) case ENUM: return EXPLANATION;
+        SLOW_PATH_REASONS
+#undef SPR
    }
-    return 0;
+    return "<unknown>";
}
static int
cookie.sflow.output);
} else if (userdata_len == sizeof cookie.slow_path
&& cookie.type == USER_ACTION_COOKIE_SLOW_PATH) {
- const char *reason;
- reason = slow_path_reason_to_string(cookie.slow_path.reason);
- reason = reason ? reason : "";
- ds_put_format(ds, ",slow_path(%s)", reason);
+ ds_put_cstr(ds, ",slow_path(");
+ format_flags(ds, slow_path_reason_to_string,
+ cookie.slow_path.reason, ',');
+ ds_put_format(ds, ")");
} else if (userdata_len == sizeof cookie.flow_sample
&& cookie.type == USER_ACTION_COOKIE_FLOW_SAMPLE) {
ds_put_format(ds, ",flow_sample(probability=%"PRIu16
break;
case OVS_ACTION_ATTR_SET:
ds_put_cstr(ds, "set(");
- format_odp_key_attr(nl_attr_get(a), NULL, ds, true);
+ format_odp_key_attr(nl_attr_get(a), NULL, NULL, ds, true);
ds_put_cstr(ds, ")");
break;
case OVS_ACTION_ATTR_PUSH_VLAN:
odp_put_userspace_action(pid, &cookie, sizeof cookie.sflow,
actions);
return n;
- } else if (sscanf(s, "userspace(pid=%lli,slow_path(%n", &pid, &n) > 0
+ } else if (sscanf(s, "userspace(pid=%lli,slow_path%n", &pid, &n) > 0
&& n > 0) {
union user_action_cookie cookie;
- char reason[32];
-
- if (s[n] == ')' && s[n + 1] == ')') {
- reason[0] = '\0';
- n += 2;
- } else if (sscanf(s + n, "%31[^)]))", reason) > 0) {
- n += strlen(reason) + 2;
- } else {
- return -EINVAL;
- }
+ int res;
cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
cookie.slow_path.unused = 0;
- cookie.slow_path.reason = string_to_slow_path_reason(reason);
+ cookie.slow_path.reason = 0;
- if (reason[0] && !cookie.slow_path.reason) {
+ res = parse_flags(&s[n], slow_path_reason_to_string,
+ &cookie.slow_path.reason);
+ if (res < 0) {
+ return res;
+ }
+ n += res;
+ if (s[n] != ')') {
return -EINVAL;
}
+ n++;
odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path,
actions);
return is_exact;
}
+/* Records a mapping from datapath port number 'port_no' to 'port_name' in
+ * 'portno_names', for later lookup by odp_portno_names_get().  Takes a copy
+ * of 'port_name' (via xstrdup), so the caller keeps ownership of its
+ * argument.  Entries must eventually be released with
+ * odp_portno_names_destroy(). */
+void
+odp_portno_names_set(struct hmap *portno_names, odp_port_t port_no,
+                     char *port_name)
+{
+    struct odp_portno_names *odp_portno_names;
+
+    odp_portno_names = xmalloc(sizeof *odp_portno_names);
+    odp_portno_names->port_no = port_no;
+    odp_portno_names->name = xstrdup(port_name);
+    /* Hash on the port number only; duplicate insertions for the same port
+     * are not detected here -- lookups simply return whichever entry is
+     * found first in the bucket. */
+    hmap_insert(portno_names, &odp_portno_names->hmap_node,
+                hash_odp_port(port_no));
+}
+
+/* Returns the name recorded for 'port_no' in 'portno_names', or NULL if no
+ * mapping exists.  The returned string is owned by the map and remains valid
+ * only until odp_portno_names_destroy() is called. */
+static char *
+odp_portno_names_get(const struct hmap *portno_names, odp_port_t port_no)
+{
+    struct odp_portno_names *odp_portno_names;
+
+    /* Walk only the bucket for hash_odp_port(port_no) and compare the port
+     * number explicitly, since different ports may share a bucket. */
+    HMAP_FOR_EACH_IN_BUCKET (odp_portno_names, hmap_node,
+                             hash_odp_port(port_no), portno_names) {
+        if (odp_portno_names->port_no == port_no) {
+            return odp_portno_names->name;
+        }
+    }
+    return NULL;
+}
+
+/* Frees every entry in 'portno_names' (both the copied name string and the
+ * node itself).  Does not free 'portno_names' or call hmap_destroy() on it;
+ * that remains the caller's responsibility. */
+void
+odp_portno_names_destroy(struct hmap *portno_names)
+{
+    struct odp_portno_names *odp_portno_names, *odp_portno_names_next;
+    /* _SAFE variant is required because each node is removed and freed while
+     * iterating. */
+    HMAP_FOR_EACH_SAFE (odp_portno_names, odp_portno_names_next,
+                        hmap_node, portno_names) {
+        hmap_remove(portno_names, &odp_portno_names->hmap_node);
+        free(odp_portno_names->name);
+        free(odp_portno_names);
+    }
+}
static void
format_odp_key_attr(const struct nlattr *a, const struct nlattr *ma,
- struct ds *ds, bool verbose)
+ const struct hmap *portno_names, struct ds *ds,
+ bool verbose)
{
struct flow_tnl tun_key;
enum ovs_key_attr attr = nl_attr_type(a);
case OVS_KEY_ATTR_ENCAP:
if (ma && nl_attr_get_size(ma) && nl_attr_get_size(a)) {
odp_flow_format(nl_attr_get(a), nl_attr_get_size(a),
- nl_attr_get(ma), nl_attr_get_size(ma), ds, verbose);
- } else if (nl_attr_get_size(a)) {
- odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, ds,
+ nl_attr_get(ma), nl_attr_get_size(ma), NULL, ds,
verbose);
+ } else if (nl_attr_get_size(a)) {
+ odp_flow_format(nl_attr_get(a), nl_attr_get_size(a), NULL, 0, NULL,
+ ds, verbose);
}
break;
break;
case OVS_KEY_ATTR_IN_PORT:
- ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
- if (!is_exact) {
- ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
+ if (portno_names && verbose && is_exact) {
+ char *name = odp_portno_names_get(portno_names,
+ u32_to_odp(nl_attr_get_u32(a)));
+ if (name) {
+ ds_put_format(ds, "%s", name);
+ } else {
+ ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
+ }
+ } else {
+ ds_put_format(ds, "%"PRIu32, nl_attr_get_u32(a));
+ if (!is_exact) {
+ ds_put_format(ds, "/%#"PRIx32, nl_attr_get_u32(ma));
+ }
}
break;
/* Appends to 'ds' a string representation of the 'key_len' bytes of
* OVS_KEY_ATTR_* attributes in 'key'. If non-null, additionally formats the
- * 'mask_len' bytes of 'mask' which apply to 'key'. */
+ * 'mask_len' bytes of 'mask' which apply to 'key'. If 'portno_names' is
+ * non-null and 'verbose' is true, translates odp port number to its name. */
void
odp_flow_format(const struct nlattr *key, size_t key_len,
const struct nlattr *mask, size_t mask_len,
- struct ds *ds, bool verbose)
+ const struct hmap *portno_names, struct ds *ds, bool verbose)
{
if (key_len) {
const struct nlattr *a;
if (!first_field) {
ds_put_char(ds, ',');
}
- format_odp_key_attr(a, ma, ds, verbose);
+ format_odp_key_attr(a, ma, portno_names, ds, verbose);
first_field = false;
}
ofpbuf_clear(&ofp);
odp_flow_key_format(const struct nlattr *key,
size_t key_len, struct ds *ds)
{
- odp_flow_format(key, key_len, NULL, 0, ds, true);
+ odp_flow_format(key, key_len, NULL, 0, NULL, ds, true);
}
static void
int name_len;
name = s + 8;
- name_len = strcspn(s, ")");
+ name_len = strcspn(name, ")");
node = simap_find_len(port_names, name, name_len);
if (node) {
nl_msg_put_u32(key, OVS_KEY_ATTR_IN_PORT, node->data);
arp_key->arp_op = htons(data->nw_proto);
memcpy(arp_key->arp_sha, data->arp_sha, ETH_ADDR_LEN);
memcpy(arp_key->arp_tha, data->arp_tha, ETH_ADDR_LEN);
- }
-
- if (flow->mpls_depth) {
+ } else if (eth_type_mpls(flow->dl_type)) {
struct ovs_key_mpls *mpls_key;
mpls_key = nl_msg_put_unspec_uninit(buf, OVS_KEY_ATTR_MPLS,
uint64_t present_attrs, int out_of_range_attr,
uint64_t expected_attrs, struct flow *flow,
const struct nlattr *key, size_t key_len,
- const struct flow *src_flow)
+ const struct flow *src_flow)
{
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
bool is_mask = src_flow != flow;
enum ovs_key_attr expected_bit = 0xff;
if (eth_type_mpls(src_flow->dl_type)) {
- if (!is_mask) {
- expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
-
- if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS))) {
- return ODP_FIT_TOO_LITTLE;
- }
- flow->mpls_lse = nl_attr_get_be32(attrs[OVS_KEY_ATTR_MPLS]);
- flow->mpls_depth++;
+ if (!is_mask) {
+ expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
+
+ if (!(present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS))) {
+ return ODP_FIT_TOO_LITTLE;
+ }
+ flow->mpls_lse = nl_attr_get_be32(attrs[OVS_KEY_ATTR_MPLS]);
} else if (present_attrs & (UINT64_C(1) << OVS_KEY_ATTR_MPLS)) {
flow->mpls_lse = nl_attr_get_be32(attrs[OVS_KEY_ATTR_MPLS]);
return ODP_FIT_ERROR;
}
expected_attrs |= (UINT64_C(1) << OVS_KEY_ATTR_MPLS);
- if (flow->mpls_lse) {
- /* XXX Is this needed? */
- flow->mpls_depth = 0xffff;
- }
}
goto done;
} else if (src_flow->dl_type == htons(ETH_TYPE_IP)) {
flow->nw_tos = ipv4_key->ipv4_tos;
flow->nw_ttl = ipv4_key->ipv4_ttl;
if (is_mask) {
- flow->nw_frag = ipv4_key->ipv4_frag;
+ flow->nw_frag = ipv4_key->ipv4_frag;
check_start = ipv4_key;
check_len = sizeof *ipv4_key;
expected_bit = OVS_KEY_ATTR_IPV4;
flow->nw_tos = ipv6_key->ipv6_tclass;
flow->nw_ttl = ipv6_key->ipv6_hlimit;
if (is_mask) {
- flow->nw_frag = ipv6_key->ipv6_frag;
+ flow->nw_frag = ipv6_key->ipv6_frag;
check_start = ipv6_key;
check_len = sizeof *ipv6_key;
expected_bit = OVS_KEY_ATTR_IPV6;
} else {
tci = nl_attr_get_be16(attrs[OVS_KEY_ATTR_VLAN]);
if (!is_mask) {
- if (tci == htons(0)) {
+ if (tci == htons(0)) {
/* Corner case for a truncated 802.1Q header. */
- if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
- return ODP_FIT_TOO_MUCH;
- }
- return fitness;
- } else if (!(tci & htons(VLAN_CFI))) {
- VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
- "but CFI bit is not set", ntohs(tci));
- return ODP_FIT_ERROR;
- }
+ if (fitness == ODP_FIT_PERFECT && nl_attr_get_size(encap)) {
+ return ODP_FIT_TOO_MUCH;
+ }
+ return fitness;
+ } else if (!(tci & htons(VLAN_CFI))) {
+ VLOG_ERR_RL(&rl, "OVS_KEY_ATTR_VLAN 0x%04"PRIx16" is nonzero "
+ "but CFI bit is not set", ntohs(tci));
+ return ODP_FIT_ERROR;
+ }
}
/* Set vlan_tci.
* Remove the TPID from dl_type since it's not the real Ethertype. */
}
static void
-commit_vlan_action(const struct flow *flow, struct flow *base,
+commit_vlan_action(ovs_be16 vlan_tci, struct flow *base,
struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
- if (base->vlan_tci == flow->vlan_tci) {
+ if (base->vlan_tci == vlan_tci) {
return;
}
nl_msg_put_flag(odp_actions, OVS_ACTION_ATTR_POP_VLAN);
}
- if (flow->vlan_tci & htons(VLAN_CFI)) {
+ if (vlan_tci & htons(VLAN_CFI)) {
struct ovs_action_push_vlan vlan;
vlan.vlan_tpid = htons(ETH_TYPE_VLAN);
- vlan.vlan_tci = flow->vlan_tci;
+ vlan.vlan_tci = vlan_tci;
nl_msg_put_unspec(odp_actions, OVS_ACTION_ATTR_PUSH_VLAN,
&vlan, sizeof vlan);
}
- base->vlan_tci = flow->vlan_tci;
+ base->vlan_tci = vlan_tci;
}
+/* Emits the ODP MPLS action (push, pop, or set) needed to turn 'base' into
+ * 'flow', if any, and updates 'base' and 'wc' accordingly.
+ *
+ * '*mpls_depth_delta' replaces the old base/flow mpls_depth comparison: it
+ * is the pending change in label-stack depth (-1 = one pop pending, +1 = one
+ * push pending, 0 = depth unchanged) and is reset to 0 once committed.
+ * NOTE(review): the old code collapsed multi-level depth changes with a
+ * warning; the delta is now assumed to be in [-1, 1] and anything else hits
+ * NOT_REACHED() -- confirm callers guarantee this. */
static void
 commit_mpls_action(const struct flow *flow, struct flow *base,
-                   struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+                   struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+                   int *mpls_depth_delta)
{
-    if (flow->mpls_lse == base->mpls_lse &&
-        flow->mpls_depth == base->mpls_depth) {
+    /* Nothing to do when the LSE is unchanged and no push/pop is pending. */
+    if (flow->mpls_lse == base->mpls_lse && !*mpls_depth_delta) {
        return;
    }
    memset(&wc->masks.mpls_lse, 0xff, sizeof wc->masks.mpls_lse);
-    if (flow->mpls_depth < base->mpls_depth) {
-        if (base->mpls_depth - flow->mpls_depth > 1) {
-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
-            VLOG_WARN_RL(&rl, "Multiple mpls_pop actions reduced to "
-                         " a single mpls_pop action");
-        }
-
+    switch (*mpls_depth_delta) {
+    case -1:
+        /* One label popped: the resulting Ethertype is flow->dl_type. */
        nl_msg_put_be16(odp_actions, OVS_ACTION_ATTR_POP_MPLS, flow->dl_type);
-    } else if (flow->mpls_depth > base->mpls_depth) {
+        break;
+    case 1: {
+        /* One label pushed: emit the new Ethertype and label stack entry. */
        struct ovs_action_push_mpls *mpls;
-        if (flow->mpls_depth - base->mpls_depth > 1) {
-            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
-            VLOG_WARN_RL(&rl, "Multiple mpls_push actions reduced to "
-                         " a single mpls_push action");
-        }
-
        mpls = nl_msg_put_unspec_uninit(odp_actions, OVS_ACTION_ATTR_PUSH_MPLS,
                                        sizeof *mpls);
        memset(mpls, 0, sizeof *mpls);
        mpls->mpls_ethertype = flow->dl_type;
        mpls->mpls_lse = flow->mpls_lse;
-    } else {
+        break;
+    }
+    case 0: {
+        /* Depth unchanged but LSE differs: rewrite the top label in place. */
        struct ovs_key_mpls mpls_key;
        mpls_key.mpls_lse = flow->mpls_lse;
        commit_set_action(odp_actions, OVS_KEY_ATTR_MPLS,
                          &mpls_key, sizeof(mpls_key));
+        break;
+    }
+    default:
+        NOT_REACHED();
    }
    base->dl_type = flow->dl_type;
    base->mpls_lse = flow->mpls_lse;
-    base->mpls_depth = flow->mpls_depth;
+    *mpls_depth_delta = 0;
}
static void
&ipv6_key, sizeof(ipv6_key));
}
-static void
+/* If the ARP fields of 'flow' and 'base' differ, appends an
+ * OVS_KEY_ATTR_ARP set action to 'odp_actions' that rewrites 'base' into
+ * 'flow', updates 'base' to match, and un-wildcards the touched fields in
+ * 'wc'.  For ARP flows, nw_src/nw_dst hold the sender/target IP and
+ * nw_proto holds the ARP opcode (see the flow-to-key code above).
+ *
+ * Returns SLOW_ACTION when an action is emitted -- presumably because the
+ * datapath cannot execute ARP set actions itself, forcing the slow path;
+ * TODO(review): confirm against the datapath capabilities.  Returns 0 when
+ * nothing changed. */
+static enum slow_path_reason
+commit_set_arp_action(const struct flow *flow, struct flow *base,
+                      struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+{
+    struct ovs_key_arp arp_key;
+
+    if (base->nw_src == flow->nw_src &&
+        base->nw_dst == flow->nw_dst &&
+        base->nw_proto == flow->nw_proto &&
+        eth_addr_equals(base->arp_sha, flow->arp_sha) &&
+        eth_addr_equals(base->arp_tha, flow->arp_tha)) {
+        return 0;
+    }
+
+    memset(&wc->masks.nw_src, 0xff, sizeof wc->masks.nw_src);
+    memset(&wc->masks.nw_dst, 0xff, sizeof wc->masks.nw_dst);
+    memset(&wc->masks.nw_proto, 0xff, sizeof wc->masks.nw_proto);
+    memset(&wc->masks.arp_sha, 0xff, sizeof wc->masks.arp_sha);
+    memset(&wc->masks.arp_tha, 0xff, sizeof wc->masks.arp_tha);
+
+    base->nw_src = flow->nw_src;
+    base->nw_dst = flow->nw_dst;
+    base->nw_proto = flow->nw_proto;
+    memcpy(base->arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(base->arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    arp_key.arp_sip = base->nw_src;
+    arp_key.arp_tip = base->nw_dst;
+    arp_key.arp_op = htons(base->nw_proto);
+    memcpy(arp_key.arp_sha, flow->arp_sha, ETH_ADDR_LEN);
+    memcpy(arp_key.arp_tha, flow->arp_tha, ETH_ADDR_LEN);
+
+    commit_set_action(odp_actions, OVS_KEY_ATTR_ARP, &arp_key, sizeof arp_key);
+
+    return SLOW_ACTION;
+}
+
+/* Dispatches on 'base''s Ethertype to commit L3 set actions (IPv4, IPv6, or
+ * ARP) that turn 'base' into 'flow'.  Returns a slow-path reason if the
+ * committed action requires userspace handling (currently only the ARP
+ * case), otherwise 0. */
+static enum slow_path_reason
commit_set_nw_action(const struct flow *flow, struct flow *base,
                     struct ofpbuf *odp_actions, struct flow_wildcards *wc)
{
-    /* Check if flow really have an IP header. */
+    /* Check if 'flow' really has an L3 header. */
    if (!flow->nw_proto) {
-        return;
+        return 0;
    }
-    if (base->dl_type == htons(ETH_TYPE_IP)) {
+    switch (ntohs(base->dl_type)) {
+    case ETH_TYPE_IP:
        commit_set_ipv4_action(flow, base, odp_actions, wc);
-    } else if (base->dl_type == htons(ETH_TYPE_IPV6)) {
+        break;
+
+    case ETH_TYPE_IPV6:
        commit_set_ipv6_action(flow, base, odp_actions, wc);
+        break;
+
+    case ETH_TYPE_ARP:
+        return commit_set_arp_action(flow, base, odp_actions, wc);
    }
+
+    /* Other Ethertypes (and the IPv4/IPv6 cases) never force the slow
+     * path. */
+    return 0;
}
static void
odp_put_pkt_mark_action(base->pkt_mark, odp_actions);
}
+
/* If any of the flow key data that ODP actions can modify are different in
* 'base' and 'flow', appends ODP actions to 'odp_actions' that change the flow
* key from 'base' into 'flow', and then changes 'base' the same way. Does not
* commit set_tunnel actions. Users should call commit_odp_tunnel_action()
* in addition to this function if needed. Sets fields in 'wc' that are
- * used as part of the action. */
-void
+ * used as part of the action.
+ *
+ * Returns a reason to force processing the flow's packets into the userspace
+ * slow path, if there is one, otherwise 0. */
+enum slow_path_reason
commit_odp_actions(const struct flow *flow, struct flow *base,
- struct ofpbuf *odp_actions, struct flow_wildcards *wc)
+ struct ofpbuf *odp_actions, struct flow_wildcards *wc,
+ int *mpls_depth_delta)
{
+ enum slow_path_reason slow;
+
commit_set_ether_addr_action(flow, base, odp_actions, wc);
- commit_vlan_action(flow, base, odp_actions, wc);
- commit_set_nw_action(flow, base, odp_actions, wc);
+ commit_vlan_action(flow->vlan_tci, base, odp_actions, wc);
+ slow = commit_set_nw_action(flow, base, odp_actions, wc);
commit_set_port_action(flow, base, odp_actions, wc);
/* Committing MPLS actions should occur after committing nw and port
* actions. This is because committing MPLS actions may alter a packet so
* that it is no longer IP and thus nw and port actions are no longer valid.
*/
- commit_mpls_action(flow, base, odp_actions, wc);
+ commit_mpls_action(flow, base, odp_actions, wc, mpls_depth_delta);
commit_set_priority_action(flow, base, odp_actions, wc);
commit_set_pkt_mark_action(flow, base, odp_actions, wc);
+
+ return slow;
}