if (NXM_HASMASK(header)) {
memcpy(cookie_mask, p + 4 + width, width);
} else {
- *cookie_mask = htonll(UINT64_MAX);
+ *cookie_mask = OVS_BE64_MAX;
}
error = 0;
}
strict, match, NULL, NULL);
}
-/* Parses the oxm formatted match description preceded by a struct ofp11_match
- * in 'b' with length 'match_len'. Stores the result in 'match'.
+/* Parses the oxm formatted match description preceded by a struct
+ * ofp11_match_header in 'b'. Stores the result in 'match'.
*
* Fails with an error when encountering unknown OXM headers.
*
}
/* Behaves the same as oxm_pull_match() with one exception. Skips over unknown
- * PXM headers instead of failing with an error when they are encountered. */
+ * OXM headers instead of failing with an error when they are encountered. */
enum ofperr
oxm_pull_match_loose(struct ofpbuf *b, struct match *match)
{
case 0:
break;
- case CONSTANT_HTONS(UINT16_MAX):
+ case OVS_BE16_MAX:
nxm_put_16(b, header, value);
break;
case 0:
break;
- case CONSTANT_HTONL(UINT32_MAX):
+ case OVS_BE32_MAX:
nxm_put_32(b, header, value);
break;
case 0:
break;
- case CONSTANT_HTONLL(UINT64_MAX):
+ case OVS_BE64_MAX:
nxm_put_64(b, header, value);
break;
flow->tp_src, match->wc.masks.tp_src);
nxm_put_16m(b, oxm ? OXM_OF_UDP_DST : NXM_OF_UDP_DST,
flow->tp_dst, match->wc.masks.tp_dst);
+ } else if (flow->nw_proto == IPPROTO_SCTP) {
+ nxm_put_16m(b, OXM_OF_SCTP_SRC, flow->tp_src,
+ match->wc.masks.tp_src);
+ nxm_put_16m(b, OXM_OF_SCTP_DST, flow->tp_dst,
+ match->wc.masks.tp_dst);
} else if (flow->nw_proto == icmp_proto) {
if (match->wc.masks.tp_src) {
nxm_put_8(b, icmp_type, ntohs(flow->tp_src));
int match_len;
int i;
- BUILD_ASSERT_DECL(FLOW_WC_SEQ == 20);
+ BUILD_ASSERT_DECL(FLOW_WC_SEQ == 21);
/* Metadata. */
if (match->wc.masks.in_port.ofp_port) {
htonl(flow->regs[i]), htonl(match->wc.masks.regs[i]));
}
+ /* Mark. */
+ nxm_put_32m(b, NXM_NX_PKT_MARK, htonl(flow->pkt_mark),
+ htonl(match->wc.masks.pkt_mark));
+
/* OpenFlow 1.1+ Metadata. */
nxm_put_64m(b, OXM_OF_METADATA, flow->metadata, match->wc.masks.metadata);
/* ofp12_action_set_field is padded to 64 bits by zero */
if (oasf_len != ROUND_UP(sizeof(*oasf) + oxm_length, 8)) {
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_OFPBAC_BAD_SET_LEN;
}
if (!is_all_zeros((const uint8_t *)(oasf) + sizeof *oasf + oxm_length,
oasf_len - oxm_length - sizeof *oasf)) {
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_OFPBAC_BAD_SET_ARGUMENT;
}
if (NXM_HASMASK(oxm_header)) {
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_OFPBAC_BAD_SET_TYPE;
}
mf = mf_from_nxm_header(oxm_header);
if (!mf) {
- return OFPERR_OFPBAC_BAD_ARGUMENT;
+ return OFPERR_OFPBAC_BAD_SET_TYPE;
}
load = ofpact_put_REG_LOAD(ofpacts);
ofpact_set_field_init(load, mf, oasf + 1);
nxm_execute_reg_move(const struct ofpact_reg_move *move,
struct flow *flow, struct flow_wildcards *wc)
{
- union mf_subvalue mask_value;
union mf_value src_value;
union mf_value dst_value;
- memset(&mask_value, 0xff, sizeof mask_value);
- mf_write_subfield_flow(&move->src, &mask_value, &wc->masks);
+ mf_mask_field_and_prereqs(move->dst.field, &wc->masks);
+ mf_mask_field_and_prereqs(move->src.field, &wc->masks);
mf_get_value(move->dst.field, flow, &dst_value);
mf_get_value(move->src.field, flow, &src_value);
}
void
-nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow)
-{
+nxm_execute_reg_load(const struct ofpact_reg_load *load, struct flow *flow,
+ struct flow_wildcards *wc)
+{
+ /* Since at the datapath interface we do not have set actions for
+ * individual fields, but larger sets of fields for a given protocol
+ * layer, the set action will in practice only ever apply to exactly
+ * matched flows for the given protocol layer. For example, if the
+ * reg_load changes the IP TTL, the corresponding datapath action will
+ * rewrite also the IP addresses and TOS byte. Since these other field
+ * values may not be explicitly set, they depend on the incoming flow field
+ * values, and hence all of them are set in the wildcards masks, when
+ * the action is committed to the datapath. For the rare case, where the
+ * reg_load action does not actually change the value, and no other flow
+ * field values are set (or loaded), the datapath action is skipped, and
+ * no mask bits are set. Such a datapath flow should, however, be
+ * dependent on the specific field value, so the corresponding wildcard
+ * mask bits must be set, lest the datapath flow be applied to packets
+ * containing some other value in the field and the field value remain
+ * unchanged regardless of the incoming value.
+ *
+ * We set the masks here for the whole fields, and their prerequisites.
+ * Even if only the lower byte of a TCP destination port is set,
+ * we set the mask for the whole field, and also the ip_proto in the IP
+ * header, so that the kernel flow would not be applied on, e.g., a UDP
+ * packet, or any other IP protocol in addition to TCP packets.
+ */
+ mf_mask_field_and_prereqs(load->dst.field, &wc->masks);
mf_write_subfield_flow(&load->dst, &load->subvalue, flow);
}
void
nxm_reg_load(const struct mf_subfield *dst, uint64_t src_data,
- struct flow *flow)
+ struct flow *flow, struct flow_wildcards *wc)
{
union mf_subvalue src_subvalue;
+ union mf_subvalue mask_value;
ovs_be64 src_data_be = htonll(src_data);
+ memset(&mask_value, 0xff, sizeof mask_value);
+ mf_write_subfield_flow(dst, &mask_value, &wc->masks);
+
bitwise_copy(&src_data_be, sizeof src_data_be, 0,
&src_subvalue, sizeof src_subvalue, 0,
sizeof src_data_be * 8);
void
nxm_execute_stack_pop(const struct ofpact_stack *pop,
- struct flow *flow, struct ofpbuf *stack)
+ struct flow *flow, struct flow_wildcards *wc,
+ struct ofpbuf *stack)
{
union mf_subvalue *src_value;
/* Only pop if stack is not empty. Otherwise, give warning. */
if (src_value) {
+ union mf_subvalue mask_value;
+
+ memset(&mask_value, 0xff, sizeof mask_value);
+ mf_write_subfield_flow(&pop->subfield, &mask_value, &wc->masks);
mf_write_subfield_flow(&pop->subfield, src_value, flow);
} else {
if (!VLOG_DROP_WARN(&rl)) {