/*
 * Copyright (c) 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
19 #include "wdp-xflow.h"
26 #include "mac-learning.h"
32 #include "openflow/nicira-ext.h"
33 #include "openflow/openflow.h"
35 #include "poll-loop.h"
36 #include "port-array.h"
43 #include "wdp-provider.h"
45 #include "xflow-util.h"
49 VLOG_DEFINE_THIS_MODULE(wdp_xflow)
53 TABLEID_CLASSIFIER = 1
56 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
58 /* Maximum numbers of rules. */
59 #define WX_MAX_WILD 65536 /* Wildcarded rules. */
60 #define WX_MAX_EXACT 1048576 /* Exact-match rules. */
63 struct hmap_node hmap_node;
64 struct wdp_port wdp_port;
69 struct list list_node;
72 struct classifier cls;
73 struct netdev_monitor *netdev_monitor;
74 struct hmap ports; /* Contains "struct wx_port"s. */
75 struct shash port_by_name;
76 long long int next_expiration;
79 /* Rules that might need to be revalidated. */
80 bool need_revalidate; /* Revalidate all subrules? */
81 bool revalidate_all; /* Revalidate all subrules and other rules? */
82 struct tag_set revalidate_set; /* Tag set of (sub)rules to revalidate. */
84 /* Hooks for ovs-vswitchd. */
85 const struct ofhooks *ofhooks;
88 /* Used by default ofhooks. */
89 struct mac_learning *ml;
/* List of "struct wdp_packet"s queued for the controller by
 * execute_xflow_actions(). */
93 #define MAX_CTL_PACKETS 50
94 struct list ctl_packets;
98 static const struct ofhooks default_ofhooks;
100 static struct list all_wx = LIST_INITIALIZER(&all_wx);
102 static int wx_port_init(struct wx *);
103 static struct wx_port *wx_port_get(const struct wx *, uint16_t xflow_port);
104 static void wx_port_process_change(struct wx *wx, int error, char *devname,
105 wdp_port_poll_cb_func *cb, void *aux);
106 static void wx_port_refresh_groups(struct wx *);
108 static void wx_purge_ctl_packets__(struct wx *);
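/* Returns the 'struct wx' that contains 'wdp'.  Used at the top of most of
 * the wdp callbacks below, e.g. "struct wx *wx = wx_cast(wdp);". */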
116 wx_cast(const struct wdp *wdp)
118 return CONTAINER_OF(wdp, struct wx, wdp);
122 wx_xlate_actions(struct wx *, const union ofp_action *, size_t n,
123 const flow_t *flow, const struct ofpbuf *packet,
124 tag_type *tags, struct xflow_actions *out,
125 bool *may_set_up_flow);
130 uint64_t packet_count; /* Number of packets received. */
131 uint64_t byte_count; /* Number of bytes received. */
132 uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
133 long long int used; /* Last-used time (0 if never used). */
134 tag_type tags; /* Tags (set only by hooks). */
/* If 'super' is non-NULL, this rule is a subrule, that is, it is an
 * exact-match rule (having cr.wc.wildcards of 0) generated from the
 * wildcard rule 'super'.  In this case, 'list' is an element of the
 * super-rule's list of subrules.
 *
 * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
 * a list of subrules.  A super-rule with no wildcards (where
 * cr.wc.wildcards is 0) will never have any subrules. */
144 struct wx_rule *super;
149 * A super-rule with wildcard fields never has xflow actions (since the
150 * datapath only supports exact-match flows). */
151 bool installed; /* Installed in datapath? */
152 bool may_install; /* True ordinarily; false if actions must
153 * be reassessed for every packet. */
155 union xflow_action *xflow_actions;
158 static void wx_rule_destroy(struct wx *, struct wx_rule *);
159 static void wx_rule_update_actions(struct wx *, struct wx_rule *);
160 static void wx_rule_execute(struct wx *, struct wx_rule *,
161 struct ofpbuf *packet, const flow_t *);
162 static bool wx_rule_make_actions(struct wx *, struct wx_rule *,
163 const struct ofpbuf *packet);
164 static void wx_rule_install(struct wx *, struct wx_rule *,
165 struct wx_rule *displaced_rule);
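/* Returns the 'struct wx_rule' that contains classifier rule 'cls_rule', or a
 * null pointer if 'cls_rule' is null. */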
167 static struct wx_rule *
168 wx_rule_cast(const struct cls_rule *cls_rule)
170 return cls_rule ? CONTAINER_OF(cls_rule, struct wx_rule, wr.cr) : NULL;
173 /* Returns true if 'rule' is merely an implementation detail that should be
174 * hidden from the client. */
176 wx_rule_is_hidden(const struct wx_rule *rule)
178 return rule->super != NULL;
182 wx_rule_free(struct wx_rule *rule)
184 wdp_rule_uninit(&rule->wr);
185 free(rule->xflow_actions);
190 wx_rule_account(struct wx *wx OVS_UNUSED, struct wx_rule *rule OVS_UNUSED,
191 uint64_t extra_bytes OVS_UNUSED)
193 /* XXX call account_cb hook */
197 wx_rule_post_uninstall(struct wx *wx, struct wx_rule *rule)
199 struct wx_rule *super = rule->super;
201 wx_rule_account(wx, rule, 0);
203 /* XXX netflow expiration */
206 super->packet_count += rule->packet_count;
207 super->byte_count += rule->byte_count;
/* Reset counters to prevent double counting if the rule ever gets
 * reinstalled. */
211 rule->packet_count = 0;
212 rule->byte_count = 0;
213 rule->accounted_bytes = 0;
215 //XXX netflow_flow_clear(&rule->nf_flow);
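/* Converts the last-used time in 'stats' from a seconds/nanoseconds pair into
 * milliseconds, i.e. used_sec * 1000 + used_nsec / 1000000.  A flow that has
 * never been used reports 0 (see the 'used' member above). */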
220 xflow_flow_stats_to_msec(const struct xflow_flow_stats *stats)
222 return (stats->used_sec
223 ? stats->used_sec * 1000 + stats->used_nsec / 1000000
228 wx_rule_update_time(struct wx *wx OVS_UNUSED, struct wx_rule *rule,
229 const struct xflow_flow_stats *stats)
231 long long int used = xflow_flow_stats_to_msec(stats);
232 if (used > rule->used) {
234 if (rule->super && used > rule->super->used) {
235 rule->super->used = used;
237 //XXX netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
242 wx_rule_update_stats(struct wx *wx, struct wx_rule *rule,
243 const struct xflow_flow_stats *stats)
245 if (stats->n_packets) {
246 wx_rule_update_time(wx, rule, stats);
247 rule->packet_count += stats->n_packets;
248 rule->byte_count += stats->n_bytes;
249 /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->tcp_flags); */
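/* Removes 'rule''s exact-match flow from the datapath, if it is installed,
 * and folds the flow's final statistics back into the rule. */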
254 wx_rule_uninstall(struct wx *wx, struct wx_rule *rule)
256 assert(!rule->wr.cr.flow.wildcards);
257 if (rule->installed) {
258 struct xflow_flow xflow_flow;
260 xflow_key_from_flow(&xflow_flow.key, &rule->wr.cr.flow);
261 xflow_flow.actions = NULL;
262 xflow_flow.n_actions = 0;
263 xflow_flow.flags = 0;
264 if (!xfif_flow_del(wx->xfif, &xflow_flow)) {
265 wx_rule_update_stats(wx, rule, &xflow_flow.stats);
267 rule->installed = false;
269 wx_rule_post_uninstall(wx, rule);
275 is_controller_rule(struct wx_rule *rule)
/* If the only action is to send to the controller then don't report
 * NetFlow expiration messages since it is just part of the control
 * logic for the network and not real traffic. */
283 && rule->super->n_actions == 1
284 && action_outputs_to_port(&rule->super->actions[0],
285 htons(OFPP_CONTROLLER)));
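/* Removes 'rule' from 'wx': uninstalls its flow from the datapath if
 * necessary, deletes it from the classifier, and destroys it. */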
290 wx_rule_remove(struct wx *wx, struct wx_rule *rule)
292 if (rule->wr.cr.flow.wildcards) {
293 COVERAGE_INC(wx_del_wc_flow);
294 wx->need_revalidate = true;
296 wx_rule_uninstall(wx, rule);
298 classifier_remove(&wx->cls, &rule->wr.cr);
299 wx_rule_destroy(wx, rule);
303 wx_rule_revalidate(struct wx *wx, struct wx_rule *rule)
305 const flow_t *flow = &rule->wr.cr.flow;
307 COVERAGE_INC(wx_rule_revalidate);
309 struct wx_rule *super;
310 super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
312 wx_rule_remove(wx, rule);
314 } else if (super != rule->super) {
315 COVERAGE_INC(wx_revalidate_moved);
316 list_remove(&rule->list);
317 list_push_back(&super->list, &rule->list);
319 rule->wr.hard_timeout = super->wr.hard_timeout;
320 rule->wr.idle_timeout = super->wr.idle_timeout;
321 rule->wr.created = super->wr.created;
326 wx_rule_update_actions(wx, rule);
/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer have a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from
 * the classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
339 wx_rule_destroy(struct wx *wx, struct wx_rule *rule)
342 struct wx_rule *subrule, *next;
343 LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
344 wx_rule_revalidate(wx, subrule);
347 list_remove(&rule->list);
354 wx_rule_has_out_port(const struct wx_rule *rule, uint16_t out_port)
356 const union ofp_action *oa;
357 struct actions_iterator i;
359 if (out_port == htons(OFPP_NONE)) {
362 for (oa = actions_first(&i, rule->wr.actions,
365 oa = actions_next(&i)) {
366 if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
374 /* Caller is responsible for initializing the 'cr' and ofp_table_id members of
375 * the returned rule. */
376 static struct wx_rule *
377 wx_rule_create(struct wx_rule *super,
378 const union ofp_action *actions, size_t n_actions,
379 uint16_t idle_timeout, uint16_t hard_timeout)
381 struct wx_rule *rule = xzalloc(sizeof *rule);
382 wdp_rule_init(&rule->wr, actions, n_actions);
383 rule->wr.idle_timeout = idle_timeout;
384 rule->wr.hard_timeout = hard_timeout;
385 rule->used = rule->wr.created;
388 list_push_back(&super->list, &rule->list);
390 list_init(&rule->list);
393 netflow_flow_clear(&rule->nf_flow);
394 netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
400 /* Executes, within 'wx', the 'n_actions' actions in 'actions' on 'packet',
401 * which arrived on 'in_port'.
403 * Takes ownership of 'packet'. */
405 execute_xflow_actions(struct wx *wx, uint16_t in_port,
406 const union xflow_action *actions, size_t n_actions,
407 struct ofpbuf *packet)
409 if (n_actions == 1 && actions[0].type == XFLOWAT_CONTROLLER
410 && wx->n_ctl_packets < MAX_CTL_PACKETS) {
411 /* As an optimization, avoid a round-trip from userspace to kernel to
412 * userspace. This also avoids possibly filling up kernel packet
413 * buffers along the way. */
414 struct wdp_packet *wdp_packet;
416 if (!(wx->wdp_listen_mask & WDP_CHAN_ACTION)) {
420 wdp_packet = xmalloc(sizeof *wdp_packet);
421 wdp_packet->channel = WDP_CHAN_ACTION;
422 wdp_packet->tun_id = 0;
423 wdp_packet->in_port = in_port;
424 wdp_packet->send_len = actions[0].controller.arg;
425 wdp_packet->payload = packet;
427 list_push_back(&wx->ctl_packets, &wdp_packet->list);
433 error = xfif_execute(wx->xfif, in_port, actions, n_actions, packet);
434 ofpbuf_delete(packet);
439 /* Executes the actions indicated by 'rule' on 'packet', which is in flow
440 * 'flow' and is considered to have arrived on xflow port 'in_port'. 'packet'
441 * must have at least sizeof(struct ofp_packet_in) bytes of headroom.
443 * The flow that 'packet' actually contains does not need to actually match
444 * 'rule'; the actions in 'rule' will be applied to it either way. Likewise,
445 * the packet and byte counters for 'rule' will be credited for the packet sent
446 * out whether or not the packet actually matches 'rule'.
448 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
449 * the caller must already have accurately composed xflow actions for it given
450 * 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
451 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
452 * function will compose a set of xflow actions based on 'rule''s OpenFlow
453 * actions and apply them to 'packet'.
455 * Takes ownership of 'packet'. */
457 wx_rule_execute(struct wx *wx, struct wx_rule *rule,
458 struct ofpbuf *packet, const flow_t *flow)
460 const union xflow_action *actions;
461 struct xflow_flow_stats stats;
463 struct xflow_actions a;
465 assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
/* Grab or compose the xflow actions.
 *
 * The special case for an exact-match 'rule' where 'flow' is not the
 * rule's flow is important to avoid, e.g., sending a packet out its input
 * port simply because the xflow actions were composed for the wrong
 * packet. */
473 if (rule->wr.cr.flow.wildcards
474 || !flow_equal_headers(flow, &rule->wr.cr.flow))
476 struct wx_rule *super = rule->super ? rule->super : rule;
477 if (wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, flow,
478 packet, NULL, &a, NULL)) {
479 ofpbuf_delete(packet);
483 n_actions = a.n_actions;
485 actions = rule->xflow_actions;
486 n_actions = rule->n_xflow_actions;
489 /* Execute the xflow actions. */
490 flow_extract_stats(flow, packet, &stats);
491 if (!execute_xflow_actions(wx, flow->in_port,
492 actions, n_actions, packet)) {
493 wx_rule_update_stats(wx, rule, &stats);
494 rule->used = time_msec();
495 //XXX netflow_flow_update_time(wx->netflow, &rule->nf_flow, rule->used);
/* Inserts 'rule' into 'wx''s flow table.
 *
 * If 'packet' is nonnull, takes ownership of 'packet', executes 'rule''s
 * actions on it and credits the statistics for sending the packet to 'rule'.
 * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of
 * headroom. */
506 wx_rule_insert(struct wx *wx, struct wx_rule *rule, struct ofpbuf *packet,
509 struct wx_rule *displaced_rule;
511 /* Insert the rule in the classifier. */
512 displaced_rule = wx_rule_cast(classifier_insert(&wx->cls, &rule->wr.cr));
513 if (!rule->wr.cr.flow.wildcards) {
514 wx_rule_make_actions(wx, rule, packet);
517 /* Send the packet and credit it to the rule. */
520 flow_extract(packet, 0, in_port, &flow);
521 wx_rule_execute(wx, rule, packet, &flow);
524 /* Install the rule in the datapath only after sending the packet, to
525 * avoid packet reordering. */
526 if (rule->wr.cr.flow.wildcards) {
527 COVERAGE_INC(wx_add_wc_flow);
528 wx->need_revalidate = true;
530 wx_rule_install(wx, rule, displaced_rule);
533 /* Free the rule that was displaced, if any. */
534 if (displaced_rule) {
535 rule->wr.client_data = displaced_rule->wr.client_data;
536 wx_rule_destroy(wx, displaced_rule);
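/* Creates an exact-match subrule of wildcarded 'rule' for 'flow', inheriting
 * 'rule''s timeouts, and inserts it into 'wx''s classifier. */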
540 static struct wx_rule *
541 wx_rule_create_subrule(struct wx *wx, struct wx_rule *rule, const flow_t *flow)
543 struct wx_rule *subrule;
545 subrule = wx_rule_create(rule, NULL, 0,
546 rule->wr.idle_timeout,
547 rule->wr.hard_timeout);
548 /* Subrules aren't really in any OpenFlow table, so don't bother with
549 * subrule->wr.ofp_table_id. */
550 COVERAGE_INC(wx_subrule_create);
551 cls_rule_from_flow(flow, &subrule->wr.cr);
552 classifier_insert_exact(&wx->cls, &subrule->wr.cr);
557 /* Returns true if the actions changed, false otherwise. */
559 wx_rule_make_actions(struct wx *wx, struct wx_rule *rule,
560 const struct ofpbuf *packet)
562 const struct wx_rule *super;
563 struct xflow_actions a;
566 assert(!rule->wr.cr.flow.wildcards);
568 super = rule->super ? rule->super : rule;
569 wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions,
570 &rule->wr.cr.flow, packet,
571 &rule->tags, &a, &rule->may_install);
573 actions_len = a.n_actions * sizeof *a.actions;
574 if (rule->n_xflow_actions != a.n_actions
575 || memcmp(rule->xflow_actions, a.actions, actions_len)) {
576 COVERAGE_INC(wx_xflow_unchanged);
577 free(rule->xflow_actions);
578 rule->n_xflow_actions = a.n_actions;
579 rule->xflow_actions = xmemdup(a.actions, actions_len);
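/* Composes a datapath flow-put request for 'rule''s flow and xflow actions
 * with the given XFLOWPF_* 'flags' and issues it with xfif_flow_put(). */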
587 do_put_flow(struct wx *wx, struct wx_rule *rule, int flags,
588 struct xflow_flow_put *put)
590 memset(&put->flow.stats, 0, sizeof put->flow.stats);
591 xflow_key_from_flow(&put->flow.key, &rule->wr.cr.flow);
592 put->flow.actions = rule->xflow_actions;
593 put->flow.n_actions = rule->n_xflow_actions;
596 return xfif_flow_put(wx->xfif, put);
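/* Installs 'rule''s exact-match flow in the datapath, if 'rule->may_install'.
 * If 'displaced_rule' is nonnull, its final datapath statistics are credited
 * to it and it is taken out of the datapath. */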
600 wx_rule_install(struct wx *wx, struct wx_rule *rule, struct wx_rule *displaced_rule)
602 assert(!rule->wr.cr.flow.wildcards);
604 if (rule->may_install) {
605 struct xflow_flow_put put;
606 if (!do_put_flow(wx, rule,
607 XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS,
609 rule->installed = true;
610 if (displaced_rule) {
611 wx_rule_update_stats(wx, displaced_rule, &put.flow.stats);
612 wx_rule_post_uninstall(wx, displaced_rule);
615 } else if (displaced_rule) {
616 wx_rule_uninstall(wx, displaced_rule);
621 wx_rule_reinstall(struct wx *wx, struct wx_rule *rule)
623 if (rule->installed) {
624 struct xflow_flow_put put;
625 COVERAGE_INC(wx_dp_missed);
626 do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put);
628 wx_rule_install(wx, rule, NULL);
633 wx_rule_update_actions(struct wx *wx, struct wx_rule *rule)
635 bool actions_changed;
637 uint16_t new_out_iface, old_out_iface;
639 old_out_iface = rule->nf_flow.output_iface;
641 actions_changed = wx_rule_make_actions(wx, rule, NULL);
643 if (rule->may_install) {
644 if (rule->installed) {
645 if (actions_changed) {
646 struct xflow_flow_put put;
647 do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY
648 | XFLOWPF_ZERO_STATS, &put);
649 wx_rule_update_stats(wx, rule, &put.flow.stats);
651 /* Temporarily set the old output iface so that NetFlow
652 * messages have the correct output interface for the old
654 new_out_iface = rule->nf_flow.output_iface;
655 rule->nf_flow.output_iface = old_out_iface;
657 wx_rule_post_uninstall(wx, rule);
658 //rule->nf_flow.output_iface = new_out_iface;
661 wx_rule_install(wx, rule, NULL);
664 wx_rule_uninstall(wx, rule);
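/* Appends an XFLOWAT_OUTPUT_GROUP action for 'group' to 'actions', recording
 * NF_OUT_FLOOD in '*nf_output_iface' when 'group' is the all or flood group. */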
669 add_output_group_action(struct xflow_actions *actions, uint16_t group,
670 uint16_t *nf_output_iface)
672 xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group = group;
674 if (group == WX_GROUP_ALL || group == WX_GROUP_FLOOD) {
675 *nf_output_iface = NF_OUT_FLOOD;
680 add_controller_action(struct xflow_actions *actions, uint16_t max_len)
682 union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER);
683 a->controller.arg = max_len;
686 struct wx_xlate_ctx {
688 flow_t flow; /* Flow to which these actions correspond. */
689 int recurse; /* Recursion level, via xlate_table_action. */
691 const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
692 * null pointer if we are revalidating
693 * without a packet to refer to. */
696 struct xflow_actions *out; /* Datapath actions. */
697 tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
698 bool may_set_up_flow; /* True ordinarily; false if the actions must
699 * be reassessed for every packet. */
700 uint16_t nf_output_iface; /* Output interface index for NetFlow. */
703 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
704 struct wx_xlate_ctx *ctx);
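/* Appends an XFLOWAT_OUTPUT action to xflow port 'port', unless the
 * corresponding OpenFlow port exists and has OFPPC_NO_FWD set. */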
707 add_output_action(struct wx_xlate_ctx *ctx, uint16_t port)
709 const struct wx_port *wx_port = wx_port_get(ctx->wx, port);
712 if (wx_port->wdp_port.opp.config & OFPPC_NO_FWD) {
713 /* Forwarding disabled on port. */
718 * We don't have an ofport record for this port, but it doesn't hurt to
719 * allow forwarding to it anyhow. Maybe such a port will appear later
720 * and we're pre-populating the flow table.
724 xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port;
725 //ctx->nf_output_iface = port;
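/* Looks up 'flow' in 'wx''s classifier.  If revalidation is pending, the
 * matching rule is revalidated first; a rule that turns out to be stale is
 * not returned. */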
728 static struct wx_rule *
729 wx_rule_lookup_valid(struct wx *wx, const flow_t *flow)
731 struct wx_rule *rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
733 /* The rule we found might not be valid, since we could be in need of
734 * revalidation. If it is not valid, don't return it. */
737 && wx->need_revalidate
738 && !wx_rule_revalidate(wx, rule)) {
739 COVERAGE_INC(wx_invalidated);
747 xlate_table_action(struct wx_xlate_ctx *ctx, uint16_t in_port)
750 uint16_t old_in_port;
751 struct wx_rule *rule;
753 /* Look up a flow with 'in_port' as the input port. Then restore the
754 * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
755 * have surprising behavior). */
756 old_in_port = ctx->flow.in_port;
757 ctx->flow.in_port = in_port;
758 rule = wx_rule_lookup_valid(ctx->wx, &ctx->flow);
759 ctx->flow.in_port = old_in_port;
767 do_xlate_actions(rule->wr.actions, rule->wr.n_actions, ctx);
774 xlate_output_action__(struct wx_xlate_ctx *ctx,
775 uint16_t port, uint16_t max_len)
778 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
780 ctx->nf_output_iface = NF_OUT_DROP;
784 add_output_action(ctx, ctx->flow.in_port);
787 xlate_table_action(ctx, ctx->flow.in_port);
790 if (!ctx->wx->ofhooks->normal_cb(&ctx->flow, ctx->packet,
792 &ctx->nf_output_iface,
794 COVERAGE_INC(wx_uninstallable);
795 ctx->may_set_up_flow = false;
800 add_output_group_action(ctx->out, WX_GROUP_FLOOD,
801 &ctx->nf_output_iface);
804 add_output_group_action(ctx->out, WX_GROUP_ALL, &ctx->nf_output_iface);
806 case OFPP_CONTROLLER:
807 add_controller_action(ctx->out, max_len);
810 add_output_action(ctx, XFLOWP_LOCAL);
813 xflow_port = ofp_port_to_xflow_port(port);
814 if (xflow_port != ctx->flow.in_port) {
815 add_output_action(ctx, xflow_port);
820 if (prev_nf_output_iface == NF_OUT_FLOOD) {
821 ctx->nf_output_iface = NF_OUT_FLOOD;
822 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
823 ctx->nf_output_iface = prev_nf_output_iface;
824 } else if (prev_nf_output_iface != NF_OUT_DROP &&
825 ctx->nf_output_iface != NF_OUT_FLOOD) {
826 ctx->nf_output_iface = NF_OUT_MULTI;
831 xlate_output_action(struct wx_xlate_ctx *ctx,
832 const struct ofp_action_output *oao)
834 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
/* If the final xflow action in 'ctx' is "pop priority", drop it, as an
 * optimization, because we're going to add another action that sets the
 * priority immediately after, or because there are no actions following the
 * pop action. */
842 remove_pop_action(struct wx_xlate_ctx *ctx)
844 size_t n = ctx->out->n_actions;
845 if (n > 0 && ctx->out->actions[n - 1].type == XFLOWAT_POP_PRIORITY) {
846 ctx->out->n_actions--;
851 xlate_enqueue_action(struct wx_xlate_ctx *ctx,
852 const struct ofp_action_enqueue *oae)
854 uint16_t ofp_port, xflow_port;
858 error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(oae->queue_id),
861 /* Fall back to ordinary output action. */
862 xlate_output_action__(ctx, ntohs(oae->port), 0);
866 /* Figure out xflow output port. */
867 ofp_port = ntohs(oae->port);
868 if (ofp_port != OFPP_IN_PORT) {
869 xflow_port = ofp_port_to_xflow_port(ofp_port);
871 xflow_port = ctx->flow.in_port;
874 /* Add xflow actions. */
875 remove_pop_action(ctx);
876 xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority
878 add_output_action(ctx, xflow_port);
879 xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);
881 /* Update NetFlow output port. */
882 if (ctx->nf_output_iface == NF_OUT_DROP) {
883 ctx->nf_output_iface = xflow_port;
884 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
885 ctx->nf_output_iface = NF_OUT_MULTI;
890 xlate_set_queue_action(struct wx_xlate_ctx *ctx,
891 const struct nx_action_set_queue *nasq)
896 error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(nasq->queue_id),
899 /* Couldn't translate queue to a priority, so ignore. A warning
900 * has already been logged. */
904 remove_pop_action(ctx);
905 xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority
910 xlate_nicira_action(struct wx_xlate_ctx *ctx,
911 const struct nx_action_header *nah)
913 const struct nx_action_resubmit *nar;
914 const struct nx_action_set_tunnel *nast;
915 const struct nx_action_set_queue *nasq;
916 union xflow_action *oa;
917 int subtype = ntohs(nah->subtype);
919 assert(nah->vendor == htonl(NX_VENDOR_ID));
922 nar = (const struct nx_action_resubmit *) nah;
923 xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port)));
926 case NXAST_SET_TUNNEL:
927 nast = (const struct nx_action_set_tunnel *) nah;
928 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TUNNEL);
929 ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
932 case NXAST_DROP_SPOOFED_ARP:
933 if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
934 xflow_actions_add(ctx->out, XFLOWAT_DROP_SPOOFED_ARP);
938 case NXAST_SET_QUEUE:
939 nasq = (const struct nx_action_set_queue *) nah;
940 xlate_set_queue_action(ctx, nasq);
943 case NXAST_POP_QUEUE:
944 xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);
947 /* If you add a new action here that modifies flow data, don't forget to
948 * update the flow key in ctx->flow at the same time. */
951 VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
957 do_xlate_actions(const union ofp_action *in, size_t n_in,
958 struct wx_xlate_ctx *ctx)
960 struct actions_iterator iter;
961 const union ofp_action *ia;
962 const struct wx_port *port;
964 port = wx_port_get(ctx->wx, ctx->flow.in_port);
966 const struct ofp_phy_port *opp = &port->wdp_port.opp;
967 if (opp->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
968 opp->config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
969 ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
970 /* Drop this flow. */
975 for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
976 uint16_t type = ntohs(ia->type);
977 union xflow_action *oa;
981 xlate_output_action(ctx, &ia->output);
984 case OFPAT_SET_VLAN_VID:
985 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
986 oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK);
987 oa->dl_tci.mask = htons(VLAN_VID_MASK);
988 ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;
991 case OFPAT_SET_VLAN_PCP:
992 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
993 oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT)
995 oa->dl_tci.mask = htons(VLAN_PCP_MASK);
997 if (ctx->flow.dl_vlan == htons(OFP_VLAN_NONE)) {
998 ctx->flow.dl_vlan = htons(0);
1000 ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;
1003 case OFPAT_STRIP_VLAN:
1004 xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN);
1005 ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
1006 ctx->flow.dl_vlan_pcp = 0;
1009 case OFPAT_SET_DL_SRC:
1010 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC);
1011 memcpy(oa->dl_addr.dl_addr,
1012 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
1013 memcpy(ctx->flow.dl_src,
1014 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
1017 case OFPAT_SET_DL_DST:
1018 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST);
1019 memcpy(oa->dl_addr.dl_addr,
1020 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
1021 memcpy(ctx->flow.dl_dst,
1022 ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
1025 case OFPAT_SET_NW_SRC:
1026 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC);
1027 ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
1030 case OFPAT_SET_NW_DST:
1031 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST);
1032 ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
1035 case OFPAT_SET_NW_TOS:
1036 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS);
1037 ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
1040 case OFPAT_SET_TP_SRC:
1041 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC);
1042 ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
1045 case OFPAT_SET_TP_DST:
1046 oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST);
1047 ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
1051 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
1055 xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
1059 VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
/* Returns true if 'flow' and 'actions' may be set up as a flow in the kernel.
 * This is true most of the time, but we don't allow flows that would prevent
 * DHCP replies from being seen by the local port to be set up in the
 * datapath.
 *
 * We only need this, strictly speaking, when in-band control is turned on. */
1072 wx_may_set_up(const flow_t *flow, const struct xflow_actions *actions)
1074 if (flow->dl_type == htons(ETH_TYPE_IP)
1075 && flow->nw_proto == IP_TYPE_UDP
1076 && flow->tp_src == htons(DHCP_SERVER_PORT)
1077 && flow->tp_dst == htons(DHCP_CLIENT_PORT)) {
1080 for (i = 0; i < actions->n_actions; i++) {
1081 const struct xflow_action_output *oao = &actions->actions[i].output;
1082 if (oao->type == XFLOWAT_OUTPUT && oao->port == XFLOWP_LOCAL) {
1093 wx_xlate_actions(struct wx *wx, const union ofp_action *in, size_t n_in,
1094 const flow_t *flow, const struct ofpbuf *packet,
1095 tag_type *tags, struct xflow_actions *out,
1096 bool *may_set_up_flow)
1098 tag_type no_tags = 0;
1099 struct wx_xlate_ctx ctx;
1100 COVERAGE_INC(wx_ofp2xflow);
1101 xflow_actions_init(out);
1105 ctx.packet = packet;
1107 ctx.tags = tags ? tags : &no_tags;
1108 ctx.may_set_up_flow = true;
1109 ctx.nf_output_iface = NF_OUT_DROP;
1110 do_xlate_actions(in, n_in, &ctx);
1111 remove_pop_action(&ctx);
1113 if (may_set_up_flow) {
1114 *may_set_up_flow = ctx.may_set_up_flow && wx_may_set_up(flow, out);
1117 if (nf_output_iface) {
1118 *nf_output_iface = ctx.nf_output_iface;
1121 if (xflow_actions_overflow(out)) {
1122 COVERAGE_INC(xflow_overflow);
1123 xflow_actions_init(out);
1124 return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
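/* Fetches every flow installed in the datapath, pushes its usage statistics
 * into the corresponding rule, and deletes any datapath flow that no longer
 * has an installed rule behind it. */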
1130 update_used(struct wx *wx)
1132 struct xflow_flow *flows;
1137 error = xfif_flow_list_all(wx->xfif, &flows, &n_flows);
1142 for (i = 0; i < n_flows; i++) {
1143 struct xflow_flow *f = &flows[i];
1144 struct wx_rule *rule;
1147 xflow_key_to_flow(&f->key, &flow);
1148 rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow));
1149 if (!rule || !rule->installed) {
1150 COVERAGE_INC(wx_unexpected_rule);
1151 xfif_flow_del(wx->xfif, f);
1155 wx_rule_update_time(wx, rule, &f->stats);
1156 wx_rule_account(wx, rule, f->stats.n_bytes);
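/* Handles an installed exact-match 'rule' that has gone idle: it is either
 * removed from the flow table entirely or merely uninstalled from the
 * datapath. */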
1162 uninstall_idle_flow(struct wx *wx, struct wx_rule *rule)
1164 assert(rule->installed);
1165 assert(!rule->wr.cr.flow.wildcards);
1168 wx_rule_remove(wx, rule);
1170 wx_rule_uninstall(wx, rule);
1175 expire_rule(struct cls_rule *cls_rule, void *wx_)
1177 struct wx *wx = wx_;
1178 struct wx_rule *rule = wx_rule_cast(cls_rule);
1179 long long int hard_expire, idle_expire, expire, now;
1181 hard_expire = (rule->wr.hard_timeout
1182 ? rule->wr.created + rule->wr.hard_timeout * 1000
1184 idle_expire = (rule->wr.idle_timeout
1185 && (rule->super || list_is_empty(&rule->list))
1186 ? rule->used + rule->wr.idle_timeout * 1000
1188 expire = MIN(hard_expire, idle_expire);
1192 if (rule->installed && now >= rule->used + 5000) {
1193 uninstall_idle_flow(wx, rule);
1194 } else if (!rule->wr.cr.flow.wildcards) {
1195 //XXX active_timeout(wx, rule);
1201 COVERAGE_INC(wx_expired);
1203 /* Update stats. This code will be a no-op if the rule expired
1204 * due to an idle timeout. */
1205 if (rule->wr.cr.flow.wildcards) {
1206 struct wx_rule *subrule, *next;
1207 LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
1208 wx_rule_remove(wx, subrule);
1211 wx_rule_uninstall(wx, rule);
1215 if (!wx_rule_is_hidden(rule)) {
1216 send_flow_removed(wx, rule, now,
1218 ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
1221 wx_rule_remove(wx, rule);
1226 struct revalidate_cbdata {
1228 bool revalidate_all; /* Revalidate all exact-match rules? */
1229 bool revalidate_subrules; /* Revalidate all exact-match subrules? */
1230 struct tag_set revalidate_set; /* Set of tags to revalidate. */
1234 revalidate_rule(struct wx *wx, struct wx_rule *rule)
1236 const flow_t *flow = &rule->wr.cr.flow;
1238 COVERAGE_INC(wx_revalidate_rule);
1240 struct wx_rule *super;
1241 super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
1243 wx_rule_remove(wx, rule);
1245 } else if (super != rule->super) {
1246 COVERAGE_INC(wx_revalidate_moved);
1247 list_remove(&rule->list);
1248 list_push_back(&super->list, &rule->list);
1249 rule->super = super;
1250 rule->wr.hard_timeout = super->wr.hard_timeout;
1251 rule->wr.idle_timeout = super->wr.idle_timeout;
1252 rule->wr.created = super->wr.created;
1257 wx_rule_update_actions(wx, rule);
1262 revalidate_cb(struct cls_rule *sub_, void *cbdata_)
1264 struct wx_rule *sub = wx_rule_cast(sub_);
1265 struct revalidate_cbdata *cbdata = cbdata_;
1267 if (cbdata->revalidate_all
1268 || (cbdata->revalidate_subrules && sub->super)
1269 || tag_set_intersects(&cbdata->revalidate_set, sub->tags)) {
1270 revalidate_rule(cbdata->wx, sub);
1276 wx_run_one(struct wx *wx)
1278 if (time_msec() >= wx->next_expiration) {
1279 COVERAGE_INC(wx_expiration);
1280 wx->next_expiration = time_msec() + 1000;
1283 classifier_for_each(&wx->cls, CLS_INC_ALL, expire_rule, wx);
1285 /* XXX account_checkpoint_cb */
1288 if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
1289 struct revalidate_cbdata cbdata;
1291 cbdata.revalidate_all = wx->revalidate_all;
1292 cbdata.revalidate_subrules = wx->need_revalidate;
1293 cbdata.revalidate_set = wx->revalidate_set;
1294 tag_set_init(&wx->revalidate_set);
1295 COVERAGE_INC(wx_revalidate);
1296 classifier_for_each(&wx->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
1297 wx->need_revalidate = false;
1306 LIST_FOR_EACH (wx, list_node, &all_wx) {
1313 wx_wait_one(struct wx *wx)
1315 if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
1316 poll_immediate_wake();
1317 } else if (wx->next_expiration != LLONG_MAX) {
1318 poll_timer_wait_until(wx->next_expiration);
1327 LIST_FOR_EACH (wx, list_node, &all_wx) {
1333 static int wx_flow_flush(struct wdp *);
1336 wx_enumerate(const struct wdp_class *wdp_class, struct svec *all_wdps)
1338 struct svec names = SVEC_EMPTY_INITIALIZER;
1339 int error = xf_enumerate_names(wdp_class->type, &names);
1340 svec_move(all_wdps, &names);
1345 wx_open(const struct wdp_class *wdp_class, const char *name, bool create,
1352 ? xfif_create_and_open(name, wdp_class->type, &xfif)
1353 : xfif_open(name, wdp_class->type, &xfif));
1357 wx = xzalloc(sizeof *wx);
1358 list_push_back(&all_wx, &wx->list_node);
1359 wdp_init(&wx->wdp, wdp_class, name, 0, 0);
1361 classifier_init(&wx->cls);
1362 wx->netdev_monitor = netdev_monitor_create();
1363 hmap_init(&wx->ports);
1364 shash_init(&wx->port_by_name);
1365 wx->next_expiration = time_msec() + 1000;
1366 tag_set_init(&wx->revalidate_set);
1370 wx->ofhooks = &default_ofhooks;
1372 wx->ml = mac_learning_create();
1374 list_init(&wx->ctl_packets);
1383 wx_close(struct wdp *wdp)
1385 struct wx *wx = wx_cast(wdp);
1388 xfif_close(wx->xfif);
1389 classifier_destroy(&wx->cls);
1390 netdev_monitor_destroy(wx->netdev_monitor);
1391 list_remove(&wx->list_node);
1392 mac_learning_destroy(wx->ml);
1393 hmap_destroy(&wx->ports);
1394 shash_destroy(&wx->port_by_name);
1399 wx_get_all_names(const struct wdp *wdp, struct svec *all_names)
1401 struct wx *wx = wx_cast(wdp);
1403 return xfif_get_all_names(wx->xfif, all_names);
1407 wx_destroy(struct wdp *wdp)
1409 struct wx *wx = wx_cast(wdp);
1411 return xfif_delete(wx->xfif);
1415 wx_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
1417 struct wx *wx = wx_cast(wdp);
1418 struct ofp_switch_features *osf;
1420 struct wx_port *port;
1422 buf = ofpbuf_new(sizeof *osf);
1423 osf = ofpbuf_put_zeros(buf, sizeof *osf);
1425 osf->capabilities = htonl(OFPC_ARP_MATCH_IP);
1426 osf->actions = htonl((1u << OFPAT_OUTPUT) |
1427 (1u << OFPAT_SET_VLAN_VID) |
1428 (1u << OFPAT_SET_VLAN_PCP) |
1429 (1u << OFPAT_STRIP_VLAN) |
1430 (1u << OFPAT_SET_DL_SRC) |
1431 (1u << OFPAT_SET_DL_DST) |
1432 (1u << OFPAT_SET_NW_SRC) |
1433 (1u << OFPAT_SET_NW_DST) |
1434 (1u << OFPAT_SET_NW_TOS) |
1435 (1u << OFPAT_SET_TP_SRC) |
1436 (1u << OFPAT_SET_TP_DST) |
1437 (1u << OFPAT_ENQUEUE));
1439 HMAP_FOR_EACH (port, hmap_node, &wx->ports) {
1440 const struct ofp_phy_port *opp = &port->wdp_port.opp;
1441 hton_ofp_phy_port(ofpbuf_put(buf, opp, sizeof *opp));
1449 count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
1451 struct wx_rule *rule = wx_rule_cast(cls_rule);
1452 int *n_subrules = n_subrules_;
1461 wx_get_stats(const struct wdp *wdp, struct wdp_stats *stats)
1463 struct wx *wx = wx_cast(wdp);
1464 struct xflow_stats xflow_stats;
1467 error = xfif_get_xf_stats(wx->xfif, &xflow_stats);
1468 stats->max_ports = xflow_stats.max_ports;
1473 wx_get_table_stats(const struct wdp *wdp, struct ofpbuf *stats)
1475 struct wx *wx = wx_cast(wdp);
1476 struct xflow_stats xflow_stats;
1477 struct ofp_table_stats *exact, *wild;
1480 xfif_get_xf_stats(wx->xfif, &xflow_stats);
1481 /* XXX should pass up errors, but there are no appropriate OpenFlow error
1485 classifier_for_each(&wx->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
1487 exact = ofpbuf_put_zeros(stats, sizeof *exact);
1488 exact->table_id = TABLEID_HASH;
1489 strcpy(exact->name, "exact");
1490 exact->wildcards = htonl(0);
1491 exact->max_entries = htonl(MIN(WX_MAX_EXACT, xflow_stats.max_capacity));
1492 exact->active_count = htonl(classifier_count_exact(&wx->cls) - n_subrules);
1493 exact->lookup_count = htonll(xflow_stats.n_hit + xflow_stats.n_missed);
1494 exact->matched_count = htonll(xflow_stats.n_hit);
1496 wild = ofpbuf_put_zeros(stats, sizeof *exact);
1497 wild->table_id = TABLEID_CLASSIFIER;
1498 strcpy(wild->name, "classifier");
1499 wild->wildcards = htonl(OVSFW_ALL);
1500 wild->max_entries = htonl(WX_MAX_WILD);
1501 wild->active_count = htonl(classifier_count_wild(&wx->cls));
1502 wild->lookup_count = htonll(0); /* XXX */
1503 wild->matched_count = htonll(0); /* XXX */
1509 wx_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
1511 struct wx *wx = wx_cast(wdp);
1513 return xfif_get_drop_frags(wx->xfif, drop_frags);
1517 wx_set_drop_frags(struct wdp *wdp, bool drop_frags)
1519 struct wx *wx = wx_cast(wdp);
1521 return xfif_set_drop_frags(wx->xfif, drop_frags);
1525 wx_port_add(struct wdp *wdp, const char *devname,
1526 bool internal, uint16_t *port_no)
1528 struct wx *wx = wx_cast(wdp);
1529 uint16_t xflow_flags = internal ? XFLOW_PORT_INTERNAL : 0;
1530 return xfif_port_add(wx->xfif, devname, xflow_flags, port_no);
1534 wx_port_del(struct wdp *wdp, uint16_t port_no)
1536 struct wx *wx = wx_cast(wdp);
1538 return xfif_port_del(wx->xfif, port_no);
1542 wx_answer_port_query(const struct wx_port *port, struct wdp_port *portp)
1545 wdp_port_copy(portp, &port->wdp_port);
1553 wx_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
1554 struct wdp_port *portp)
1556 struct wx *wx = wx_cast(wdp);
1557 struct wx_port *wx_port = wx_port_get(wx, ofp_port_to_xflow_port(port_no));
1559 return wx_answer_port_query(wx_port, portp);
1563 wx_port_query_by_name(const struct wdp *wdp, const char *devname,
1564 struct wdp_port *portp)
1566 struct wx *wx = wx_cast(wdp);
1568 return wx_answer_port_query(shash_find_data(&wx->port_by_name, devname),
1573 wx_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
1575 struct wx *wx = wx_cast(wdp);
1576 struct wx_port *port;
1577 struct ofp_phy_port *opp;
1580 port = wx_port_get(wx, ofp_port_to_xflow_port(port_no));
1584 opp = &port->wdp_port.opp;
1585 changes = config ^ opp->config;
1587 if (changes & OFPPC_PORT_DOWN) {
1588 struct netdev *netdev = port->wdp_port.netdev;
1591 if (config & OFPPC_PORT_DOWN) {
1592 error = netdev_turn_flags_off(netdev, NETDEV_UP, true);
1594 error = netdev_turn_flags_on(netdev, NETDEV_UP, true);
1597 opp->config ^= OFPPC_PORT_DOWN;
1601 #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
1602 if (changes & REVALIDATE_BITS) {
1603 COVERAGE_INC(wx_costly_flags);
1604 opp->config ^= changes & REVALIDATE_BITS;
1605 wx->need_revalidate = true;
1607 #undef REVALIDATE_BITS
1609 if (changes & OFPPC_NO_FLOOD) {
1610 opp->config ^= OFPPC_NO_FLOOD;
1611 wx_port_refresh_groups(wx);
1614 if (changes & OFPPC_NO_PACKET_IN) {
1615 opp->config ^= OFPPC_NO_PACKET_IN;
1622 wx_port_list(const struct wdp *wdp, struct wdp_port **portsp, size_t *n_portsp)
1624 struct wx *wx = wx_cast(wdp);
1625 struct wdp_port *ports;
1626 struct wx_port *port;
1629 *n_portsp = n_ports = hmap_count(&wx->ports);
1630 *portsp = ports = xmalloc(n_ports * sizeof *ports);
1632 HMAP_FOR_EACH (port, hmap_node, &wx->ports) {
1633 wdp_port_copy(&ports[i++], &port->wdp_port);
1635 assert(i == n_ports);
1641 wx_port_poll(struct wdp *wdp, wdp_port_poll_cb_func *cb, void *aux)
1643 struct wx *wx = wx_cast(wdp);
1649 while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
1650 wx_port_process_change(wx, error, devname, cb, aux);
1651 if (error && error != ENOBUFS) {
1655 while ((error = netdev_monitor_poll(wx->netdev_monitor,
1656 &devname)) != EAGAIN) {
1657 wx_port_process_change(wx, error, devname, cb, aux);
1658 if (error && error != ENOBUFS) {
1666 wx_port_poll_wait(const struct wdp *wdp)
1668 struct wx *wx = wx_cast(wdp);
1670 xfif_port_poll_wait(wx->xfif);
1671 netdev_monitor_poll_wait(wx->netdev_monitor);
1675 static struct wdp_rule *
1676 wx_flow_get(const struct wdp *wdp, const flow_t *flow, unsigned int include)
1678 struct wx *wx = wx_cast(wdp);
1679 struct wx_rule *rule;
1682 table_id = flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
1683 if (!(include & (1u << table_id))) {
1687 rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, flow));
1688 return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;
1691 static struct wdp_rule *
1692 wx_flow_match(const struct wdp *wdp, const flow_t *flow)
1694 struct wx *wx = wx_cast(wdp);
1695 struct wx_rule *rule;
1697 rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
1699 if (wx_rule_is_hidden(rule)) {
1708 struct wx_for_each_thunk_aux {
1709 wdp_flow_cb_func *client_callback;
1714 wx_for_each_thunk(struct cls_rule *cls_rule, void *aux_)
1716 struct wx_for_each_thunk_aux *aux = aux_;
1717 struct wx_rule *rule = wx_rule_cast(cls_rule);
1719 if (!wx_rule_is_hidden(rule)) {
1720 return aux->client_callback(&rule->wr, aux->client_aux);
1726 wx_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
1727 unsigned int include,
1728 wdp_flow_cb_func *client_callback, void *client_aux)
1730 struct wx *wx = wx_cast(wdp);
1731 struct wx_for_each_thunk_aux aux;
1735 if (include & (1u << TABLEID_HASH)) {
1736 cls_include |= CLS_INC_EXACT;
1738 if (include & (1u << TABLEID_CLASSIFIER)) {
1739 cls_include |= CLS_INC_WILD;
1742 aux.client_callback = client_callback;
1743 aux.client_aux = client_aux;
1744 return classifier_for_each_match(&wx->cls, target, cls_include,
1745 wx_for_each_thunk, &aux);
/* Obtains statistics for 'rule' within 'wx' and stores them into '*stats'.
 * If 'rule' is a wildcarded rule, the returned statistics include
 * statistics for all of 'rule''s subrules. */
1752 query_stats(struct wx *wx, struct wx_rule *rule, struct wdp_flow_stats *stats)
1754 struct wx_rule *subrule;
1755 struct xflow_flow *xflow_flows;
1756 size_t n_xflow_flows;
/* Start from historical data for 'rule' itself that are no longer tracked
 * by the datapath.  This counts, for example, subrules that have
 * expired. */
1761 stats->n_packets = rule->packet_count;
1762 stats->n_bytes = rule->byte_count;
1763 stats->inserted = rule->wr.created;
1764 stats->used = LLONG_MIN;
1765 stats->tcp_flags = 0;
/* Prepare to ask the datapath for statistics on 'rule', or if it is
 * wildcarded then on all of its subrules.
 *
 * Also, add any statistics that are not tracked by the datapath for each
 * subrule.  This includes, for example, statistics for packets that were
 * executed "by hand" by ofproto via xfif_execute() but must be accounted
 * to the rule. */
1775 n_xflow_flows = rule->wr.cr.flow.wildcards ? list_size(&rule->list) : 1;
1776 xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
1777 if (rule->wr.cr.flow.wildcards) {
1779 LIST_FOR_EACH (subrule, list, &rule->list) {
1780 xflow_key_from_flow(&xflow_flows[i++].key, &subrule->wr.cr.flow);
1781 stats->n_packets += subrule->packet_count;
1782 stats->n_bytes += subrule->byte_count;
1785 xflow_key_from_flow(&xflow_flows[0].key, &rule->wr.cr.flow);
1788 /* Fetch up-to-date statistics from the datapath and add them in. */
1789 if (!xfif_flow_get_multiple(wx->xfif, xflow_flows, n_xflow_flows)) {
1791 for (i = 0; i < n_xflow_flows; i++) {
1792 struct xflow_flow *xflow_flow = &xflow_flows[i];
1795 stats->n_packets += xflow_flow->stats.n_packets;
1796 stats->n_bytes += xflow_flow->stats.n_bytes;
1797 used = xflow_flow_stats_to_msec(&xflow_flow->stats);
1798 if (used > stats->used) {
1801 stats->tcp_flags |= xflow_flow->stats.tcp_flags;
1808 wx_flow_get_stats(const struct wdp *wdp,
1809 const struct wdp_rule *wdp_rule,
1810 struct wdp_flow_stats *stats)
1812 struct wx *wx = wx_cast(wdp);
1813 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1815 query_stats(wx, rule, stats);
1820 wx_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
1822 struct wx *wx = wx_cast(wdp);
1824 /* XXX overlap with a subrule? */
1825 return classifier_rule_overlaps(&wx->cls, flow);
1829 wx_flow_put(struct wdp *wdp, const struct wdp_flow_put *put,
1830 struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
1832 struct wx *wx = wx_cast(wdp);
1833 struct wx_rule *rule;
1834 uint8_t ofp_table_id;
1836 ofp_table_id = put->flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
1837 if (put->ofp_table_id != 0xff && put->ofp_table_id != ofp_table_id) {
1838 return ofp_mkerr_nicira(OFPET_FLOW_MOD_FAILED, NXFMFC_BAD_TABLE_ID);
1841 rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, put->flow));
1842 if (rule && wx_rule_is_hidden(rule)) {
1847 if (!(put->flags & WDP_PUT_MODIFY)) {
1851 if (!(put->flags & WDP_PUT_CREATE)) {
1854 if ((put->flow->wildcards
1855 ? classifier_count_wild(&wx->cls) >= WX_MAX_WILD
1856 : classifier_count_exact(&wx->cls) >= WX_MAX_EXACT)) {
1857 /* XXX subrules should not count against exact-match limit */
1858 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
1862 rule = wx_rule_create(NULL, put->actions, put->n_actions,
1863 put->idle_timeout, put->hard_timeout);
1864 cls_rule_from_flow(put->flow, &rule->wr.cr);
1865 rule->wr.ofp_table_id = ofp_table_id;
1866 wx_rule_insert(wx, rule, NULL, 0);
1870 memset(old_stats, 0, sizeof *old_stats);
1880 wx_flow_delete(struct wdp *wdp, struct wdp_rule *wdp_rule,
1881 struct wdp_flow_stats *final_stats)
1883 struct wx *wx = wx_cast(wdp);
1884 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1886 wx_rule_remove(wx, rule);
1888 memset(final_stats, 0, sizeof *final_stats); /* XXX */
1894 wx_flush_rule(struct cls_rule *cls_rule, void *wx_)
1896 struct wx_rule *rule = wx_rule_cast(cls_rule);
1897 struct wx *wx = wx_;
1899 /* Mark the flow as not installed, even though it might really be
1900 * installed, so that wx_rule_remove() doesn't bother trying to uninstall
1901 * it. There is no point in uninstalling it individually since we are
1902 * about to blow away all the flows with xfif_flow_flush(). */
1903 rule->installed = false;
1905 wx_rule_remove(wx, rule);
1911 wx_flow_flush(struct wdp *wdp)
1913 struct wx *wx = wx_cast(wdp);
1915 COVERAGE_INC(wx_flow_flush);
1916 classifier_for_each(&wx->cls, CLS_INC_ALL, wx_flush_rule, wx);
1917 xfif_flow_flush(wx->xfif);
1922 wx_execute(struct wdp *wdp, uint16_t in_port,
1923 const union ofp_action actions[], int n_actions,
1924 const struct ofpbuf *packet)
1926 struct wx *wx = wx_cast(wdp);
1927 struct xflow_actions xflow_actions;
1931 flow_extract((struct ofpbuf *) packet, 0, in_port, &flow);
1932 error = wx_xlate_actions(wx, actions, n_actions, &flow, packet,
1933 NULL, &xflow_actions, NULL);
1937 return xfif_execute(wx->xfif, ofp_port_to_xflow_port(in_port),
1938 xflow_actions.actions, xflow_actions.n_actions,
1943 wx_flow_inject(struct wdp *wdp, struct wdp_rule *wdp_rule,
1944 uint16_t in_port, const struct ofpbuf *packet)
1946 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1949 error = wx_execute(wdp, in_port, rule->wr.actions, rule->wr.n_actions,
1952 rule->packet_count++;
1953 rule->byte_count += packet->size;
1954 rule->used = time_msec();
1960 wx_recv_get_mask(const struct wdp *wdp, int *listen_mask)
1962 struct wx *wx = wx_cast(wdp);
1963 int xflow_listen_mask;
1966 error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
1969 if (xflow_listen_mask & XFLOWL_MISS) {
1970 *listen_mask |= 1 << WDP_CHAN_MISS;
1972 if (xflow_listen_mask & XFLOWL_ACTION) {
1973 *listen_mask |= 1 << WDP_CHAN_ACTION;
1975 if (xflow_listen_mask & XFLOWL_SFLOW) {
1976 *listen_mask |= 1 << WDP_CHAN_SFLOW;
1983 wx_recv_set_mask(struct wdp *wdp, int listen_mask)
1985 struct wx *wx = wx_cast(wdp);
1986 int xflow_listen_mask;
1988 wx->wdp_listen_mask = listen_mask;
1990 xflow_listen_mask = 0;
1991 if (listen_mask & (1 << WDP_CHAN_MISS)) {
1992 xflow_listen_mask |= XFLOWL_MISS;
1994 if (listen_mask & (1 << WDP_CHAN_ACTION)) {
1995 xflow_listen_mask |= XFLOWL_ACTION;
1997 wx_purge_ctl_packets__(wx);
1999 if (listen_mask & (1 << WDP_CHAN_SFLOW)) {
2000 xflow_listen_mask |= XFLOWL_SFLOW;
2003 return xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2007 wx_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
2009 struct wx *wx = wx_cast(wdp);
2011 return xfif_get_sflow_probability(wx->xfif, probability);
2015 wx_set_sflow_probability(struct wdp *wdp, uint32_t probability)
2017 struct wx *wx = wx_cast(wdp);
2019 return xfif_set_sflow_probability(wx->xfif, probability);
2023 wx_translate_xflow_msg(struct xflow_msg *msg, struct ofpbuf *payload,
2024 struct wdp_packet *packet)
2026 packet->in_port = xflow_port_to_ofp_port(msg->port);
2027 packet->send_len = 0;
2030 switch (msg->type) {
2031 case _XFLOWL_MISS_NR:
2032 packet->channel = WDP_CHAN_MISS;
2033 packet->payload = payload;
2034 packet->tun_id = msg->arg;
2037 case _XFLOWL_ACTION_NR:
2038 packet->channel = WDP_CHAN_ACTION;
2039 packet->payload = payload;
2040 packet->send_len = msg->arg;
2043 case _XFLOWL_SFLOW_NR:
2045 ofpbuf_delete(payload);
2049 VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
2051 ofpbuf_delete(payload);
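/* Returns the Ethernet address of 'wx''s local port, or a null pointer if
 * there is no local port. */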
2056 static const uint8_t *
2057 get_local_mac(const struct wx *wx)
2059 const struct wx_port *port = wx_port_get(wx, XFLOWP_LOCAL);
2060 return port ? port->wdp_port.opp.hw_addr : NULL;
2063 /* Returns true if 'packet' is a DHCP reply to the local port. Such a reply
2064 * should be sent to the local port regardless of the flow table.
2066 * We only need this, strictly speaking, when in-band control is turned on. */
2068 wx_is_local_dhcp_reply(const struct wx *wx,
2069 const flow_t *flow, const struct ofpbuf *packet)
2071 if (flow->dl_type == htons(ETH_TYPE_IP)
2072 && flow->nw_proto == IP_TYPE_UDP
2073 && flow->tp_src == htons(DHCP_SERVER_PORT)
2074 && flow->tp_dst == htons(DHCP_CLIENT_PORT)
2077 const uint8_t *local_mac = get_local_mac(wx);
2078 struct dhcp_header *dhcp = ofpbuf_at(
2079 packet, (char *)packet->l7 - (char *)packet->data, sizeof *dhcp);
2080 return dhcp && local_mac && eth_addr_equals(dhcp->chaddr, local_mac);
/* Determines whether 'payload', which arrived on 'in_port', matches any of
 * the flows in 'wx''s OpenFlow flow table.  If so, then it adds a
 * corresponding flow to the xfif's exact-match flow table, taking ownership
 * of 'payload', and returns true.  If not, it returns false and the caller
 * retains ownership of 'payload'. */
2092 wx_explode_rule(struct wx *wx, uint16_t in_port, struct ofpbuf *payload)
2094 struct wx_rule *rule;
2097 flow_extract(payload, 0, xflow_port_to_ofp_port(in_port), &flow);
2099 if (wx_is_local_dhcp_reply(wx, &flow, payload)) {
2100 union xflow_action action;
2102 memset(&action, 0, sizeof(action));
2103 action.output.type = XFLOWAT_OUTPUT;
2104 action.output.port = XFLOWP_LOCAL;
2105 xfif_execute(wx->xfif, in_port, &action, 1, payload);
2108 rule = wx_rule_lookup_valid(wx, &flow);
2113 if (rule->wr.cr.flow.wildcards) {
2114 rule = wx_rule_create_subrule(wx, rule, &flow);
2115 wx_rule_make_actions(wx, rule, payload);
2117 if (!rule->may_install) {
2118 /* The rule is not installable, that is, we need to process every
2119 * packet, so process the current packet and set its actions into
2121 wx_rule_make_actions(wx, rule, payload);
2123 /* XXX revalidate rule if it needs it */
2127 wx_rule_execute(wx, rule, payload, &flow);
2128 wx_rule_reinstall(wx, rule);
2134 wx_recv(struct wdp *wdp, struct wdp_packet *packet)
2136 struct wx *wx = wx_cast(wdp);
2139 if (wx->n_ctl_packets) {
2140 struct wdp_packet *wdp_packet;
2142 wdp_packet = CONTAINER_OF(list_pop_front(&wx->ctl_packets),
2143 struct wdp_packet, list);
2144 wx->n_ctl_packets--;
2146 *packet = *wdp_packet;
2152 /* XXX need to avoid 50*50 potential cost for caller. */
2153 for (i = 0; i < 50; i++) {
2154 struct xflow_msg *msg;
2158 error = xfif_recv(wx->xfif, &buf);
2163 msg = ofpbuf_pull(buf, sizeof *msg);
2164 if (msg->type != _XFLOWL_MISS_NR
2165 || !wx_explode_rule(wx, msg->port, buf)) {
2166 return wx_translate_xflow_msg(msg, buf, packet);
2173 wx_recv_purge_queue__(struct wx *wx, int max, int xflow_listen_mask,
2178 error = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2182 while (max > 0 && (error = xfif_recv(wx->xfif, &buf)) == 0) {
2187 if (error && error != EAGAIN) {
2193 wx_purge_ctl_packets__(struct wx *wx)
2195 struct wdp_packet *this, *next;
2197 LIST_FOR_EACH_SAFE (this, next, list, &wx->ctl_packets) {
2198 list_remove(&this->list);
2199 ofpbuf_delete(this->payload);
2202 wx->n_ctl_packets = 0;
2206 wx_recv_purge(struct wdp *wdp)
2208 struct wx *wx = wx_cast(wdp);
2209 struct xflow_stats xflow_stats;
2210 int xflow_listen_mask;
2213 xfif_get_xf_stats(wx->xfif, &xflow_stats);
2215 error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
2216 if (error || !(xflow_listen_mask & XFLOWL_ALL)) {
2220 if (xflow_listen_mask & XFLOWL_MISS) {
2221 wx_recv_purge_queue__(wx, xflow_stats.max_miss_queue, XFLOWL_MISS,
2224 if (xflow_listen_mask & XFLOWL_ACTION) {
2225 wx_recv_purge_queue__(wx, xflow_stats.max_action_queue, XFLOWL_ACTION,
2227 wx_purge_ctl_packets__(wx);
2229 if (xflow_listen_mask & XFLOWL_SFLOW) {
2230 wx_recv_purge_queue__(wx, xflow_stats.max_sflow_queue, XFLOWL_SFLOW,
2234 retval = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2235 return retval ? retval : error;
2240 wx_recv_wait(struct wdp *wdp)
2242 struct wx *wx = wx_cast(wdp);
2244 if (wx->n_ctl_packets) {
2245 poll_immediate_wake();
2247 xfif_recv_wait(wx->xfif);
2252 wx_set_ofhooks(struct wdp *wdp, const struct ofhooks *ofhooks, void *aux)
2254 struct wx *wx = wx_cast(wdp);
2256 if (wx->ofhooks == &default_ofhooks) {
2257 mac_learning_destroy(wx->ml);
2261 wx->ofhooks = ofhooks;
2267 wx_revalidate(struct wdp *wdp, tag_type tag)
2269 struct wx *wx = wx_cast(wdp);
2271 tag_set_add(&wx->revalidate_set, tag);
2275 wx_revalidate_all(struct wdp *wdp)
2277 struct wx *wx = wx_cast(wdp);
2279 wx->revalidate_all = true;
2282 static void wx_port_update(struct wx *, const char *devname,
2283 wdp_port_poll_cb_func *cb, void *aux);
2284 static void wx_port_reinit(struct wx *, wdp_port_poll_cb_func *cb, void *aux);
2287 wx_port_process_change(struct wx *wx, int error, char *devname,
2288 wdp_port_poll_cb_func *cb, void *aux)
2290 if (error == ENOBUFS) {
2291 wx_port_reinit(wx, cb, aux);
2292 } else if (!error) {
2293 wx_port_update(wx, devname, cb, aux);
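/* Rebuilds datapath port group 'group' (WX_GROUP_ALL or WX_GROUP_FLOOD) from
 * 'wx''s current set of ports, leaving OFPPC_NO_FLOOD ports out of the flood
 * group. */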
2299 wx_port_refresh_group(struct wx *wx, unsigned int group)
2303 struct wx_port *port;
2305 assert(group == WX_GROUP_ALL || group == WX_GROUP_FLOOD);
2307 ports = xmalloc(hmap_count(&wx->ports) * sizeof *ports);
2309 HMAP_FOR_EACH (port, hmap_node, &wx->ports) {
2310 const struct ofp_phy_port *opp = &port->wdp_port.opp;
2311 if (group == WX_GROUP_ALL || !(opp->config & OFPPC_NO_FLOOD)) {
2312 ports[n_ports++] = port->xflow_port;
2315 xfif_port_group_set(wx->xfif, group, ports, n_ports);
2322 wx_port_refresh_groups(struct wx *wx)
2324 wx_port_refresh_group(wx, WX_GROUP_FLOOD);
2325 wx_port_refresh_group(wx, WX_GROUP_ALL);
2329 wx_port_reinit(struct wx *wx, wdp_port_poll_cb_func *cb, void *aux)
2331 struct svec devnames;
2332 struct wx_port *wx_port;
2333 struct xflow_port *xflow_ports;
2334 size_t n_xflow_ports;
2337 svec_init(&devnames);
2338 HMAP_FOR_EACH (wx_port, hmap_node, &wx->ports) {
2339 svec_add (&devnames, (char *) wx_port->wdp_port.opp.name);
2341 xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);
2342 for (i = 0; i < n_xflow_ports; i++) {
2343 svec_add(&devnames, xflow_ports[i].devname);
2347 svec_sort_unique(&devnames);
2348 for (i = 0; i < devnames.n; i++) {
2349 wx_port_update(wx, devnames.names[i], cb, aux);
2351 svec_destroy(&devnames);
2353 wx_port_refresh_groups(wx);
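/* Creates a 'struct wx_port' for 'xflow_port': opens its netdev and fills in
 * its OpenFlow description (hardware address, name, config, state, and
 * features).  Returns a null pointer (and logs a warning) if the netdev
 * cannot be opened. */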
static struct wx_port *
make_wx_port(const struct xflow_port *xflow_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct wx_port *wx_port;
    struct wdp_port *wdp_port;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = xflow_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     xflow_port->devname, xflow_port->port,
                     xflow_port->devname, strerror(error));
        return NULL;
    }

    wx_port = xmalloc(sizeof *wx_port);
    wx_port->xflow_port = xflow_port->port;
    wdp_port = &wx_port->wdp_port;
    wdp_port->netdev = netdev;
    wdp_port->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
    netdev_get_etheraddr(netdev, wdp_port->opp.hw_addr);
    strncpy((char *) wdp_port->opp.name, xflow_port->devname,
            sizeof wdp_port->opp.name);
    wdp_port->opp.name[sizeof wdp_port->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    wdp_port->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    wdp_port->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &wdp_port->opp.curr, &wdp_port->opp.advertised,
                        &wdp_port->opp.supported, &wdp_port->opp.peer);

    wdp_port->devname = xstrdup(xflow_port->devname);
    wdp_port->internal = (xflow_port->flags & XFLOW_PORT_INTERNAL) != 0;
    return wx_port;
}
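
/* Returns true, after logging a rate-limited warning, if 'xflow_port'
 * duplicates the port number or the device name of a port already in 'wx';
 * returns false otherwise. */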
static bool
wx_port_conflicts(const struct wx *wx, const struct xflow_port *xflow_port)
{
    if (wx_port_get(wx, xflow_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     xflow_port->port);
        return true;
    } else if (shash_find(&wx->port_by_name, xflow_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     xflow_port->devname);
        return true;
    } else {
        return false;
    }
}
static bool
wx_port_equal(const struct wx_port *a_, const struct wx_port *b_)
{
    const struct ofp_phy_port *a = &a_->wdp_port.opp;
    const struct ofp_phy_port *b = &b_->wdp_port.opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}
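
/* wx_port_install() and wx_port_remove() keep the two port indexes in sync:
 * 'ports' is an hmap keyed on xflow port number and 'port_by_name' is an
 * shash keyed on device name.  They also register and unregister the port's
 * netdev with the netdev_monitor so that changes to the device itself are
 * noticed. */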
static void
wx_port_install(struct wx *wx, struct wx_port *wx_port)
{
    const struct ofp_phy_port *opp = &wx_port->wdp_port.opp;
    uint16_t xflow_port = ofp_port_to_xflow_port(opp->port_no);
    const char *name = (const char *) opp->name;

    netdev_monitor_add(wx->netdev_monitor, wx_port->wdp_port.netdev);
    hmap_insert(&wx->ports, &wx_port->hmap_node, hash_int(xflow_port, 0));
    shash_add(&wx->port_by_name, name, wx_port);
}

static void
wx_port_remove(struct wx *wx, struct wx_port *wx_port)
{
    const struct ofp_phy_port *opp = &wx_port->wdp_port.opp;
    const char *name = (const char *) opp->name;

    netdev_monitor_remove(wx->netdev_monitor, wx_port->wdp_port.netdev);
    hmap_remove(&wx->ports, &wx_port->hmap_node);
    shash_delete(&wx->port_by_name, shash_find(&wx->port_by_name, name));
}

static void
wx_port_free(struct wx_port *wx_port)
{
    if (wx_port) {
        wdp_port_free(&wx_port->wdp_port);
        free(wx_port);
    }
}
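
/* Reconciles our idea of the port named 'devname' with what the datapath
 * reports: installs, removes, or replaces the corresponding wx_port as needed
 * and reports the result through 'cb' as an OFPPR_ADD, OFPPR_DELETE, or
 * OFPPR_MODIFY. */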
static void
wx_port_update(struct wx *wx, const char *devname,
               wdp_port_poll_cb_func *cb, void *aux)
{
    struct xflow_port xflow_port;
    struct wx_port *old_wx_port;
    struct wx_port *new_wx_port;
    int error;

    COVERAGE_INC(wx_update_port);

    /* Query the datapath for port information. */
    error = xfif_port_query_by_name(wx->xfif, devname, &xflow_port);

    /* Find the old wx_port. */
    old_wx_port = shash_find_data(&wx->port_by_name, devname);
    if (!error) {
        if (!old_wx_port) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a
             * port is renamed.  In the former case we want to send an
             * OFPPR_DELETE and an OFPPR_ADD, and in the latter case we want
             * to send a single OFPPR_MODIFY.  We can distinguish the cases by
             * comparing the old port's ifindex against the new port's, or
             * perhaps less reliably but more portably by comparing the old
             * port's MAC against the new port's MAC.  However, this code
             * isn't that smart and always sends an OFPPR_MODIFY (XXX). */
            old_wx_port = wx_port_get(wx, xflow_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new wx_port. */
    new_wx_port = !error ? make_wx_port(&xflow_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_wx_port && !new_wx_port) {
        return;
    } else if (old_wx_port && new_wx_port) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_wx_port.  (make_wx_port() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        struct ofp_phy_port *new_opp = &new_wx_port->wdp_port.opp;
        struct ofp_phy_port *old_opp = &old_wx_port->wdp_port.opp;
        new_opp->config |= old_opp->config & ~OFPPC_PORT_DOWN;

        if (wx_port_equal(old_wx_port, new_wx_port)) {
            /* False alarm--no change. */
            wx_port_free(new_wx_port);
            return;
        }
    }

    /* Now deal with the normal cases. */
    if (old_wx_port) {
        wx_port_remove(wx, old_wx_port);
    }
    if (new_wx_port) {
        wx_port_install(wx, new_wx_port);
    }

    if (!old_wx_port) {
        (*cb)(&new_wx_port->wdp_port.opp, OFPPR_ADD, aux);
    } else if (!new_wx_port) {
        (*cb)(&old_wx_port->wdp_port.opp, OFPPR_DELETE, aux);
    } else {
        (*cb)(&new_wx_port->wdp_port.opp, OFPPR_MODIFY, aux);
    }

    /* Update port groups. */
    wx_port_refresh_groups(wx);

    wx_port_free(old_wx_port);
}
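
/* A sketch of how wx_port_process_change() and wx_port_update() are meant to
 * be driven.  This assumes that xfif_port_poll() and netdev_monitor_poll()
 * follow the usual poll convention of returning EAGAIN when there is nothing
 * more to report and handing ownership of 'devname' to the caller; the real
 * caller is not shown here:
 *
 *     char *devname;
 *     int error;
 *
 *     while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
 *         wx_port_process_change(wx, error, devname, cb, aux);
 *     }
 *     while ((error = netdev_monitor_poll(wx->netdev_monitor,
 *                                         &devname)) != EAGAIN) {
 *         wx_port_process_change(wx, error, devname, cb, aux);
 *     }
 */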
static int
wx_port_init(struct wx *wx)
{
    struct xflow_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = xfif_port_list(wx->xfif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct xflow_port *xflow_port = &ports[i];
        if (!wx_port_conflicts(wx, xflow_port)) {
            struct wx_port *wx_port = make_wx_port(xflow_port);
            if (wx_port) {
                wx_port_install(wx, wx_port);
            }
        }
    }
    free(ports);

    wx_port_refresh_groups(wx);
    return 0;
}
/* Returns the port in 'wx' with xflow port number 'xflow_port', or a null
 * pointer if there is none. */
static struct wx_port *
wx_port_get(const struct wx *wx, uint16_t xflow_port)
{
    struct wx_port *port;

    HMAP_FOR_EACH_IN_BUCKET (port, hmap_node, hash_int(xflow_port, 0),
                             &wx->ports) {
        if (port->xflow_port == xflow_port) {
            return port;
        }
    }
    return NULL;
}
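
/* Registers one wdp provider for each xflow datapath type that
 * xf_enumerate_types() reports, by cloning wdp_xflow_class and giving each
 * clone the corresponding type name, so that every available xflow
 * implementation appears as its own wdp provider. */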
wdp_xflow_register(void)
    static const struct wdp_class wdp_xflow_class = {
        wx_port_query_by_number,
        wx_port_query_by_name,
        wx_flow_for_each_match,
        wx_get_sflow_probability,
        wx_set_sflow_probability,
    static bool inited = false;

    xf_enumerate_types(&types);

    SVEC_FOR_EACH (i, type, &types) {
        struct wdp_class *class;

        class = xmalloc(sizeof *class);
        *class = wdp_xflow_class;
        class->type = xstrdup(type);

        if (!wdp_register_provider(class)) {

    svec_destroy(&types);
static bool
default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
                         struct xflow_actions *actions, tag_type *tags,
                         uint16_t *nf_output_iface, void *wx_)
{
    struct wx *wx = wx_;
    int out_port;

    /* Drop frames for reserved multicast addresses. */
    if (eth_addr_is_reserved(flow->dl_dst)) {
        return true;
    }

    /* Learn source MAC (but don't try to learn from revalidation). */
    if (packet != NULL) {
        tag_type rev_tag = mac_learning_learn(wx->ml, flow->dl_src,
                                              0, flow->in_port,
                                              GRAT_ARP_LOCK_NONE);
        if (rev_tag) {
            /* The log messages here could actually be useful in debugging,
             * so keep the rate limit relatively high. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
            VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
                        ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
            tag_set_add(&wx->revalidate_set, rev_tag);
        }
    }

    /* Determine output port. */
    out_port = mac_learning_lookup_tag(wx->ml, flow->dl_dst, 0, tags,
                                       NULL);
    if (out_port < 0) {
        add_output_group_action(actions, WX_GROUP_FLOOD, nf_output_iface);
    } else if (out_port != flow->in_port) {
        xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = out_port;
        *nf_output_iface = out_port;
    } else {
        /* Drop. */
    }

    return true;
}
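
/* The default hooks make this wdp behave as a standalone L2 learning switch:
 * default_normal_ofhook_cb() above learns source MACs, floods packets with
 * unknown destinations through WX_GROUP_FLOOD, and otherwise outputs straight
 * to the learned port.  Clients such as ovs-vswitchd replace these hooks via
 * wx_set_ofhooks(). */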
static const struct ofhooks default_ofhooks = {
    default_normal_ofhook_cb,