 * Copyright (c) 2010, 2011 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.

#include "wdp-xflow.h"

#include "mac-learning.h"
#include "openflow/nicira-ext.h"
#include "openflow/openflow.h"
#include "poll-loop.h"
#include "port-array.h"
#include "wdp-provider.h"
#include "xflow-util.h"

VLOG_DEFINE_THIS_MODULE(wdp_xflow)

    TABLEID_CLASSIFIER = 1

static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Maximum numbers of rules. */
#define WX_MAX_WILD     65536   /* Wildcarded rules. */
#define WX_MAX_EXACT    1048576 /* Exact-match rules. */
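/* Illustration (not part of the original build): how a flow is assigned to
 * one of the two OpenFlow tables above.  A minimal sketch, assuming the
 * 'wildcards' member of flow_t used throughout this file; the same test
 * appears later in wx_flow_get() and wx_flow_put(). */
#if 0
static uint8_t
example_table_for_flow(const flow_t *flow)
{
    /* Wildcarded flows live in the linear classifier table; fully specified
     * flows live in the exact-match hash table. */
    return flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
}
#endif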
    struct hmap_node hmap_node;
    struct wdp_port wdp_port;

    struct list list_node;

    struct classifier cls;
    struct netdev_monitor *netdev_monitor;
    struct hmap ports;          /* Contains "struct wx_port"s. */
    struct shash port_by_name;
    long long int next_expiration;

    /* Rules that might need to be revalidated. */
    bool need_revalidate;       /* Revalidate all subrules? */
    bool revalidate_all;        /* Revalidate all subrules and other rules? */
    struct tag_set revalidate_set; /* Tag set of (sub)rules to revalidate. */

    /* Hooks for ovs-vswitchd. */
    const struct ofhooks *ofhooks;

    /* Used by default ofhooks. */
    struct mac_learning *ml;

    /* List of "struct wdp_packets" queued for the controller by
     * execute_xflow_actions(). */
#define MAX_CTL_PACKETS 50
    struct list ctl_packets;

static const struct ofhooks default_ofhooks;

static struct list all_wx = LIST_INITIALIZER(&all_wx);

static int wx_port_init(struct wx *);
static struct wx_port *wx_port_get(const struct wx *, uint16_t xflow_port);
static void wx_port_process_change(struct wx *wx, int error, char *devname,
                                   wdp_port_poll_cb_func *cb, void *aux);
static void wx_port_refresh_groups(struct wx *);

static bool wx_make_wdp_port(const struct xflow_port *, struct wdp_port *);

static void wx_purge_ctl_packets__(struct wx *);

wx_cast(const struct wdp *wdp)
    return CONTAINER_OF(wdp, struct wx, wdp);
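/* Illustration (not part of the original build): wx_cast() uses the
 * CONTAINER_OF() idiom, which recovers a pointer to an enclosing structure
 * from a pointer to one of its members.  A minimal sketch of the idea with
 * hypothetical example types; CONTAINER_OF(p, struct outer, member) expands
 * to essentially the pointer arithmetic shown here. */
#if 0
#include <stddef.h>

struct inner { int y; };
struct outer {
    int x;
    struct inner member;
};

/* Given 'p' pointing at outer.member, subtracting the member's offset
 * within the struct yields the enclosing struct. */
static struct outer *
outer_from_inner(struct inner *p)
{
    return (struct outer *) ((char *) p - offsetof(struct outer, member));
}
#endif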
wx_xlate_actions(struct wx *, const union ofp_action *, size_t n,
                 const flow_t *flow, const struct ofpbuf *packet,
                 tag_type *tags, struct xflow_actions *out,
                 bool *may_set_up_flow);

    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */
    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
    long long int used;         /* Last-used time (0 if never used). */
    tag_type tags;              /* Tags (set only by hooks). */

    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
     * exact-match rule (having cr.wc.wildcards of 0) generated from the
     * wildcard rule 'super'.  In this case, 'list' is an element of the
     *
     * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
     * a list of subrules.  A super-rule with no wildcards (where
     * cr.wc.wildcards is 0) will never have any subrules. */
    struct wx_rule *super;

    /* A super-rule with wildcard fields never has xflow actions (since the
     * datapath only supports exact-match flows). */
    bool installed;             /* Installed in datapath? */
    bool may_install;           /* True ordinarily; false if actions must
                                 * be reassessed for every packet. */

    union xflow_action *xflow_actions;
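/* Illustration (not part of the original build): the super/subrule
 * relationship described above is a plain intrusive list.  A hedged sketch
 * of walking a super-rule's subrules, mirroring the LIST_FOR_EACH uses later
 * in this file (e.g. in wx_rule_destroy() and query_stats()); assumes the
 * 'list' member name those loops use. */
#if 0
static void
example_visit_subrules(struct wx_rule *super)
{
    struct wx_rule *subrule;

    /* Only super-rules (super == NULL) can have a nonempty subrule list. */
    LIST_FOR_EACH (subrule, list, &super->list) {
        /* ... e.g. accumulate subrule->packet_count ... */
    }
}
#endif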
static void wx_rule_destroy(struct wx *, struct wx_rule *);
static void wx_rule_update_actions(struct wx *, struct wx_rule *);
static void wx_rule_execute(struct wx *, struct wx_rule *,
                            struct ofpbuf *packet, const flow_t *);
static bool wx_rule_make_actions(struct wx *, struct wx_rule *,
                                 const struct ofpbuf *packet);
static void wx_rule_install(struct wx *, struct wx_rule *,
                            struct wx_rule *displaced_rule);

static struct wx_rule *
wx_rule_cast(const struct cls_rule *cls_rule)
    return cls_rule ? CONTAINER_OF(cls_rule, struct wx_rule, wr.cr) : NULL;

/* Returns true if 'rule' is merely an implementation detail that should be
 * hidden from the client. */
wx_rule_is_hidden(const struct wx_rule *rule)
    return rule->super != NULL;
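/* Illustration (not part of the original build): callers that expose rules
 * to OpenFlow clients are expected to skip hidden (sub)rules.  A sketch of
 * the filtering pattern, matching how wx_for_each_thunk() and wx_flow_get()
 * use wx_rule_is_hidden() later in this file. */
#if 0
static struct wdp_rule *
example_visible_rule(struct wx_rule *rule)
{
    return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;
}
#endif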
wx_rule_free(struct wx_rule *rule)
    wdp_rule_uninit(&rule->wr);
    free(rule->xflow_actions);

wx_rule_account(struct wx *wx OVS_UNUSED, struct wx_rule *rule OVS_UNUSED,
                uint64_t extra_bytes OVS_UNUSED)
    /* XXX call account_cb hook */

wx_rule_post_uninstall(struct wx *wx, struct wx_rule *rule)
    struct wx_rule *super = rule->super;

    wx_rule_account(wx, rule, 0);

    /* XXX netflow expiration */

        super->packet_count += rule->packet_count;
        super->byte_count += rule->byte_count;
        /* Reset counters to prevent double counting if the rule ever gets
         * reinstalled. */
        rule->packet_count = 0;
        rule->byte_count = 0;
        rule->accounted_bytes = 0;

        //XXX netflow_flow_clear(&rule->nf_flow);

xflow_flow_stats_to_msec(const struct xflow_flow_stats *stats)
    return (stats->used_sec
            ? stats->used_sec * 1000 + stats->used_nsec / 1000000
            : 0);
wx_rule_update_time(struct wx *wx OVS_UNUSED, struct wx_rule *rule,
                    const struct xflow_flow_stats *stats)
    long long int used = xflow_flow_stats_to_msec(stats);
    if (used > rule->used) {
        if (rule->super && used > rule->super->used) {
            rule->super->used = used;
        //XXX netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);

wx_rule_update_stats(struct wx *wx, struct wx_rule *rule,
                     const struct xflow_flow_stats *stats)
    if (stats->n_packets) {
        wx_rule_update_time(wx, rule, stats);
        rule->packet_count += stats->n_packets;
        rule->byte_count += stats->n_bytes;
        /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->tcp_flags); */

wx_rule_uninstall(struct wx *wx, struct wx_rule *rule)
    assert(!rule->wr.cr.flow.wildcards);
    if (rule->installed) {
        struct xflow_flow xflow_flow;

        xflow_key_from_flow(&xflow_flow.key, &rule->wr.cr.flow);
        xflow_flow.actions = NULL;
        xflow_flow.n_actions = 0;
        xflow_flow.flags = 0;
        if (!xfif_flow_del(wx->xfif, &xflow_flow)) {
            wx_rule_update_stats(wx, rule, &xflow_flow.stats);

        rule->installed = false;

        wx_rule_post_uninstall(wx, rule);
is_controller_rule(struct wx_rule *rule)
    /* If the only action is to send to the controller, then don't report
     * NetFlow expiration messages, since it is just part of the control
     * logic for the network and not real traffic. */
            && rule->super->n_actions == 1
            && action_outputs_to_port(&rule->super->actions[0],
                                      htons(OFPP_CONTROLLER)));
wx_rule_remove(struct wx *wx, struct wx_rule *rule)
    if (rule->wr.cr.flow.wildcards) {
        COVERAGE_INC(wx_del_wc_flow);
        wx->need_revalidate = true;

        wx_rule_uninstall(wx, rule);

    classifier_remove(&wx->cls, &rule->wr.cr);
    wx_rule_destroy(wx, rule);

wx_rule_revalidate(struct wx *wx, struct wx_rule *rule)
    const flow_t *flow = &rule->wr.cr.flow;

    COVERAGE_INC(wx_rule_revalidate);
        struct wx_rule *super;
        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
            wx_rule_remove(wx, rule);
        } else if (super != rule->super) {
            COVERAGE_INC(wx_revalidate_moved);
            list_remove(&rule->list);
            list_push_back(&super->list, &rule->list);

            rule->wr.hard_timeout = super->wr.hard_timeout;
            rule->wr.idle_timeout = super->wr.idle_timeout;
            rule->wr.created = super->wr.created;

    wx_rule_update_actions(wx, rule);
/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
 * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
 * through all of its subrules and revalidates them, destroying any that no
 * longer have a super-rule (which is probably all of them).
 *
 * Before calling this function, the caller must have removed 'rule' from
 * the classifier.  If 'rule' is an exact-match rule, the caller is also
 * responsible for ensuring that it has been uninstalled from the datapath. */
wx_rule_destroy(struct wx *wx, struct wx_rule *rule)
        struct wx_rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
            wx_rule_revalidate(wx, subrule);

        list_remove(&rule->list);

wx_rule_has_out_port(const struct wx_rule *rule, uint16_t out_port)
    const union ofp_action *oa;
    struct actions_iterator i;

    if (out_port == htons(OFPP_NONE)) {

    for (oa = actions_first(&i, rule->wr.actions,
         oa = actions_next(&i)) {
        if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {

/* Caller is responsible for initializing the 'cr' and ofp_table_id members of
 * the returned rule. */
static struct wx_rule *
wx_rule_create(struct wx_rule *super,
               const union ofp_action *actions, size_t n_actions,
               uint16_t idle_timeout, uint16_t hard_timeout)
    struct wx_rule *rule = xzalloc(sizeof *rule);
    wdp_rule_init(&rule->wr, actions, n_actions);
    rule->wr.idle_timeout = idle_timeout;
    rule->wr.hard_timeout = hard_timeout;
    rule->used = rule->wr.created;

        list_push_back(&super->list, &rule->list);

        list_init(&rule->list);

    netflow_flow_clear(&rule->nf_flow);
    netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);

/* Executes, within 'wx', the 'n_actions' actions in 'actions' on 'packet',
 * which arrived on 'in_port'.
 *
 * Takes ownership of 'packet'. */
execute_xflow_actions(struct wx *wx, uint16_t in_port,
                      const union xflow_action *actions, size_t n_actions,
                      struct ofpbuf *packet)
    if (n_actions == 1 && actions[0].type == XFLOWAT_CONTROLLER
        && wx->n_ctl_packets < MAX_CTL_PACKETS) {
        /* As an optimization, avoid a round-trip from userspace to kernel to
         * userspace.  This also avoids possibly filling up kernel packet
         * buffers along the way. */
        struct wdp_packet *wdp_packet;
        if (!(wx->wdp_listen_mask & (1 << WDP_CHAN_ACTION))) {
        wdp_packet = xmalloc(sizeof *wdp_packet);
        wdp_packet->channel = WDP_CHAN_ACTION;
        wdp_packet->tun_id = 0;
        wdp_packet->in_port = in_port;
        wdp_packet->send_len = actions[0].controller.arg;
        wdp_packet->payload = packet;

        list_push_back(&wx->ctl_packets, &wdp_packet->list);

        error = xfif_execute(wx->xfif, in_port, actions, n_actions, packet);
        ofpbuf_delete(packet);

/* Executes the actions indicated by 'rule' on 'packet', which is in flow
 * 'flow' and is considered to have arrived on xflow port 'in_port'.  'packet'
 * must have at least sizeof(struct ofp_packet_in) bytes of headroom.
 *
 * The flow that 'packet' actually contains does not need to actually match
 * 'rule'; the actions in 'rule' will be applied to it either way.  Likewise,
 * the packet and byte counters for 'rule' will be credited for the packet sent
 * out whether or not the packet actually matches 'rule'.
 *
 * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
 * the caller must already have accurately composed xflow actions for it given
 * 'packet' using rule_make_actions().  If 'rule' is a wildcard rule, or if
 * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
 * function will compose a set of xflow actions based on 'rule''s OpenFlow
 * actions and apply them to 'packet'.
 *
 * Takes ownership of 'packet'. */
wx_rule_execute(struct wx *wx, struct wx_rule *rule,
                struct ofpbuf *packet, const flow_t *flow)
    const union xflow_action *actions;
    struct xflow_flow_stats stats;
    struct xflow_actions a;

    assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in));
    /* Grab or compose the xflow actions.
     *
     * The special case for an exact-match 'rule' where 'flow' is not the
     * rule's flow is important to avoid, e.g., sending a packet out its input
     * port simply because the xflow actions were composed for the wrong
     * flow. */
    if (rule->wr.cr.flow.wildcards
        || !flow_equal_headers(flow, &rule->wr.cr.flow))
        struct wx_rule *super = rule->super ? rule->super : rule;
        if (wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, flow,
                             packet, NULL, &a, NULL)) {
            ofpbuf_delete(packet);

        n_actions = a.n_actions;

        actions = rule->xflow_actions;
        n_actions = rule->n_xflow_actions;

    /* Execute the xflow actions. */
    flow_extract_stats(flow, packet, &stats);
    if (!execute_xflow_actions(wx, flow->in_port,
                               actions, n_actions, packet)) {
        wx_rule_update_stats(wx, rule, &stats);
        rule->used = time_msec();
        //XXX netflow_flow_update_time(wx->netflow, &rule->nf_flow, rule->used);
/* Inserts 'rule' into 'p''s flow table.
 *
 * If 'packet' is nonnull, takes ownership of 'packet', executes 'rule''s
 * actions on it and credits the statistics for sending the packet to 'rule'.
 * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of
 * headroom. */
wx_rule_insert(struct wx *wx, struct wx_rule *rule, struct ofpbuf *packet,
    struct wx_rule *displaced_rule;

    /* Insert the rule in the classifier. */
    displaced_rule = wx_rule_cast(classifier_insert(&wx->cls, &rule->wr.cr));
    if (!rule->wr.cr.flow.wildcards) {
        wx_rule_make_actions(wx, rule, packet);

    /* Send the packet and credit it to the rule. */
        flow_extract(packet, 0, in_port, &flow);
        wx_rule_execute(wx, rule, packet, &flow);

    /* Install the rule in the datapath only after sending the packet, to
     * avoid packet reordering. */
    if (rule->wr.cr.flow.wildcards) {
        COVERAGE_INC(wx_add_wc_flow);
        wx->need_revalidate = true;

        wx_rule_install(wx, rule, displaced_rule);

    /* Free the rule that was displaced, if any. */
    if (displaced_rule) {
        rule->wr.client_data = displaced_rule->wr.client_data;
        wx_rule_destroy(wx, displaced_rule);

static struct wx_rule *
wx_rule_create_subrule(struct wx *wx, struct wx_rule *rule, const flow_t *flow)
    struct wx_rule *subrule;

    subrule = wx_rule_create(rule, NULL, 0,
                             rule->wr.idle_timeout,
                             rule->wr.hard_timeout);
    /* Subrules aren't really in any OpenFlow table, so don't bother with
     * subrule->wr.ofp_table_id. */
    COVERAGE_INC(wx_subrule_create);
    cls_rule_from_flow(flow, &subrule->wr.cr);
    classifier_insert_exact(&wx->cls, &subrule->wr.cr);
/* Returns true if the actions changed, false otherwise. */
wx_rule_make_actions(struct wx *wx, struct wx_rule *rule,
                     const struct ofpbuf *packet)
    const struct wx_rule *super;
    struct xflow_actions a;

    assert(!rule->wr.cr.flow.wildcards);

    super = rule->super ? rule->super : rule;
    wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions,
                     &rule->wr.cr.flow, packet,
                     &rule->tags, &a, &rule->may_install);

    actions_len = a.n_actions * sizeof *a.actions;
    if (rule->n_xflow_actions != a.n_actions
        || memcmp(rule->xflow_actions, a.actions, actions_len)) {
        COVERAGE_INC(wx_xflow_unchanged);
        free(rule->xflow_actions);
        rule->n_xflow_actions = a.n_actions;
        rule->xflow_actions = xmemdup(a.actions, actions_len);

do_put_flow(struct wx *wx, struct wx_rule *rule, int flags,
            struct xflow_flow_put *put)
    memset(&put->flow.stats, 0, sizeof put->flow.stats);
    xflow_key_from_flow(&put->flow.key, &rule->wr.cr.flow);
    put->flow.actions = rule->xflow_actions;
    put->flow.n_actions = rule->n_xflow_actions;

    return xfif_flow_put(wx->xfif, put);
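/* Illustration (not part of the original build): do_put_flow() is driven by
 * XFLOWPF_* flag combinations.  A hedged sketch of the two combinations used
 * below: wx_rule_install() passes CREATE | MODIFY | ZERO_STATS (fresh flow,
 * counters reset), while wx_rule_reinstall() omits ZERO_STATS so existing
 * statistics are preserved. */
#if 0
static void
example_put_flow(struct wx *wx, struct wx_rule *rule, bool fresh_install)
{
    struct xflow_flow_put put;
    int flags = XFLOWPF_CREATE | XFLOWPF_MODIFY;

    if (fresh_install) {
        flags |= XFLOWPF_ZERO_STATS;    /* Start counters from zero. */
    }
    do_put_flow(wx, rule, flags, &put);
}
#endif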
wx_rule_install(struct wx *wx, struct wx_rule *rule,
                struct wx_rule *displaced_rule)
    assert(!rule->wr.cr.flow.wildcards);

    if (rule->may_install) {
        struct xflow_flow_put put;
        if (!do_put_flow(wx, rule,
                         XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS,
            rule->installed = true;
            if (displaced_rule) {
                wx_rule_update_stats(wx, displaced_rule, &put.flow.stats);
                wx_rule_post_uninstall(wx, displaced_rule);

    } else if (displaced_rule) {
        wx_rule_uninstall(wx, displaced_rule);

wx_rule_reinstall(struct wx *wx, struct wx_rule *rule)
    if (rule->installed) {
        struct xflow_flow_put put;
        COVERAGE_INC(wx_dp_missed);
        do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put);

        wx_rule_install(wx, rule, NULL);

wx_rule_update_actions(struct wx *wx, struct wx_rule *rule)
    bool actions_changed;
    uint16_t new_out_iface, old_out_iface;

    old_out_iface = rule->nf_flow.output_iface;

    actions_changed = wx_rule_make_actions(wx, rule, NULL);

    if (rule->may_install) {
        if (rule->installed) {
            if (actions_changed) {
                struct xflow_flow_put put;
                do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY
                            | XFLOWPF_ZERO_STATS, &put);
                wx_rule_update_stats(wx, rule, &put.flow.stats);
                /* Temporarily set the old output iface so that NetFlow
                 * messages have the correct output interface for the old
                 * stats. */
                new_out_iface = rule->nf_flow.output_iface;
                rule->nf_flow.output_iface = old_out_iface;

                wx_rule_post_uninstall(wx, rule);
                //rule->nf_flow.output_iface = new_out_iface;

            wx_rule_install(wx, rule, NULL);

        wx_rule_uninstall(wx, rule);

add_output_group_action(struct xflow_actions *actions, uint16_t group,
                        uint16_t *nf_output_iface)
    xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group
        = group;

    if (group == WX_GROUP_ALL || group == WX_GROUP_FLOOD) {
        *nf_output_iface = NF_OUT_FLOOD;

add_controller_action(struct xflow_actions *actions, uint16_t max_len)
    union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER);
    a->controller.arg = max_len;
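/* Illustration (not part of the original build): datapath action lists are
 * built by appending with xflow_actions_add(), which returns a pointer to
 * the newly added action so the caller can fill in its union member, as the
 * two helpers above do.  A minimal hedged sketch composing "output to a
 * port, then send the first 128 bytes to the controller": */
#if 0
static void
example_compose_actions(struct xflow_actions *actions, uint16_t port)
{
    xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = port;
    add_controller_action(actions, 128);    /* max_len of 128 bytes. */
}
#endif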
struct wx_xlate_ctx {
    flow_t flow;                /* Flow to which these actions correspond. */
    int recurse;                /* Recursion level, via xlate_table_action. */

    const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
                                  * null pointer if we are revalidating
                                  * without a packet to refer to. */

    struct xflow_actions *out;  /* Datapath actions. */
    tag_type *tags;             /* Tags associated with OFPP_NORMAL actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */

static void do_xlate_actions(const union ofp_action *in, size_t n_in,
                             struct wx_xlate_ctx *ctx);

add_output_action(struct wx_xlate_ctx *ctx, uint16_t port)
    const struct wx_port *wx_port = wx_port_get(ctx->wx, port);

        if (wx_port->wdp_port.opp.config & OFPPC_NO_FWD) {
            /* Forwarding disabled on port. */

         * We don't have an ofport record for this port, but it doesn't hurt to
         * allow forwarding to it anyhow.  Maybe such a port will appear later
         * and we're pre-populating the flow table.

    xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port;
    //ctx->nf_output_iface = port;

static struct wx_rule *
wx_rule_lookup_valid(struct wx *wx, const flow_t *flow)
    struct wx_rule *rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));

    /* The rule we found might not be valid, since we could be in need of
     * revalidation.  If it is not valid, don't return it. */
        && wx->need_revalidate
        && !wx_rule_revalidate(wx, rule)) {
        COVERAGE_INC(wx_invalidated);

xlate_table_action(struct wx_xlate_ctx *ctx, uint16_t in_port)
    uint16_t old_in_port;
    struct wx_rule *rule;

    /* Look up a flow with 'in_port' as the input port.  Then restore the
     * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
     * have surprising behavior). */
    old_in_port = ctx->flow.in_port;
    ctx->flow.in_port = in_port;
    rule = wx_rule_lookup_valid(ctx->wx, &ctx->flow);
    ctx->flow.in_port = old_in_port;

        do_xlate_actions(rule->wr.actions, rule->wr.n_actions, ctx);

xlate_output_action__(struct wx_xlate_ctx *ctx,
                      uint16_t port, uint16_t max_len)
    uint16_t prev_nf_output_iface = ctx->nf_output_iface;

    ctx->nf_output_iface = NF_OUT_DROP;

        add_output_action(ctx, ctx->flow.in_port);

        xlate_table_action(ctx, ctx->flow.in_port);

        if (!ctx->wx->ofhooks->normal_cb(&ctx->flow, ctx->packet,
                                         &ctx->nf_output_iface,
            COVERAGE_INC(wx_uninstallable);
            ctx->may_set_up_flow = false;

        add_output_group_action(ctx->out, WX_GROUP_FLOOD,
                                &ctx->nf_output_iface);

        add_output_group_action(ctx->out, WX_GROUP_ALL, &ctx->nf_output_iface);

    case OFPP_CONTROLLER:
        add_controller_action(ctx->out, max_len);

        add_output_action(ctx, XFLOWP_LOCAL);

        xflow_port = ofp_port_to_xflow_port(port);
        if (xflow_port != ctx->flow.in_port) {
            add_output_action(ctx, xflow_port);

    if (prev_nf_output_iface == NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_FLOOD;
    } else if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = prev_nf_output_iface;
    } else if (prev_nf_output_iface != NF_OUT_DROP &&
               ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;

xlate_output_action(struct wx_xlate_ctx *ctx,
                    const struct ofp_action_output *oao)
    xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
/* If the final xflow action in 'ctx' is "pop priority", drop it, as an
 * optimization, because we're going to add another action that sets the
 * priority immediately after, or because there are no actions following the
 * pop action. */
remove_pop_action(struct wx_xlate_ctx *ctx)
    size_t n = ctx->out->n_actions;
    if (n > 0 && ctx->out->actions[n - 1].type == XFLOWAT_POP_PRIORITY) {
        ctx->out->n_actions--;
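/* Illustration (not part of the original build): queueing actions are
 * emitted as the pair SET_PRIORITY ... POP_PRIORITY, so consecutive enqueues
 * would otherwise produce "POP immediately followed by SET".
 * remove_pop_action() cancels that pair.  A hedged sketch of the resulting
 * pattern, mirroring xlate_enqueue_action() below: */
#if 0
static void
example_enqueue(struct wx_xlate_ctx *ctx, uint16_t port, uint32_t priority)
{
    remove_pop_action(ctx);     /* Cancel a trailing POP_PRIORITY, if any. */
    xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority
        = priority;
    add_output_action(ctx, port);
    xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);
}
#endif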
xlate_enqueue_action(struct wx_xlate_ctx *ctx,
                     const struct ofp_action_enqueue *oae)
    uint16_t ofp_port, xflow_port;

    error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(oae->queue_id),
        /* Fall back to ordinary output action. */
        xlate_output_action__(ctx, ntohs(oae->port), 0);

    /* Figure out xflow output port. */
    ofp_port = ntohs(oae->port);
    if (ofp_port != OFPP_IN_PORT) {
        xflow_port = ofp_port_to_xflow_port(ofp_port);

        xflow_port = ctx->flow.in_port;

    /* Add xflow actions. */
    remove_pop_action(ctx);
    xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority
    add_output_action(ctx, xflow_port);
    xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);

    /* Update NetFlow output port. */
    if (ctx->nf_output_iface == NF_OUT_DROP) {
        ctx->nf_output_iface = xflow_port;
    } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
        ctx->nf_output_iface = NF_OUT_MULTI;

xlate_set_queue_action(struct wx_xlate_ctx *ctx,
                       const struct nx_action_set_queue *nasq)
    error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(nasq->queue_id),
        /* Couldn't translate queue to a priority, so ignore.  A warning
         * has already been logged. */

    remove_pop_action(ctx);
    xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority

xlate_nicira_action(struct wx_xlate_ctx *ctx,
                    const struct nx_action_header *nah)
    const struct nx_action_resubmit *nar;
    const struct nx_action_set_tunnel *nast;
    const struct nx_action_set_queue *nasq;
    union xflow_action *oa;
    int subtype = ntohs(nah->subtype);

    assert(nah->vendor == htonl(NX_VENDOR_ID));
        nar = (const struct nx_action_resubmit *) nah;
        xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port)));

    case NXAST_SET_TUNNEL:
        nast = (const struct nx_action_set_tunnel *) nah;
        oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TUNNEL);
        ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;

    case NXAST_DROP_SPOOFED_ARP:
        if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) {
            xflow_actions_add(ctx->out, XFLOWAT_DROP_SPOOFED_ARP);

    case NXAST_SET_QUEUE:
        nasq = (const struct nx_action_set_queue *) nah;
        xlate_set_queue_action(ctx, nasq);

    case NXAST_POP_QUEUE:
        xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);

    /* If you add a new action here that modifies flow data, don't forget to
     * update the flow key in ctx->flow at the same time. */

        VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);

do_xlate_actions(const union ofp_action *in, size_t n_in,
                 struct wx_xlate_ctx *ctx)
    struct actions_iterator iter;
    const union ofp_action *ia;
    const struct wx_port *port;

    port = wx_port_get(ctx->wx, ctx->flow.in_port);
        const struct ofp_phy_port *opp = &port->wdp_port.opp;
        if (opp->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
            opp->config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
                           ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
            /* Drop this flow. */

    for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
        uint16_t type = ntohs(ia->type);
        union xflow_action *oa;

            xlate_output_action(ctx, &ia->output);

        case OFPAT_SET_VLAN_VID:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
            oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK);
            oa->dl_tci.mask = htons(VLAN_VID_MASK);
            ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;

        case OFPAT_SET_VLAN_PCP:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
            oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT)
            oa->dl_tci.mask = htons(VLAN_PCP_MASK);

            if (ctx->flow.dl_vlan == htons(OFP_VLAN_NONE)) {
                ctx->flow.dl_vlan = htons(0);

            ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;

        case OFPAT_STRIP_VLAN:
            xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN);
            ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
            ctx->flow.dl_vlan_pcp = 0;

        case OFPAT_SET_DL_SRC:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC);
            memcpy(oa->dl_addr.dl_addr,
                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
            memcpy(ctx->flow.dl_src,
                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);

        case OFPAT_SET_DL_DST:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST);
            memcpy(oa->dl_addr.dl_addr,
                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
            memcpy(ctx->flow.dl_dst,
                   ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);

        case OFPAT_SET_NW_SRC:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC);
            ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;

        case OFPAT_SET_NW_DST:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST);
            ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;

        case OFPAT_SET_NW_TOS:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS);
            ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;

        case OFPAT_SET_TP_SRC:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC);
            ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;

        case OFPAT_SET_TP_DST:
            oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST);
            ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;

            xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);

            xlate_nicira_action(ctx, (const struct nx_action_header *) ia);

            VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
/* Returns true if 'flow' and 'actions' may be set up as a flow in the kernel.
 * This is true most of the time, but we don't allow flows that would prevent
 * DHCP replies from being seen by the local port to be set up in the
 * kernel.
 *
 * We only need this, strictly speaking, when in-band control is turned on. */
wx_may_set_up(const flow_t *flow, const struct xflow_actions *actions)
    if (flow->dl_type == htons(ETH_TYPE_IP)
        && flow->nw_proto == IP_TYPE_UDP
        && flow->tp_src == htons(DHCP_SERVER_PORT)
        && flow->tp_dst == htons(DHCP_CLIENT_PORT)) {

        for (i = 0; i < actions->n_actions; i++) {
            const struct xflow_action_output *oao
                = &actions->actions[i].output;
            if (oao->type == XFLOWAT_OUTPUT && oao->port == XFLOWP_LOCAL) {

wx_xlate_actions(struct wx *wx, const union ofp_action *in, size_t n_in,
                 const flow_t *flow, const struct ofpbuf *packet,
                 tag_type *tags, struct xflow_actions *out,
                 bool *may_set_up_flow)
    tag_type no_tags = 0;
    struct wx_xlate_ctx ctx;
    COVERAGE_INC(wx_ofp2xflow);
    xflow_actions_init(out);
    ctx.packet = packet;
    ctx.tags = tags ? tags : &no_tags;
    ctx.may_set_up_flow = true;
    ctx.nf_output_iface = NF_OUT_DROP;
    do_xlate_actions(in, n_in, &ctx);
    remove_pop_action(&ctx);

    if (may_set_up_flow) {
        *may_set_up_flow = ctx.may_set_up_flow && wx_may_set_up(flow, out);

    if (nf_output_iface) {
        *nf_output_iface = ctx.nf_output_iface;

    if (xflow_actions_overflow(out)) {
        COVERAGE_INC(xflow_overflow);
        xflow_actions_init(out);
        return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
update_used(struct wx *wx)
    struct xflow_flow *flows;

    error = xfif_flow_list_all(wx->xfif, &flows, &n_flows);

    for (i = 0; i < n_flows; i++) {
        struct xflow_flow *f = &flows[i];
        struct wx_rule *rule;

        xflow_key_to_flow(&f->key, &flow);
        rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow));
        if (!rule || !rule->installed) {
            COVERAGE_INC(wx_unexpected_rule);
            xfif_flow_del(wx->xfif, f);

        wx_rule_update_time(wx, rule, &f->stats);
        wx_rule_account(wx, rule, f->stats.n_bytes);

uninstall_idle_flow(struct wx *wx, struct wx_rule *rule)
    assert(rule->installed);
    assert(!rule->wr.cr.flow.wildcards);

        wx_rule_remove(wx, rule);

        wx_rule_uninstall(wx, rule);

expire_rule(struct cls_rule *cls_rule, void *wx_)
    struct wx *wx = wx_;
    struct wx_rule *rule = wx_rule_cast(cls_rule);
    long long int hard_expire, idle_expire, expire, now;

    hard_expire = (rule->wr.hard_timeout
                   ? rule->wr.created + rule->wr.hard_timeout * 1000
    idle_expire = (rule->wr.idle_timeout
                   && (rule->super || list_is_empty(&rule->list))
                   ? rule->used + rule->wr.idle_timeout * 1000
    expire = MIN(hard_expire, idle_expire);

        if (rule->installed && now >= rule->used + 5000) {
            uninstall_idle_flow(wx, rule);
        } else if (!rule->wr.cr.flow.wildcards) {
            //XXX active_timeout(wx, rule);

    COVERAGE_INC(wx_expired);

    /* Update stats.  This code will be a no-op if the rule expired
     * due to an idle timeout. */
    if (rule->wr.cr.flow.wildcards) {
        struct wx_rule *subrule, *next;
        LIST_FOR_EACH_SAFE (subrule, next, list, &rule->list) {
            wx_rule_remove(wx, subrule);

        wx_rule_uninstall(wx, rule);

    if (!wx_rule_is_hidden(rule)) {
        send_flow_removed(wx, rule, now,
                          ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));

    wx_rule_remove(wx, rule);
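/* Illustration (not part of the original build): expire_rule() computes a
 * rule's expiration time as the earlier of its hard and idle deadlines, with
 * LLONG_MAX standing in for "never" (the elided else branches above).  A
 * hedged, self-contained sketch of the same arithmetic: */
#if 0
static long long int
example_expiration(long long int created, long long int used,
                   uint16_t hard_timeout, uint16_t idle_timeout)
{
    long long int hard = hard_timeout ? created + hard_timeout * 1000
                                      : LLONG_MAX;
    long long int idle = idle_timeout ? used + idle_timeout * 1000
                                      : LLONG_MAX;
    return MIN(hard, idle);     /* In milliseconds, as from time_msec(). */
}
#endif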
struct revalidate_cbdata {
    bool revalidate_all;           /* Revalidate all exact-match rules? */
    bool revalidate_subrules;      /* Revalidate all exact-match subrules? */
    struct tag_set revalidate_set; /* Set of tags to revalidate. */

revalidate_rule(struct wx *wx, struct wx_rule *rule)
    const flow_t *flow = &rule->wr.cr.flow;

    COVERAGE_INC(wx_revalidate_rule);
        struct wx_rule *super;
        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
            wx_rule_remove(wx, rule);
        } else if (super != rule->super) {
            COVERAGE_INC(wx_revalidate_moved);
            list_remove(&rule->list);
            list_push_back(&super->list, &rule->list);
            rule->super = super;
            rule->wr.hard_timeout = super->wr.hard_timeout;
            rule->wr.idle_timeout = super->wr.idle_timeout;
            rule->wr.created = super->wr.created;

    wx_rule_update_actions(wx, rule);

revalidate_cb(struct cls_rule *sub_, void *cbdata_)
    struct wx_rule *sub = wx_rule_cast(sub_);
    struct revalidate_cbdata *cbdata = cbdata_;

    if (cbdata->revalidate_all
        || (cbdata->revalidate_subrules && sub->super)
        || tag_set_intersects(&cbdata->revalidate_set, sub->tags)) {
        revalidate_rule(cbdata->wx, sub);

wx_run_one(struct wx *wx)
    if (time_msec() >= wx->next_expiration) {
        COVERAGE_INC(wx_expiration);
        wx->next_expiration = time_msec() + 1000;

        classifier_for_each(&wx->cls, CLS_INC_ALL, expire_rule, wx);

        /* XXX account_checkpoint_cb */

    if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
        struct revalidate_cbdata cbdata;

        cbdata.revalidate_all = wx->revalidate_all;
        cbdata.revalidate_subrules = wx->need_revalidate;
        cbdata.revalidate_set = wx->revalidate_set;
        tag_set_init(&wx->revalidate_set);
        COVERAGE_INC(wx_revalidate);
        classifier_for_each(&wx->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
        wx->need_revalidate = false;

    LIST_FOR_EACH (wx, list_node, &all_wx) {

wx_wait_one(struct wx *wx)
    if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
        poll_immediate_wake();
    } else if (wx->next_expiration != LLONG_MAX) {
        poll_timer_wait_until(wx->next_expiration);

    LIST_FOR_EACH (wx, list_node, &all_wx) {

static int wx_flow_flush(struct wdp *);

wx_enumerate(const struct wdp_class *wdp_class, struct svec *all_wdps)
    struct svec names = SVEC_EMPTY_INITIALIZER;
    int error = xf_enumerate_names(wdp_class->type, &names);

    svec_move(all_wdps, &names);

wx_open(const struct wdp_class *wdp_class, const char *name, bool create,
             ? xfif_create_and_open(name, wdp_class->type, &xfif)
             : xfif_open(name, wdp_class->type, &xfif));

    wx = xzalloc(sizeof *wx);
    list_push_back(&all_wx, &wx->list_node);
    wdp_init(&wx->wdp, wdp_class, name, 0, 0);
    classifier_init(&wx->cls);
    wx->netdev_monitor = netdev_monitor_create();
    hmap_init(&wx->ports);
    shash_init(&wx->port_by_name);
    wx->next_expiration = time_msec() + 1000;
    tag_set_init(&wx->revalidate_set);

    wx->ofhooks = &default_ofhooks;
    wx->ml = mac_learning_create();

    list_init(&wx->ctl_packets);

wx_close(struct wdp *wdp)
    struct wx *wx = wx_cast(wdp);

    xfif_close(wx->xfif);
    classifier_destroy(&wx->cls);
    netdev_monitor_destroy(wx->netdev_monitor);
    list_remove(&wx->list_node);
    mac_learning_destroy(wx->ml);
    hmap_destroy(&wx->ports);
    shash_destroy(&wx->port_by_name);

wx_get_all_names(const struct wdp *wdp, struct svec *all_names)
    struct wx *wx = wx_cast(wdp);

    return xfif_get_all_names(wx->xfif, all_names);

wx_destroy(struct wdp *wdp)
    struct wx *wx = wx_cast(wdp);

    return xfif_delete(wx->xfif);

wx_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
    struct wx *wx = wx_cast(wdp);
    struct ofp_switch_features *osf;
    struct wx_port *port;

    buf = ofpbuf_new(sizeof *osf);
    osf = ofpbuf_put_zeros(buf, sizeof *osf);

    osf->capabilities = htonl(OFPC_ARP_MATCH_IP);
    osf->actions = htonl((1u << OFPAT_OUTPUT) |
                         (1u << OFPAT_SET_VLAN_VID) |
                         (1u << OFPAT_SET_VLAN_PCP) |
                         (1u << OFPAT_STRIP_VLAN) |
                         (1u << OFPAT_SET_DL_SRC) |
                         (1u << OFPAT_SET_DL_DST) |
                         (1u << OFPAT_SET_NW_SRC) |
                         (1u << OFPAT_SET_NW_DST) |
                         (1u << OFPAT_SET_NW_TOS) |
                         (1u << OFPAT_SET_TP_SRC) |
                         (1u << OFPAT_SET_TP_DST) |
                         (1u << OFPAT_ENQUEUE));

    HMAP_FOR_EACH (port, hmap_node, &wx->ports) {
        const struct ofp_phy_port *opp = &port->wdp_port.opp;
        hton_ofp_phy_port(ofpbuf_put(buf, opp, sizeof *opp));

count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
    struct wx_rule *rule = wx_rule_cast(cls_rule);
    int *n_subrules = n_subrules_;

wx_get_stats(const struct wdp *wdp, struct wdp_stats *stats)
    struct wx *wx = wx_cast(wdp);
    struct xflow_stats xflow_stats;

    error = xfif_get_xf_stats(wx->xfif, &xflow_stats);
    stats->max_ports = xflow_stats.max_ports;

wx_get_table_stats(const struct wdp *wdp, struct ofpbuf *stats)
    struct wx *wx = wx_cast(wdp);
    struct xflow_stats xflow_stats;
    struct ofp_table_stats *exact, *wild;

    xfif_get_xf_stats(wx->xfif, &xflow_stats);
    /* XXX should pass up errors, but there are no appropriate OpenFlow error
     * codes. */
    classifier_for_each(&wx->cls, CLS_INC_EXACT, count_subrules, &n_subrules);

    exact = ofpbuf_put_zeros(stats, sizeof *exact);
    exact->table_id = TABLEID_HASH;
    strcpy(exact->name, "exact");
    exact->wildcards = htonl(0);
    exact->max_entries = htonl(MIN(WX_MAX_EXACT, xflow_stats.max_capacity));
    exact->active_count = htonl(classifier_count_exact(&wx->cls) - n_subrules);
    exact->lookup_count = htonll(xflow_stats.n_hit + xflow_stats.n_missed);
    exact->matched_count = htonll(xflow_stats.n_hit);
    wild = ofpbuf_put_zeros(stats, sizeof *wild);
    wild->table_id = TABLEID_CLASSIFIER;
    strcpy(wild->name, "classifier");
    wild->wildcards = htonl(OVSFW_ALL);
    wild->max_entries = htonl(WX_MAX_WILD);
    wild->active_count = htonl(classifier_count_wild(&wx->cls));
    wild->lookup_count = htonll(0);     /* XXX */
    wild->matched_count = htonll(0);    /* XXX */

wx_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
    struct wx *wx = wx_cast(wdp);

    return xfif_get_drop_frags(wx->xfif, drop_frags);

wx_set_drop_frags(struct wdp *wdp, bool drop_frags)
    struct wx *wx = wx_cast(wdp);

    return xfif_set_drop_frags(wx->xfif, drop_frags);

wx_port_add(struct wdp *wdp, const char *devname,
            bool internal, uint16_t *port_no)
    struct wx *wx = wx_cast(wdp);
    uint16_t xflow_flags = internal ? XFLOW_PORT_INTERNAL : 0;
    return xfif_port_add(wx->xfif, devname, xflow_flags, port_no);

wx_port_del(struct wdp *wdp, uint16_t port_no)
    struct wx *wx = wx_cast(wdp);

    return xfif_port_del(wx->xfif, port_no);

wx_answer_port_query(const struct wx_port *port, struct wdp_port *portp)
        wdp_port_copy(portp, &port->wdp_port);

wx_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
                        struct wdp_port *portp)
    struct wx *wx = wx_cast(wdp);
    struct wx_port *wx_port = wx_port_get(wx, ofp_port_to_xflow_port(port_no));

    return wx_answer_port_query(wx_port, portp);

wx_port_query_by_name(const struct wdp *wdp, const char *devname,
                      struct wdp_port *portp)
    struct wx *wx = wx_cast(wdp);

    return wx_answer_port_query(shash_find_data(&wx->port_by_name, devname),
                                portp);

wx_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
    struct wx *wx = wx_cast(wdp);
    struct wx_port *port;
    struct ofp_phy_port *opp;

    port = wx_port_get(wx, ofp_port_to_xflow_port(port_no));

    opp = &port->wdp_port.opp;
    changes = config ^ opp->config;

    if (changes & OFPPC_PORT_DOWN) {
        struct netdev *netdev = port->wdp_port.netdev;

        if (config & OFPPC_PORT_DOWN) {
            error = netdev_turn_flags_off(netdev, NETDEV_UP, true);

            error = netdev_turn_flags_on(netdev, NETDEV_UP, true);

        opp->config ^= OFPPC_PORT_DOWN;

#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
    if (changes & REVALIDATE_BITS) {
        COVERAGE_INC(wx_costly_flags);
        opp->config ^= changes & REVALIDATE_BITS;
        wx->need_revalidate = true;
#undef REVALIDATE_BITS

    if (changes & OFPPC_NO_FLOOD) {
        opp->config ^= OFPPC_NO_FLOOD;
        wx_port_refresh_groups(wx);

    if (changes & OFPPC_NO_PACKET_IN) {
        opp->config ^= OFPPC_NO_PACKET_IN;

wx_port_list(const struct wdp *wdp,
             struct wdp_port **wdp_portsp, size_t *n_wdp_portsp)
    struct wx *wx = wx_cast(wdp);
    struct xflow_port *xflow_ports;
    size_t n_xflow_ports;
    struct wdp_port *wdp_ports;

    /* Instead of using the cached set of ports kept in wdp->ports, this
     * queries the underlying xfif.  This isn't really desirable, but otherwise
     * a wx_port_add() or wx_port_del() isn't reflected in the list of ports
     * until the next time that ofproto_run() calls wx_port_poll() below.  That
     * confuses bridge.c's reconfiguration code, which expects to have the
     * port list updated immediately. */

    error = xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);

    wdp_ports = xmalloc(n_xflow_ports * sizeof *wdp_ports);
    for (i = 0; i < n_xflow_ports; i++) {
        if (wx_make_wdp_port(&xflow_ports[i], &wdp_ports[n_wdp_ports])) {

    *wdp_portsp = wdp_ports;
    *n_wdp_portsp = n_wdp_ports;

wx_port_poll(struct wdp *wdp, wdp_port_poll_cb_func *cb, void *aux)
    struct wx *wx = wx_cast(wdp);

    while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
        wx_port_process_change(wx, error, devname, cb, aux);
        if (error && error != ENOBUFS) {

    while ((error = netdev_monitor_poll(wx->netdev_monitor,
                                        &devname)) != EAGAIN) {
        wx_port_process_change(wx, error, devname, cb, aux);
        if (error && error != ENOBUFS) {

wx_port_poll_wait(const struct wdp *wdp)
    struct wx *wx = wx_cast(wdp);

    xfif_port_poll_wait(wx->xfif);
    netdev_monitor_poll_wait(wx->netdev_monitor);

static struct wdp_rule *
wx_flow_get(const struct wdp *wdp, const flow_t *flow, unsigned int include)
    struct wx *wx = wx_cast(wdp);
    struct wx_rule *rule;

    table_id = flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
    if (!(include & (1u << table_id))) {

    rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, flow));
    return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;

static struct wdp_rule *
wx_flow_match(const struct wdp *wdp, const flow_t *flow)
    struct wx *wx = wx_cast(wdp);
    struct wx_rule *rule;

    rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
        if (wx_rule_is_hidden(rule)) {

struct wx_for_each_thunk_aux {
    wdp_flow_cb_func *client_callback;

wx_for_each_thunk(struct cls_rule *cls_rule, void *aux_)
    struct wx_for_each_thunk_aux *aux = aux_;
    struct wx_rule *rule = wx_rule_cast(cls_rule);

    if (!wx_rule_is_hidden(rule)) {
        return aux->client_callback(&rule->wr, aux->client_aux);

wx_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
                       unsigned int include,
                       wdp_flow_cb_func *client_callback, void *client_aux)
    struct wx *wx = wx_cast(wdp);
    struct wx_for_each_thunk_aux aux;

    if (include & (1u << TABLEID_HASH)) {
        cls_include |= CLS_INC_EXACT;

    if (include & (1u << TABLEID_CLASSIFIER)) {
        cls_include |= CLS_INC_WILD;

    aux.client_callback = client_callback;
    aux.client_aux = client_aux;
    return classifier_for_each_match(&wx->cls, target, cls_include,
                                     wx_for_each_thunk, &aux);
/* Obtains statistic counters for 'rule' within 'wx' and stores them into
 * '*stats'.  If 'rule' is a wildcarded rule, the returned statistics include
 * statistics for all of 'rule''s subrules. */
query_stats(struct wx *wx, struct wx_rule *rule, struct wdp_flow_stats *stats)
    struct wx_rule *subrule;
    struct xflow_flow *xflow_flows;
    size_t n_xflow_flows;

    /* Start from historical data for 'rule' itself that are no longer tracked
     * by the datapath.  This counts, for example, subrules that have
     * expired. */
    stats->n_packets = rule->packet_count;
    stats->n_bytes = rule->byte_count;
    stats->inserted = rule->wr.created;
    stats->used = LLONG_MIN;
    stats->tcp_flags = 0;

    /* Prepare to ask the datapath for statistics on 'rule', or if it is
     * wildcarded then on all of its subrules.
     *
     * Also, add any statistics that are not tracked by the datapath for each
     * subrule.  This includes, for example, statistics for packets that were
     * executed "by hand" by ofproto via xfif_execute() but must be accounted
     * to the rule. */
1797 n_xflow_flows = rule->wr.cr.flow.wildcards ? list_size(&rule->list) : 1;
1798 xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
1799 if (rule->wr.cr.flow.wildcards) {
1801 LIST_FOR_EACH (subrule, list, &rule->list) {
1802 xflow_key_from_flow(&xflow_flows[i++].key, &subrule->wr.cr.flow);
1803 stats->n_packets += subrule->packet_count;
1804 stats->n_bytes += subrule->byte_count;
1807 xflow_key_from_flow(&xflow_flows[0].key, &rule->wr.cr.flow);
1810 /* Fetch up-to-date statistics from the datapath and add them in. */
1811 if (!xfif_flow_get_multiple(wx->xfif, xflow_flows, n_xflow_flows)) {
1813 for (i = 0; i < n_xflow_flows; i++) {
1814 struct xflow_flow *xflow_flow = &xflow_flows[i];
1817 stats->n_packets += xflow_flow->stats.n_packets;
1818 stats->n_bytes += xflow_flow->stats.n_bytes;
1819 used = xflow_flow_stats_to_msec(&xflow_flow->stats);
1820 if (used > stats->used) {
1823 stats->tcp_flags |= xflow_flow->stats.tcp_flags;
1830 wx_flow_get_stats(const struct wdp *wdp,
1831 const struct wdp_rule *wdp_rule,
1832 struct wdp_flow_stats *stats)
1834 struct wx *wx = wx_cast(wdp);
1835 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1837 query_stats(wx, rule, stats);
1842 wx_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
1844 struct wx *wx = wx_cast(wdp);
1846 /* XXX overlap with a subrule? */
1847 return classifier_rule_overlaps(&wx->cls, flow);
1851 wx_flow_put(struct wdp *wdp, const struct wdp_flow_put *put,
1852 struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
1854 struct wx *wx = wx_cast(wdp);
1855 struct wx_rule *rule;
1856 uint8_t ofp_table_id;
1858 ofp_table_id = put->flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
1859 if (put->ofp_table_id != 0xff && put->ofp_table_id != ofp_table_id) {
1860 return ofp_mkerr_nicira(OFPET_FLOW_MOD_FAILED, NXFMFC_BAD_TABLE_ID);
1863 rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, put->flow));
1864 if (rule && wx_rule_is_hidden(rule)) {
1869 if (!(put->flags & WDP_PUT_MODIFY)) {
1873 if (!(put->flags & WDP_PUT_CREATE)) {
1876 if ((put->flow->wildcards
1877 ? classifier_count_wild(&wx->cls) >= WX_MAX_WILD
1878 : classifier_count_exact(&wx->cls) >= WX_MAX_EXACT)) {
1879 /* XXX subrules should not count against exact-match limit */
1880 return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
1884 rule = wx_rule_create(NULL, put->actions, put->n_actions,
1885 put->idle_timeout, put->hard_timeout);
1886 cls_rule_from_flow(put->flow, &rule->wr.cr);
1887 rule->wr.ofp_table_id = ofp_table_id;
1888 wx_rule_insert(wx, rule, NULL, 0);
1892 memset(old_stats, 0, sizeof *old_stats);
1902 wx_flow_delete(struct wdp *wdp, struct wdp_rule *wdp_rule,
1903 struct wdp_flow_stats *final_stats)
1905 struct wx *wx = wx_cast(wdp);
1906 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1908 wx_rule_remove(wx, rule);
1910 memset(final_stats, 0, sizeof *final_stats); /* XXX */
1916 wx_flush_rule(struct cls_rule *cls_rule, void *wx_)
1918 struct wx_rule *rule = wx_rule_cast(cls_rule);
1919 struct wx *wx = wx_;
1921 /* Mark the flow as not installed, even though it might really be
1922 * installed, so that wx_rule_remove() doesn't bother trying to uninstall
1923 * it. There is no point in uninstalling it individually since we are
1924 * about to blow away all the flows with xfif_flow_flush(). */
1925 rule->installed = false;
1927 wx_rule_remove(wx, rule);
1933 wx_flow_flush(struct wdp *wdp)
1935 struct wx *wx = wx_cast(wdp);
1937 COVERAGE_INC(wx_flow_flush);
1938 classifier_for_each(&wx->cls, CLS_INC_ALL, wx_flush_rule, wx);
1939 xfif_flow_flush(wx->xfif);
1944 wx_execute(struct wdp *wdp, uint16_t in_port,
1945 const union ofp_action actions[], int n_actions,
1946 const struct ofpbuf *packet)
1948 struct wx *wx = wx_cast(wdp);
1949 struct xflow_actions xflow_actions;
1953 flow_extract((struct ofpbuf *) packet, 0, in_port, &flow);
1954 error = wx_xlate_actions(wx, actions, n_actions, &flow, packet,
1955 NULL, &xflow_actions, NULL);
1959 return xfif_execute(wx->xfif, ofp_port_to_xflow_port(in_port),
1960 xflow_actions.actions, xflow_actions.n_actions,
1965 wx_flow_inject(struct wdp *wdp, struct wdp_rule *wdp_rule,
1966 uint16_t in_port, const struct ofpbuf *packet)
1968 struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
1971 error = wx_execute(wdp, in_port, rule->wr.actions, rule->wr.n_actions,
1974 rule->packet_count++;
1975 rule->byte_count += packet->size;
1976 rule->used = time_msec();
1982 wx_recv_get_mask(const struct wdp *wdp, int *listen_mask)
1984 struct wx *wx = wx_cast(wdp);
1985 int xflow_listen_mask;
1988 error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
1991 if (xflow_listen_mask & XFLOWL_MISS) {
1992 *listen_mask |= 1 << WDP_CHAN_MISS;
1994 if (xflow_listen_mask & XFLOWL_ACTION) {
1995 *listen_mask |= 1 << WDP_CHAN_ACTION;
1997 if (xflow_listen_mask & XFLOWL_SFLOW) {
1998 *listen_mask |= 1 << WDP_CHAN_SFLOW;
2005 wx_recv_set_mask(struct wdp *wdp, int listen_mask)
2007 struct wx *wx = wx_cast(wdp);
2008 int xflow_listen_mask;
2010 wx->wdp_listen_mask = listen_mask;
2012 xflow_listen_mask = 0;
2013 if (listen_mask & (1 << WDP_CHAN_MISS)) {
2014 xflow_listen_mask |= XFLOWL_MISS;
2016 if (listen_mask & (1 << WDP_CHAN_ACTION)) {
2017 xflow_listen_mask |= XFLOWL_ACTION;
2019 wx_purge_ctl_packets__(wx);
2021 if (listen_mask & (1 << WDP_CHAN_SFLOW)) {
2022 xflow_listen_mask |= XFLOWL_SFLOW;
2025 return xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2029 wx_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
2031 struct wx *wx = wx_cast(wdp);
2033 return xfif_get_sflow_probability(wx->xfif, probability);
2037 wx_set_sflow_probability(struct wdp *wdp, uint32_t probability)
2039 struct wx *wx = wx_cast(wdp);
2041 return xfif_set_sflow_probability(wx->xfif, probability);
2045 wx_translate_xflow_msg(struct xflow_msg *msg, struct ofpbuf *payload,
2046 struct wdp_packet *packet)
2048 packet->in_port = xflow_port_to_ofp_port(msg->port);
2049 packet->send_len = 0;
2052 switch (msg->type) {
2053 case _XFLOWL_MISS_NR:
2054 packet->channel = WDP_CHAN_MISS;
2055 packet->payload = payload;
2056 packet->tun_id = msg->arg;
2059 case _XFLOWL_ACTION_NR:
2060 packet->channel = WDP_CHAN_ACTION;
2061 packet->payload = payload;
2062 packet->send_len = msg->arg;
2065 case _XFLOWL_SFLOW_NR:
2067 ofpbuf_delete(payload);
2071 VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
2073 ofpbuf_delete(payload);
2078 static const uint8_t *
2079 get_local_mac(const struct wx *wx)
2081 const struct wx_port *port = wx_port_get(wx, XFLOWP_LOCAL);
2082 return port ? port->wdp_port.opp.hw_addr : NULL;
2085 /* Returns true if 'packet' is a DHCP reply to the local port. Such a reply
2086 * should be sent to the local port regardless of the flow table.
2088 * We only need this, strictly speaking, when in-band control is turned on. */
2090 wx_is_local_dhcp_reply(const struct wx *wx,
2091 const flow_t *flow, const struct ofpbuf *packet)
2093 if (flow->dl_type == htons(ETH_TYPE_IP)
2094 && flow->nw_proto == IP_TYPE_UDP
2095 && flow->tp_src == htons(DHCP_SERVER_PORT)
2096 && flow->tp_dst == htons(DHCP_CLIENT_PORT)
2099 const uint8_t *local_mac = get_local_mac(wx);
2100 struct dhcp_header *dhcp = ofpbuf_at(
2101 packet, (char *)packet->l7 - (char *)packet->data, sizeof *dhcp);
2102 return dhcp && local_mac && eth_addr_equals(dhcp->chaddr, local_mac);
2108 /* Determines whether 'payload' that arrived on 'in_port' is included in any of
2109 * the flows in 'wx''s OpenFlow flow table. If so, then it adds a
2110 * corresponding flow to the xfif's exact-match flow table, taking ownership of
2111 * 'payload', and returns true. If not, it returns false and the caller
2112 * retains ownership of 'payload'. */
2114 wx_explode_rule(struct wx *wx, uint16_t in_port, struct ofpbuf *payload)
2116 struct wx_rule *rule;
2119 flow_extract(payload, 0, xflow_port_to_ofp_port(in_port), &flow);
2121 if (wx_is_local_dhcp_reply(wx, &flow, payload)) {
2122 union xflow_action action;
2124 memset(&action, 0, sizeof(action));
2125 action.output.type = XFLOWAT_OUTPUT;
2126 action.output.port = XFLOWP_LOCAL;
2127 xfif_execute(wx->xfif, in_port, &action, 1, payload);
2130 rule = wx_rule_lookup_valid(wx, &flow);
2135 if (rule->wr.cr.flow.wildcards) {
2136 rule = wx_rule_create_subrule(wx, rule, &flow);
2137 wx_rule_make_actions(wx, rule, payload);
2139 if (!rule->may_install) {
2140 /* The rule is not installable, that is, we need to process every
2141 * packet, so process the current packet and set its actions into
2143 wx_rule_make_actions(wx, rule, payload);
2145 /* XXX revalidate rule if it needs it */
2149 wx_rule_execute(wx, rule, payload, &flow);
2150 wx_rule_reinstall(wx, rule);
2156 wx_recv(struct wdp *wdp, struct wdp_packet *packet)
2158 struct wx *wx = wx_cast(wdp);
2161 if (wx->n_ctl_packets) {
2162 struct wdp_packet *wdp_packet;
2164 wdp_packet = CONTAINER_OF(list_pop_front(&wx->ctl_packets),
2165 struct wdp_packet, list);
2166 wx->n_ctl_packets--;
2168 *packet = *wdp_packet;
2174 /* XXX need to avoid 50*50 potential cost for caller. */
2175 for (i = 0; i < 50; i++) {
2176 struct xflow_msg *msg;
2180 error = xfif_recv(wx->xfif, &buf);
2185 msg = ofpbuf_pull(buf, sizeof *msg);
2186 if (msg->type != _XFLOWL_MISS_NR
2187 || !wx_explode_rule(wx, msg->port, buf)) {
2188 return wx_translate_xflow_msg(msg, buf, packet);
2195 wx_recv_purge_queue__(struct wx *wx, int max, int xflow_listen_mask,
2200 error = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2204 while (max > 0 && (error = xfif_recv(wx->xfif, &buf)) == 0) {
2209 if (error && error != EAGAIN) {
2215 wx_purge_ctl_packets__(struct wx *wx)
2217 struct wdp_packet *this, *next;
2219 LIST_FOR_EACH_SAFE (this, next, list, &wx->ctl_packets) {
2220 list_remove(&this->list);
2221 ofpbuf_delete(this->payload);
2224 wx->n_ctl_packets = 0;
2228 wx_recv_purge(struct wdp *wdp)
2230 struct wx *wx = wx_cast(wdp);
2231 struct xflow_stats xflow_stats;
2232 int xflow_listen_mask;
2235 xfif_get_xf_stats(wx->xfif, &xflow_stats);
2237 error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
2238 if (error || !(xflow_listen_mask & XFLOWL_ALL)) {
2242 if (xflow_listen_mask & XFLOWL_MISS) {
2243 wx_recv_purge_queue__(wx, xflow_stats.max_miss_queue, XFLOWL_MISS,
2246 if (xflow_listen_mask & XFLOWL_ACTION) {
2247 wx_recv_purge_queue__(wx, xflow_stats.max_action_queue, XFLOWL_ACTION,
2249 wx_purge_ctl_packets__(wx);
2251 if (xflow_listen_mask & XFLOWL_SFLOW) {
2252 wx_recv_purge_queue__(wx, xflow_stats.max_sflow_queue, XFLOWL_SFLOW,
2256 retval = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
2257 return retval ? retval : error;
2262 wx_recv_wait(struct wdp *wdp)
2264 struct wx *wx = wx_cast(wdp);
2266 if (wx->n_ctl_packets) {
2267 poll_immediate_wake();
2269 xfif_recv_wait(wx->xfif);
2274 wx_set_ofhooks(struct wdp *wdp, const struct ofhooks *ofhooks, void *aux)
2276 struct wx *wx = wx_cast(wdp);
2278 if (wx->ofhooks == &default_ofhooks) {
2279 mac_learning_destroy(wx->ml);
2283 wx->ofhooks = ofhooks;
2289 wx_revalidate(struct wdp *wdp, tag_type tag)
2291 struct wx *wx = wx_cast(wdp);
2293 tag_set_add(&wx->revalidate_set, tag);
2297 wx_revalidate_all(struct wdp *wdp)
2299 struct wx *wx = wx_cast(wdp);
2301 wx->revalidate_all = true;
2304 static void wx_port_update(struct wx *, const char *devname,
2305 wdp_port_poll_cb_func *cb, void *aux);
2306 static void wx_port_reinit(struct wx *, wdp_port_poll_cb_func *cb, void *aux);
2309 wx_port_process_change(struct wx *wx, int error, char *devname,
2310 wdp_port_poll_cb_func *cb, void *aux)
2312 if (error == ENOBUFS) {
2313 wx_port_reinit(wx, cb, aux);
2314 } else if (!error) {
2315 wx_port_update(wx, devname, cb, aux);
2321 wx_port_refresh_group(struct wx *wx, unsigned int group)
2325 struct wx_port *port;
2327 assert(group == WX_GROUP_ALL || group == WX_GROUP_FLOOD);
2329 ports = xmalloc(hmap_count(&wx->ports) * sizeof *ports);
2331 HMAP_FOR_EACH (port, hmap_node, &wx->ports) {
2332 const struct ofp_phy_port *opp = &port->wdp_port.opp;
2333 if (group == WX_GROUP_ALL || !(opp->config & OFPPC_NO_FLOOD)) {
2334 ports[n_ports++] = port->xflow_port;
2337 xfif_port_group_set(wx->xfif, group, ports, n_ports);
2344 wx_port_refresh_groups(struct wx *wx)
2346 wx_port_refresh_group(wx, WX_GROUP_FLOOD);
2347 wx_port_refresh_group(wx, WX_GROUP_ALL);
2351 wx_port_reinit(struct wx *wx, wdp_port_poll_cb_func *cb, void *aux)
2353 struct svec devnames;
2354 struct wx_port *wx_port;
2355 struct xflow_port *xflow_ports;
2356 size_t n_xflow_ports;
2359 svec_init(&devnames);
2360 HMAP_FOR_EACH (wx_port, hmap_node, &wx->ports) {
2361 svec_add (&devnames, (char *) wx_port->wdp_port.opp.name);
2363 xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);
2364 for (i = 0; i < n_xflow_ports; i++) {
2365 svec_add(&devnames, xflow_ports[i].devname);
2369 svec_sort_unique(&devnames);
2370 for (i = 0; i < devnames.n; i++) {
2371 wx_port_update(wx, devnames.names[i], cb, aux);
2373 svec_destroy(&devnames);
2375 wx_port_refresh_groups(wx);
static bool
wx_make_wdp_port(const struct xflow_port *xflow_port,
                 struct wdp_port *wdp_port)
{
    struct netdev_options netdev_options;
    enum netdev_flags flags;
    struct netdev *netdev;
    bool carrier;
    int error;

    memset(&netdev_options, 0, sizeof netdev_options);
    netdev_options.name = xflow_port->devname;
    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;

    error = netdev_open(&netdev_options, &netdev);
    if (error) {
        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
                     "cannot be opened (%s)",
                     xflow_port->devname, xflow_port->port,
                     xflow_port->devname, strerror(error));
        return false;
    }

    wdp_port->netdev = netdev;
    wdp_port->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
    netdev_get_etheraddr(netdev, wdp_port->opp.hw_addr);
    strncpy((char *) wdp_port->opp.name, xflow_port->devname,
            sizeof wdp_port->opp.name);
    wdp_port->opp.name[sizeof wdp_port->opp.name - 1] = '\0';

    netdev_get_flags(netdev, &flags);
    wdp_port->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;

    netdev_get_carrier(netdev, &carrier);
    wdp_port->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;

    netdev_get_features(netdev,
                        &wdp_port->opp.curr, &wdp_port->opp.advertised,
                        &wdp_port->opp.supported, &wdp_port->opp.peer);

    wdp_port->devname = xstrdup(xflow_port->devname);
    wdp_port->internal = (xflow_port->flags & XFLOW_PORT_INTERNAL) != 0;
    return true;
}
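
/* Creates and returns a new wx_port based on 'xflow_port', or a null pointer
 * if its network device cannot be opened. */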
static struct wx_port *
make_wx_port(const struct xflow_port *xflow_port)
{
    struct wdp_port wdp_port;
    struct wx_port *wx_port;

    if (!wx_make_wdp_port(xflow_port, &wdp_port)) {
        return NULL;
    }

    wx_port = xmalloc(sizeof *wx_port);
    wx_port->wdp_port = wdp_port;
    wx_port->xflow_port = xflow_port->port;
    return wx_port;
}
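
/* Returns true, logging a warning, if 'xflow_port' duplicates the port
 * number or device name of some port already in 'wx', otherwise false. */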
static bool
wx_port_conflicts(const struct wx *wx, const struct xflow_port *xflow_port)
{
    if (wx_port_get(wx, xflow_port->port)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
                     xflow_port->port);
        return true;
    } else if (shash_find(&wx->port_by_name, xflow_port->devname)) {
        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
                     xflow_port->devname);
        return true;
    } else {
        return false;
    }
}
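
/* Returns true if ports 'a_' and 'b_' have the same OpenFlow description. */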
static bool
wx_port_equal(const struct wx_port *a_, const struct wx_port *b_)
{
    const struct ofp_phy_port *a = &a_->wdp_port.opp;
    const struct ofp_phy_port *b = &b_->wdp_port.opp;

    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
    return (a->port_no == b->port_no
            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
            && !strcmp((char *) a->name, (char *) b->name)
            && a->state == b->state
            && a->config == b->config
            && a->curr == b->curr
            && a->advertised == b->advertised
            && a->supported == b->supported
            && a->peer == b->peer);
}
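
/* Adds 'wx_port' to 'wx', indexed by xflow port number and by device name,
 * and starts monitoring its network device for changes. */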
static void
wx_port_install(struct wx *wx, struct wx_port *wx_port)
{
    const struct ofp_phy_port *opp = &wx_port->wdp_port.opp;
    uint16_t xflow_port = ofp_port_to_xflow_port(opp->port_no);
    const char *name = (const char *) opp->name;

    netdev_monitor_add(wx->netdev_monitor, wx_port->wdp_port.netdev);
    hmap_insert(&wx->ports, &wx_port->hmap_node, hash_int(xflow_port, 0));
    shash_add(&wx->port_by_name, name, wx_port);
}
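
/* Removes 'wx_port' from 'wx'.  Does not free it. */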
static void
wx_port_remove(struct wx *wx, struct wx_port *wx_port)
{
    const struct ofp_phy_port *opp = &wx_port->wdp_port.opp;
    const char *name = (const char *) opp->name;

    netdev_monitor_remove(wx->netdev_monitor, wx_port->wdp_port.netdev);
    hmap_remove(&wx->ports, &wx_port->hmap_node);
    shash_delete(&wx->port_by_name, shash_find(&wx->port_by_name, name));
}
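
/* Frees 'wx_port' and the resources that it holds.  A null pointer is
 * harmless. */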
static void
wx_port_free(struct wx_port *wx_port)
{
    if (wx_port) {
        wdp_port_free(&wx_port->wdp_port);
        free(wx_port);
    }
}
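
/* Refreshes 'wx''s idea of the port named 'devname', adding, deleting, or
 * modifying its wx_port as necessary, and reports the change (if any)
 * through 'cb'. */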
static void
wx_port_update(struct wx *wx, const char *devname,
               wdp_port_poll_cb_func *cb, void *aux)
{
    struct xflow_port xflow_port;
    struct wx_port *old_wx_port;
    struct wx_port *new_wx_port;
    int error;

    COVERAGE_INC(wx_update_port);

    /* Query the datapath for port information. */
    error = xfif_port_query_by_name(wx->xfif, devname, &xflow_port);

    /* Find the old wx_port. */
    old_wx_port = shash_find_data(&wx->port_by_name, devname);
    if (!error) {
        if (!old_wx_port) {
            /* There's no port named 'devname' but there might be a port with
             * the same port number.  This could happen if a port is deleted
             * and then a new one added in its place very quickly, or if a port
             * is renamed.  In the former case we want to send an OFPPR_DELETE
             * and an OFPPR_ADD, and in the latter case we want to send a
             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
             * the old port's ifindex against the new port, or perhaps less
             * reliably but more portably by comparing the old port's MAC
             * against the new port's MAC.  However, this code isn't that smart
             * and always sends an OFPPR_MODIFY (XXX). */
            old_wx_port = wx_port_get(wx, xflow_port.port);
        }
    } else if (error != ENOENT && error != ENODEV) {
        VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
                     "%s", strerror(error));
        return;
    }

    /* Create a new wx_port. */
    new_wx_port = !error ? make_wx_port(&xflow_port) : NULL;

    /* Eliminate a few pathological cases. */
    if (!old_wx_port && !new_wx_port) {
        return;
    } else if (old_wx_port && new_wx_port) {
        /* Most of the 'config' bits are OpenFlow soft state, but
         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
         * OpenFlow bits from old_wx_port.  (make_wx_port() only sets
         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
        struct ofp_phy_port *new_opp = &new_wx_port->wdp_port.opp;
        struct ofp_phy_port *old_opp = &old_wx_port->wdp_port.opp;
        new_opp->config |= old_opp->config & ~OFPPC_PORT_DOWN;

        if (wx_port_equal(old_wx_port, new_wx_port)) {
            /* False alarm--no change. */
            wx_port_free(new_wx_port);
            new_wx_port = NULL;
        }
    }

    /* Now deal with the normal cases. */
    if (old_wx_port) {
        wx_port_remove(wx, old_wx_port);
    }
    if (new_wx_port) {
        wx_port_install(wx, new_wx_port);
    }

    if (!old_wx_port) {
        (*cb)(&new_wx_port->wdp_port.opp, OFPPR_ADD, aux);
    } else if (!new_wx_port) {
        (*cb)(&old_wx_port->wdp_port.opp, OFPPR_DELETE, aux);
    } else {
        (*cb)(&new_wx_port->wdp_port.opp, OFPPR_MODIFY, aux);
    }

    /* Update port groups. */
    wx_port_refresh_groups(wx);

    wx_port_free(old_wx_port);
}
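
/* Populates 'wx''s port table from the ports currently in its datapath,
 * skipping any that conflict with ports already added. */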
static int
wx_port_init(struct wx *wx)
{
    struct xflow_port *ports;
    size_t n_ports;
    size_t i;
    int error;

    error = xfif_port_list(wx->xfif, &ports, &n_ports);
    if (error) {
        return error;
    }

    for (i = 0; i < n_ports; i++) {
        const struct xflow_port *xflow_port = &ports[i];
        if (!wx_port_conflicts(wx, xflow_port)) {
            struct wx_port *wx_port = make_wx_port(xflow_port);
            if (wx_port) {
                wx_port_install(wx, wx_port);
            }
        }
    }
    free(ports);

    wx_port_refresh_groups(wx);
    return 0;
}
/* Returns the port in 'wx' with xflow port number 'xflow_port'. */
static struct wx_port *
wx_port_get(const struct wx *wx, uint16_t xflow_port)
{
    struct wx_port *port;

    HMAP_FOR_EACH_IN_BUCKET (port, hmap_node, hash_int(xflow_port, 0),
                             &wx->ports) {
        if (port->xflow_port == xflow_port) {
            return port;
        }
    }
    return NULL;
}
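
/* Registers a wdp provider, based on the functions in this file, for each
 * datapath type that the xflow layer enumerates. */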
void
wdp_xflow_register(void)
{
    static const struct wdp_class wdp_xflow_class = {
        /* ... (earlier wdp_class members elided) ... */
        wx_port_query_by_number,
        wx_port_query_by_name,
        /* ... */
        wx_flow_for_each_match,
        /* ... */
        wx_get_sflow_probability,
        wx_set_sflow_probability,
        /* ... (remaining wdp_class members elided) ... */
    };

    static bool inited = false;

    struct svec types;
    const char *type;
    size_t i;

    if (inited) {
        return;
    }
    inited = true;

    svec_init(&types);
    xf_enumerate_types(&types);
    SVEC_FOR_EACH (i, type, &types) {
        struct wdp_class *class;

        class = xmalloc(sizeof *class);
        *class = wdp_xflow_class;
        class->type = xstrdup(type);
        if (!wdp_register_provider(class)) {
            /* Registration failed; discard the clone. */
            free(class->type);
            free(class);
        }
    }
    svec_destroy(&types);
}
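
/* Implements OFPP_NORMAL for the default hooks: drops frames destined to
 * reserved multicast addresses, learns the source MAC, then outputs to the
 * learned port for the destination MAC or floods if it is unknown. */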
static bool
default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
                         struct xflow_actions *actions, tag_type *tags,
                         uint16_t *nf_output_iface, void *wx_)
{
    struct wx *wx = wx_;
    int out_port;

    /* Drop frames for reserved multicast addresses. */
    if (eth_addr_is_reserved(flow->dl_dst)) {
        return true;
    }

    /* Learn source MAC (but don't try to learn from revalidation). */
    if (packet != NULL) {
        tag_type rev_tag = mac_learning_learn(wx->ml, flow->dl_src,
                                              0, flow->in_port,
                                              GRAT_ARP_LOCK_NONE);
        if (rev_tag) {
            /* The log messages here could actually be useful in debugging,
             * so keep the rate limit relatively high. */
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
            VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
                        ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
            tag_set_add(&wx->revalidate_set, rev_tag);
        }
    }

    /* Determine output port. */
    out_port = mac_learning_lookup_tag(wx->ml, flow->dl_dst, 0, tags,
                                       NULL);
    if (out_port < 0) {
        add_output_group_action(actions, WX_GROUP_FLOOD, nf_output_iface);
    } else if (out_port != flow->in_port) {
        xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = out_port;
        *nf_output_iface = out_port;
    } else {
        /* Drop. */
    }

    return true;
}
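
/* Hooks used when ovs-vswitchd has not registered its own through
 * wx_set_ofhooks(). */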
static const struct ofhooks default_ofhooks = {
    NULL,
    default_normal_ofhook_cb,
    NULL,
    NULL
};