/*
 * Copyright (c) 2009, 2010, 2011, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "ofproto/ofproto-provider.h"

#include "byte-order.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-sflow.h"
#include "poll-loop.h"
#include "unaligned.h"
#include "vlan-bitmap.h"
VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_ctlr_action);
COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(ofproto_dpif_no_packet_in);
COVERAGE_DEFINE(ofproto_dpif_xlate);
COVERAGE_DEFINE(facet_changed_rule);
COVERAGE_DEFINE(facet_invalidated);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_suppress);
/* Maximum depth of flow table recursion (due to resubmit actions) in a
 * flow translation. */
#define MAX_RESUBMIT_RECURSION 32
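
/* Illustrative sketch (not from the original source): a resubmit step is
 * expected to guard its depth roughly as below, using the 'recurse' and
 * 'max_resubmit_trigger' members of struct action_xlate_ctx defined later in
 * this file.  The exact body is an assumption for exposition only.
 *
 *     if (ctx->recurse >= MAX_RESUBMIT_RECURSION) {
 *         ctx->max_resubmit_trigger = true;   // too deep; give up
 *         return;
 *     }
 *     ctx->recurse++;
 *     ...look up the flow in the target table and translate its actions...
 *     ctx->recurse--;
 */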
/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
BUILD_ASSERT_DECL(N_TABLES >= 1 && N_TABLES <= 255);
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    tag_type tag;               /* Caches rule_calculate_tag() result. */

    struct list facets;         /* List of "struct facet"s. */
static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
    return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;

static struct rule_dpif *rule_dpif_lookup(struct ofproto_dpif *,
                                          const struct flow *, uint8_t table);

static void rule_credit_stats(struct rule_dpif *,
                              const struct dpif_flow_stats *);
static void flow_push_stats(struct rule_dpif *, const struct flow *,
                            const struct dpif_flow_stats *);
static tag_type rule_calculate_tag(const struct flow *,
                                   const struct flow_wildcards *,
                                   uint32_t basis);
static void rule_invalidate(const struct rule_dpif *);
#define MAX_MIRRORS 32
typedef uint32_t mirror_mask_t;
#define MIRROR_MASK_C(X) UINT32_C(X)
BUILD_ASSERT_DECL(sizeof(mirror_mask_t) * CHAR_BIT >= MAX_MIRRORS);
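
/* Illustrative sketch (an assumption, for exposition): each ofmirror
 * occupies one bit position of a mirror_mask_t, so set membership is tested
 * and updated with plain bit operations.  mirror_set() and mirror_destroy()
 * below build masks exactly this way:
 *
 *     mirror_mask_t mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
 *
 *     bundle->src_mirrors |= mirror_bit;                // add to set
 *     if (bundle->dst_mirrors & mirror_bit) { ... }     // test membership
 *     bundle->mirror_out &= ~mirror_bit;                // remove from set
 */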
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    size_t idx;                 /* In ofproto's "mirrors" array. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Selection criteria. */
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */
    unsigned long *vlans;       /* Bitmap of chosen VLANs, NULL selects all. */

    /* Output (exactly one of out == NULL and out_vlan == -1 is true). */
    struct ofbundle *out;       /* Output port or NULL. */
    int out_vlan;               /* Output VLAN or -1. */
    mirror_mask_t dup_mirrors;  /* Bitmap of mirrors with the same output. */

    int64_t packet_count;       /* Number of packets sent. */
    int64_t byte_count;         /* Number of bytes sent. */

static void mirror_destroy(struct ofmirror *);
static void update_mirror_stats(struct ofproto_dpif *ofproto,
                                mirror_mask_t mirrors,
                                uint64_t packets, uint64_t bytes);
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    bool floodable;             /* True if no port has OFPUTIL_PC_NO_FLOOD set. */

    /* Port mirroring info. */
    mirror_mask_t src_mirrors;  /* Mirrors triggered when packet received. */
    mirror_mask_t dst_mirrors;  /* Mirrors triggered when packet sent. */
    mirror_mask_t mirror_out;   /* Mirrors that output to this bundle. */

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);
static struct ofbundle *lookup_input_bundle(struct ofproto_dpif *,
                                            uint16_t in_port, bool warn,
                                            struct ofport_dpif **in_ofportp);
/* A controller may use OFPP_NONE as the ingress port to indicate that
 * a packet did not arrive on a "real" port.  'ofpp_none_bundle' exists for
 * when an input bundle is needed for validation (e.g., mirroring or
 * OFPP_NORMAL processing).  It is not connected to an 'ofproto' and does not
 * have any 'port' structs, so care must be taken when dealing with it. */
static struct ofbundle ofpp_none_bundle = {
    .vlan_mode = PORT_VLAN_TRUNK
static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static bool ofbundle_includes_vlan(const struct ofbundle *, uint16_t vlan);
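
/* Usage sketch (illustrative): code that needs an input bundle for a packet
 * injected with OFPP_NONE as its ingress port falls back on the
 * 'ofpp_none_bundle' defined above, roughly as follows.  The variable names
 * here are assumptions.
 *
 *     struct ofport_dpif *in_port;
 *     struct ofbundle *in_bundle;
 *
 *     in_bundle = lookup_input_bundle(ofproto, flow->in_port, true, &in_port);
 *     // For OFPP_NONE, in_bundle may be &ofpp_none_bundle: it has no owning
 *     // ofproto and no ports, so only fields such as vlan_mode are safe to
 *     // consult.
 */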
struct action_xlate_ctx {
    /* action_xlate_ctx_init() initializes these members. */

    struct ofproto_dpif *ofproto;

    /* Flow to which the OpenFlow actions apply.  xlate_actions() will modify
     * this flow when actions change header fields. */

    /* The packet corresponding to 'flow', or a null pointer if we are
     * revalidating without a packet to refer to. */
    const struct ofpbuf *packet;

    /* Should OFPP_NORMAL update the MAC learning table?  Should "learn"
     * actions update the flow table?
     *
     * We want to update these tables if we are actually processing a packet,
     * or if we are accounting for packets that the datapath has processed,
     * but not if we are just revalidating. */

    /* The rule that we are currently translating, or NULL. */
    struct rule_dpif *rule;
    /* Union of the set of TCP flags seen so far in this flow.  (Used only by
     * NXAST_FIN_TIMEOUT.  Set to zero to avoid updating rules' timeouts.) */
    /* If nonnull, flow translation calls this function just before executing
     * a resubmit or OFPP_TABLE action.  In addition, disables logging of
     * traces when the recursion depth is exceeded.
     *
     * 'rule' is the rule being resubmitted into.  It will be null if the
     * resubmit or OFPP_TABLE action didn't find a matching rule.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    void (*resubmit_hook)(struct action_xlate_ctx *, struct rule_dpif *rule);
    /* If nonnull, flow translation credits the specified statistics to each
     * rule reached through a resubmit or OFPP_TABLE action.
     *
     * This is normally null so the client has to set it manually after
     * calling action_xlate_ctx_init(). */
    const struct dpif_flow_stats *resubmit_stats;

    /* xlate_actions() initializes and uses these members.  The client might
     * want to look at them after it returns. */

    struct ofpbuf *odp_actions; /* Datapath actions. */
    tag_type tags;              /* Tags associated with actions. */
    bool may_set_up_flow;       /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    uint16_t nf_output_iface;   /* Output interface index for NetFlow. */
    mirror_mask_t mirrors;      /* Bitmap of associated mirrors. */

    /* xlate_actions() initializes and uses these members, but the client has
     * no reason to look at them. */

    int recurse;                /* Recursion level, via xlate_table_action. */
    bool max_resubmit_trigger;  /* Recursed too deeply during translation. */
    struct flow base_flow;      /* Flow at the last commit. */
    uint32_t orig_skb_priority; /* Priority when packet arrived. */
    uint8_t table_id;           /* OpenFlow table ID where flow was found. */
    uint32_t sflow_n_outputs;   /* Number of output ports. */
    uint16_t sflow_odp_port;    /* Output port for composing sFlow action. */
    uint16_t user_cookie_offset; /* Used for user_action_cookie fixup. */
    bool exit;                  /* No further actions should be processed. */
    struct flow orig_flow;      /* Copy of original flow. */

static void action_xlate_ctx_init(struct action_xlate_ctx *,
                                  struct ofproto_dpif *, const struct flow *,
                                  ovs_be16 initial_tci, struct rule_dpif *,
                                  uint8_t tcp_flags, const struct ofpbuf *);
static void xlate_actions(struct action_xlate_ctx *,
                          const union ofp_action *in, size_t n_in,
                          struct ofpbuf *odp_actions);
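
/* Usage sketch (illustrative, not from the original file): translating a
 * rule's OpenFlow actions into datapath actions with the helpers declared
 * above would look roughly like this.  The variable names and the use of
 * rule->up.actions are assumptions for exposition.
 *
 *     struct action_xlate_ctx ctx;
 *     struct ofpbuf odp_actions;
 *
 *     ofpbuf_init(&odp_actions, 0);
 *     action_xlate_ctx_init(&ctx, ofproto, &flow, flow.vlan_tci,
 *                           rule, tcp_flags, packet);
 *     xlate_actions(&ctx, rule->up.actions, rule->up.n_actions,
 *                   &odp_actions);
 *     // ctx.may_set_up_flow, ctx.tags, etc. are now valid to inspect.
 *     ofpbuf_uninit(&odp_actions);
 */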
static void xlate_actions_for_side_effects(struct action_xlate_ctx *,
                                           const union ofp_action *in,

/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
    struct hmap_node hmap_node; /* In struct ofproto_dpif 'subfacets' hmap. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */

     * To save memory in the common case, 'key' is NULL if 'key_fitness' is
     * ODP_FIT_PERFECT, that is, odp_flow_key_from_flow() can accurately
     * regenerate the ODP flow key from ->facet->flow. */
    enum odp_key_fitness key_fitness;

    long long int used;         /* Time last used; time created if not used. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

     * These should be essentially identical for every subfacet in a facet,
     * but may differ in trivial ways due to VLAN splinters. */
    size_t actions_len;         /* Number of bytes in actions[]. */
    struct nlattr *actions;     /* Datapath actions. */

    bool installed;             /* Installed in datapath? */

    /* This value is normally the same as ->facet->flow.vlan_tci.  Only VLAN
     * splinters can cause it to differ.  This value should be removed when
     * the VLAN splinters feature is no longer needed. */
    ovs_be16 initial_tci;       /* Initial VLAN TCI value. */

static struct subfacet *subfacet_create(struct facet *, enum odp_key_fitness,
                                        const struct nlattr *key,
                                        size_t key_len, ovs_be16 initial_tci);
static struct subfacet *subfacet_find(struct ofproto_dpif *,
                                      const struct nlattr *key, size_t key_len);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_get_key(struct subfacet *, struct odputil_keybuf *,
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_time(struct subfacet *, long long int used);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static void subfacet_make_actions(struct subfacet *,
                                  const struct ofpbuf *packet,
                                  struct ofpbuf *odp_actions);
static int subfacet_install(struct subfacet *,
                            const struct nlattr *actions, size_t actions_len,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);
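
/* Lifecycle sketch (illustrative): a subfacet typically moves through the
 * helpers above in roughly this order.  The sequence is inferred from the
 * declarations, not excerpted from the implementation.
 *
 *     subfacet = subfacet_create(facet, key_fitness, key, key_len,
 *                                initial_tci);
 *     subfacet_make_actions(subfacet, packet, &odp_actions);
 *     subfacet_install(subfacet, odp_actions.data, odp_actions.size, &stats);
 *     ...traffic flows; stats are pulled with subfacet_update_stats()...
 *     subfacet_uninstall(subfacet);  // remove the flow from the datapath
 *     subfacet_destroy(subfacet);    // unlink from the facet and free
 */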
/* An exact-match instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.  Each
 * subfacet tracks the datapath's idea of the exact-match flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and Open
 * vSwitch userspace agree on the definition of a flow key, there is exactly
 * one subfacet per facet.  If the dpif implementation supports more-specific
 * flow matching than userspace, however, a facet can have more than one
 * subfacet, each of which corresponds to some distinction in flow that
 * userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at least
 * one subfacet or it will never expire, leaking memory. */
    struct hmap_node hmap_node; /* In owning ofproto's 'facets' hmap. */
    struct list list_node;      /* In owning rule's 'facets' list. */
    struct rule_dpif *rule;     /* Owning rule. */

    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

     *   - Do include packets and bytes sent "by hand", e.g. with
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;      /* Number of packets received. */
    uint64_t byte_count;        /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count; /* Number of packets from last stats push. */
    uint64_t prev_byte_count;   /* Number of bytes from last stats push. */
    long long int prev_used;    /* Used time from last stats push. */

    uint64_t accounted_bytes;   /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint8_t tcp_flags;          /* TCP flags seen for this 'rule'. */
    /* Properties of datapath actions.
     *
     * Every subfacet has its own actions because actions can differ slightly
     * between splintered and non-splintered subfacets due to the VLAN tag
     * being initially different (present vs. absent).  All of them have these
     * properties in common so we just store one copy of them here. */
    bool may_install;           /* True ordinarily; false if the actions must
                                 * be reassessed for every packet. */
    bool has_learn;             /* Actions include NXAST_LEARN? */
    bool has_normal;            /* Actions output to OFPP_NORMAL? */
    bool has_fin_timeout;       /* Actions include NXAST_FIN_TIMEOUT? */
    tag_type tags;              /* Tags that would require revalidation. */
    mirror_mask_t mirrors;      /* Bitmap of dependent mirrors. */

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.) */
    struct subfacet one_subfacet;
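
/* Accounting sketch (illustrative): given the ownership rules spelled out in
 * the comments above, the total traffic attributable to a rule at any
 * instant is the sum of three layers.  This pseudocode is an assumption for
 * exposition, not a function from this file.
 *
 *     total = rule->packet_count;                  // folded-in history
 *     for each facet on rule->facets:
 *         total += facet->packet_count;            // flushed subfacet stats
 *         for each subfacet on facet->subfacets:
 *             total += subfacet->dp_packet_count;  // still in the datapath
 */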
static struct facet *facet_create(struct rule_dpif *,
                                  const struct flow *, uint32_t hash);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *,
                                const struct flow *, uint32_t hash);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *, uint32_t hash);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_update_time(struct facet *, long long int used);
static void facet_reset_counters(struct facet *);
static void facet_push_stats(struct facet *);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);

static bool facet_is_controller_flow(struct facet *);

    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    tag_type tag;               /* Tag associated with this port. */
    uint32_t bond_stable_id;    /* stable_id to use as bond slave, or 0. */
    bool may_enable;            /* May be enabled in bonds. */
    long long int carrier_seq;  /* Carrier status changes. */

    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    struct hmap priorities;     /* Map of attached 'priority_to_dscp's. */

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs
     * when VLAN devices are not used.  When broken device drivers are no
     * longer in widespread use, we will delete these interfaces. */
    uint16_t realdev_ofp_port;

/* Node in 'ofport_dpif''s 'priorities' map.  Used to maintain a map from
 * 'priority' (the datapath's term for QoS queue) to the dscp bits which all
 * traffic egressing the 'ofport' with that priority should be marked with. */
struct priority_to_dscp {
    struct hmap_node hmap_node; /* Node in 'ofport_dpif''s 'priorities' map. */
    uint32_t priority;          /* Priority of this queue (see struct flow). */

    uint8_t dscp;               /* DSCP bits to mark outgoing traffic with. */

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    uint16_t realdev_ofp_port;
    uint16_t vlandev_ofp_port;

static uint32_t vsp_realdev_to_vlandev(const struct ofproto_dpif *,
                                       uint32_t realdev, ovs_be16 vlan_tci);
static uint16_t vsp_vlandev_to_realdev(const struct ofproto_dpif *,
                                       uint16_t vlandev, int *vid);
static bool vsp_adjust_flow(const struct ofproto_dpif *, struct flow *);
static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, uint16_t realdev_ofp_port, int vid);
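
/* Worked example (hypothetical port numbers): with VLAN splinters in use, a
 * Linux VLAN device such as "eth0.10" stands in for VLAN 10 on the real
 * device "eth0".  The helpers above translate in both directions, roughly:
 *
 *     vlandev_odp = vsp_realdev_to_vlandev(ofproto, eth0_odp_port,
 *                                          htons(10));  // -> eth0.10's port
 *     realdev_ofp = vsp_vlandev_to_realdev(ofproto, eth0_10_ofp_port,
 *                                          &vid);       // -> eth0, vid == 10
 *
 * The exact VLAN TCI encoding passed in is an assumption here; only the
 * function signatures come from the declarations above. */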
static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
    assert(!ofport || ofport->ofproto->ofproto_class == &ofproto_dpif_class);
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
static void port_run(struct ofport_dpif *);
static void port_wait(struct ofport_dpif *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_clear_priorities(struct ofport_dpif *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;

/* Extra information about a classifier table.
 * Currently used just for optimized flow revalidation. */

    /* If either of these is nonnull, then this table has a form that allows
     * flows to be tagged to avoid revalidating most flows for the most
     * common kinds of flow table changes. */
    struct cls_table *catchall_table; /* Table that wildcards all fields. */
    struct cls_table *other_table;    /* Table with any other wildcard set. */
    uint32_t basis;                   /* Keeps each table's tags separate. */
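
/* Sketch of how these tags short-circuit revalidation (an illustration; the
 * authoritative logic is in rule_calculate_tag() and run() in this file).
 * A rule in a table with 'other_table' nonnull is tagged from the fields it
 * matches, mixed with the table's 'basis':
 *
 *     tag = rule_calculate_tag(&flow, &wildcards, table->basis);
 *
 * A flow table change then only adds that rule's tag to 'revalidate_set', so
 * run() revalidates just the facets whose 'tags' intersect the set instead
 * of every facet in the ofproto. */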
struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */

    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    struct ofmirror *mirrors[MAX_MIRRORS];

    bool has_bonded_bundles;

    struct timer next_expiration;

    struct hmap subfacets;
    struct governor *governor;

    struct table_dpif tables[N_TABLES];
    bool need_revalidate;
    struct tag_set revalidate_set;

    /* Support for debugging async flow mods. */
    struct list completions;

    bool has_bundle_action;     /* True when the first bundle action appears. */
    struct netdev_stats stats;  /* To account packets generated and consumed in

    long long int stp_last_tick;

    /* VLAN splinters. */
    struct hmap realdev_vid_map; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map;     /* vlandev -> (realdev,vid). */

/* Defer flow mod completion until "ovs-appctl ofproto/unclog"?  (Useful only
 * for debugging the asynchronous flow_mod implementation.) */

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
    assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);

static struct ofport_dpif *get_ofp_port(struct ofproto_dpif *,
static struct ofport_dpif *get_odp_port(struct ofproto_dpif *,
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *, ovs_be16 initial_tci,

/* Packet processing. */
static void update_learning_table(struct ofproto_dpif *,
                                  const struct flow *, int vlan,

#define FLOW_MISS_MAX_BATCH 50
static int handle_upcalls(struct ofproto_dpif *, unsigned int max_batch);

/* Flow expiration. */
static int expire(struct ofproto_dpif *);

static void send_netflow_active_timeouts(struct ofproto_dpif *);

static int send_packet(const struct ofport_dpif *, struct ofpbuf *packet);
compose_sflow_action(const struct ofproto_dpif *, struct ofpbuf *odp_actions,
                     const struct flow *, uint32_t odp_port);
static void add_mirror_actions(struct action_xlate_ctx *ctx,
                               const struct flow *flow);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
/* Factory functions. */

enumerate_types(struct sset *types)
    dp_enumerate_types(types);

enumerate_names(const char *type, struct sset *names)
    return dp_enumerate_names(type, names);

del(const char *type, const char *name)
    error = dpif_open(name, type, &dpif);
        error = dpif_delete(dpif);

/* Basic life-cycle. */

static struct ofproto *
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);

dealloc(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

construct(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    const char *name = ofproto->up.name;

    error = dpif_create_and_open(name, ofproto->up.type, &ofproto->dpif);
        VLOG_ERR("failed to open datapath %s: %s", name, strerror(error));

    ofproto->max_ports = dpif_get_max_ports(ofproto->dpif);
    ofproto->n_matches = 0;

    dpif_flow_flush(ofproto->dpif);
    dpif_recv_purge(ofproto->dpif);

    error = dpif_recv_set(ofproto->dpif, true);
        VLOG_ERR("failed to listen on datapath %s: %s", name, strerror(error));
        dpif_close(ofproto->dpif);

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    for (i = 0; i < MAX_MIRRORS; i++) {
        ofproto->mirrors[i] = NULL;
    ofproto->has_bonded_bundles = false;

    timer_set_duration(&ofproto->next_expiration, 1000);

    hmap_init(&ofproto->facets);
    hmap_init(&ofproto->subfacets);
    ofproto->governor = NULL;

    for (i = 0; i < N_TABLES; i++) {
        struct table_dpif *table = &ofproto->tables[i];

        table->catchall_table = NULL;
        table->other_table = NULL;
        table->basis = random_uint32();
    ofproto->need_revalidate = false;
    tag_set_init(&ofproto->revalidate_set);

    list_init(&ofproto->completions);

    ofproto_dpif_unixctl_init();

    ofproto->has_mirrors = false;
    ofproto->has_bundle_action = false;

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
complete_operations(struct ofproto_dpif *ofproto)
    struct dpif_completion *c, *next;

    LIST_FOR_EACH_SAFE (c, next, list_node, &ofproto->completions) {
        ofoperation_complete(c->op, 0);
        list_remove(&c->list_node);

destruct(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct oftable *table;

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);
    complete_operations(ofproto);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        cls_cursor_init(&cursor, &table->cls, NULL);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_destroy(&rule->up);

    for (i = 0; i < MAX_MIRRORS; i++) {
        mirror_destroy(ofproto->mirrors[i]);

    netflow_destroy(ofproto->netflow);
    dpif_sflow_destroy(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_destroy(ofproto->ml);

    hmap_destroy(&ofproto->facets);
    hmap_destroy(&ofproto->subfacets);
    governor_destroy(ofproto->governor);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    dpif_close(ofproto->dpif);

run_fast(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    /* Handle one or more batches of upcalls, until there's nothing left to do
     * or until we do a fixed total amount of work.
     *
     * We do work in batches because it can be much cheaper to set up a number
     * of flows and fire off their packets all at once.  We do multiple
     * batches because in some cases handling a packet can cause another
     * packet to be queued almost immediately as part of the return flow.
     * Both optimizations can make major improvements on some benchmarks and
     * presumably for real traffic as well. */
    while (work < FLOW_MISS_MAX_BATCH) {
        int retval = handle_upcalls(ofproto, FLOW_MISS_MAX_BATCH - work);

run(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    complete_operations(ofproto);

    dpif_run(ofproto->dpif);

    error = run_fast(ofproto_);

    if (timer_expired(&ofproto->next_expiration)) {
        int delay = expire(ofproto);
        timer_set_duration(&ofproto->next_expiration, delay);

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {

    mac_learning_run(ofproto->ml, &ofproto->revalidate_set);

    /* Now revalidate if there's anything to do. */
    if (ofproto->need_revalidate
        || !tag_set_is_empty(&ofproto->revalidate_set)) {
        struct tag_set revalidate_set = ofproto->revalidate_set;
        bool revalidate_all = ofproto->need_revalidate;
        struct facet *facet, *next;

        /* Clear the revalidation flags. */
        tag_set_init(&ofproto->revalidate_set);
        ofproto->need_revalidate = false;

        HMAP_FOR_EACH_SAFE (facet, next, hmap_node, &ofproto->facets) {
                || tag_set_intersects(&revalidate_set, facet->tags)) {
                facet_revalidate(facet);

    /* Check the consistency of a random facet, to aid debugging. */
    if (!hmap_is_empty(&ofproto->facets) && !ofproto->need_revalidate) {
        facet = CONTAINER_OF(hmap_random_node(&ofproto->facets),
                             struct facet, hmap_node);
        if (!tag_set_intersects(&ofproto->revalidate_set, facet->tags)) {
            if (!facet_check_consistency(facet)) {
                ofproto->need_revalidate = true;

    if (ofproto->governor) {
        governor_run(ofproto->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&ofproto->subfacets);
        if (n_subfacets * 4 < ofproto->up.flow_eviction_threshold
            && governor_is_idle(ofproto->governor)) {
            governor_destroy(ofproto->governor);
            ofproto->governor = NULL;
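
        /* Worked example (assumed default): with 'flow_eviction_threshold'
         * at 1000, the governor only goes away once the subfacet count falls
         * below 1000 / 4 = 250 and the governor reports itself idle, even
         * though it was created when the count climbed well above that.
         * The gap between the two limits is the hysteresis that prevents
         * churn around a single boundary. */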
wait(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;

    if (!clogged && !list_is_empty(&ofproto->completions)) {
        poll_immediate_wake();

    dpif_wait(ofproto->dpif);
    dpif_recv_wait(ofproto->dpif);
    if (ofproto->sflow) {
        dpif_sflow_wait(ofproto->sflow);
    if (!tag_set_is_empty(&ofproto->revalidate_set)) {
        poll_immediate_wake();
    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
    if (ofproto->netflow) {
        netflow_wait(ofproto->netflow);
    mac_learning_wait(ofproto->ml);
    if (ofproto->need_revalidate) {
        /* Shouldn't happen, but if it does just go around again. */
        VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
        poll_immediate_wake();
    timer_wait(&ofproto->next_expiration);
    if (ofproto->governor) {
        governor_wait(ofproto->governor);

flush(struct ofproto *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct facet *facet, *next_facet;

    HMAP_FOR_EACH_SAFE (facet, next_facet, hmap_node, &ofproto->facets) {
        /* Mark the facet as not installed so that facet_remove() doesn't
         * bother trying to uninstall it.  There is no point in uninstalling
         * it individually since we are about to blow away all the facets
         * with dpif_flow_flush(). */
        struct subfacet *subfacet;

        LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
            subfacet->installed = false;
            subfacet->dp_packet_count = 0;
            subfacet->dp_byte_count = 0;

    dpif_flow_flush(ofproto->dpif);
get_features(struct ofproto *ofproto_ OVS_UNUSED,
             bool *arp_match_ip, enum ofputil_action_bitmap *actions)
    *arp_match_ip = true;
    *actions = (OFPUTIL_A_OUTPUT |
                OFPUTIL_A_SET_VLAN_VID |
                OFPUTIL_A_SET_VLAN_PCP |
                OFPUTIL_A_STRIP_VLAN |
                OFPUTIL_A_SET_DL_SRC |
                OFPUTIL_A_SET_DL_DST |
                OFPUTIL_A_SET_NW_SRC |
                OFPUTIL_A_SET_NW_DST |
                OFPUTIL_A_SET_NW_TOS |
                OFPUTIL_A_SET_TP_SRC |
                OFPUTIL_A_SET_TP_DST |

get_tables(struct ofproto *ofproto_, struct ofp_table_stats *ots)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_dp_stats s;

    strcpy(ots->name, "classifier");

    dpif_get_dp_stats(ofproto->dpif, &s);
    put_32aligned_be64(&ots->lookup_count, htonll(s.n_hit + s.n_missed));
    put_32aligned_be64(&ots->matched_count,
                       htonll(s.n_hit + ofproto->n_matches));
static struct ofport *
    struct ofport_dpif *port = xmalloc(sizeof *port);

port_dealloc(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);

port_construct(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    port->odp_port = ofp_port_to_odp_port(port->up.ofp_port);
    port->bundle = NULL;
    port->tag = tag_create_random();
    port->may_enable = true;
    port->stp_port = NULL;
    port->stp_state = STP_DISABLED;
    hmap_init(&port->priorities);
    port->realdev_ofp_port = 0;
    port->vlandev_vid = 0;
    port->carrier_seq = netdev_get_carrier_resets(port->up.netdev);

    if (ofproto->sflow) {
        dpif_sflow_add_port(ofproto->sflow, port_);

port_destruct(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ofproto->need_revalidate = true;
    bundle_remove(port_);
    set_cfm(port_, NULL);
    if (ofproto->sflow) {
        dpif_sflow_del_port(ofproto->sflow, port->odp_port);

    ofport_clear_priorities(port);
    hmap_destroy(&port->priorities);

port_modified(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);

    if (port->bundle && port->bundle->bond) {
        bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);

port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    enum ofputil_port_config changed = old_config ^ port->up.pp.config;

    if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
                   OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD)) {
        ofproto->need_revalidate = true;

        if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
            bundle_update(port->bundle);

set_sflow(struct ofproto *ofproto_,
          const struct ofproto_sflow_options *sflow_options)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct dpif_sflow *ds = ofproto->sflow;

    if (sflow_options) {
            struct ofport_dpif *ofport;

            ds = ofproto->sflow = dpif_sflow_create(ofproto->dpif);
            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                dpif_sflow_add_port(ds, &ofport->up);
            ofproto->need_revalidate = true;
        dpif_sflow_set_options(ds, sflow_options);
            dpif_sflow_destroy(ds);
            ofproto->need_revalidate = true;
            ofproto->sflow = NULL;
set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

            struct ofproto_dpif *ofproto;

            ofproto = ofproto_dpif_cast(ofport->up.ofproto);
            ofproto->need_revalidate = true;
            ofport->cfm = cfm_create(netdev_get_name(ofport->up.netdev));

        if (cfm_configure(ofport->cfm, s)) {

    cfm_destroy(ofport->cfm);

get_cfm_fault(const struct ofport *ofport_)
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_get_fault(ofport->cfm) : -1;

get_cfm_remote_mpids(const struct ofport *ofport_, const uint64_t **rmps,
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

        cfm_get_remote_mpids(ofport->cfm, rmps, n_rmps);

get_cfm_health(const struct ofport *ofport_)
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    return ofport->cfm ? cfm_get_health(ofport->cfm) : -1;
/* Spanning Tree. */

send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
    struct ofproto_dpif *ofproto = ofproto_;
    struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
    struct ofport_dpif *ofport;

    ofport = stp_port_get_aux(sp);
        VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
                     ofproto->up.name, port_num);
        struct eth_header *eth = pkt->l2;

        netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
        if (eth_addr_is_zero(eth->eth_src)) {
            VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
                         "with unknown MAC", ofproto->up.name, port_num);
            send_packet(ofport, pkt);

/* Configures STP on 'ofproto_' using the settings defined in 's'. */
set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

    /* Only revalidate flows if the configuration changed. */
    if (!s != !ofproto->stp) {
        ofproto->need_revalidate = true;

        if (!ofproto->stp) {
            ofproto->stp = stp_create(ofproto_->name, s->system_id,
                                      send_bpdu_cb, ofproto);
            ofproto->stp_last_tick = time_msec();

        stp_set_bridge_id(ofproto->stp, s->system_id);
        stp_set_bridge_priority(ofproto->stp, s->priority);
        stp_set_hello_time(ofproto->stp, s->hello_time);
        stp_set_max_age(ofproto->stp, s->max_age);
        stp_set_forward_delay(ofproto->stp, s->fwd_delay);
        struct ofport *ofport;

        HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
            set_stp_port(ofport, NULL);

        stp_destroy(ofproto->stp);
        ofproto->stp = NULL;
get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);

        s->bridge_id = stp_get_bridge_id(ofproto->stp);
        s->designated_root = stp_get_designated_root(ofproto->stp);
        s->root_path_cost = stp_get_root_path_cost(ofproto->stp);

update_stp_port_state(struct ofport_dpif *ofport)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    enum stp_state state;

    /* Figure out new state. */
    state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)

    if (ofport->stp_state != state) {
        enum ofputil_port_state of_state;

        VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
                    netdev_get_name(ofport->up.netdev),
                    stp_state_name(ofport->stp_state),
                    stp_state_name(state));
        if (stp_learn_in_state(ofport->stp_state)
            != stp_learn_in_state(state)) {
            /* xxx Learning action flows should also be flushed. */
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
        fwd_change = stp_forward_in_state(ofport->stp_state)
            != stp_forward_in_state(state);

        ofproto->need_revalidate = true;
        ofport->stp_state = state;
        ofport->stp_state_entered = time_msec();

        if (fwd_change && ofport->bundle) {
            bundle_update(ofport->bundle);

        /* Update the STP state bits in the OpenFlow port description. */
        of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
        of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
                     : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
                     : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
                     : state == STP_BLOCKING ? OFPUTIL_PS_STP_BLOCK
        ofproto_port_set_state(&ofport->up, of_state);
/* Configures STP on 'ofport_' using the settings defined in 's'.  The
 * caller is responsible for assigning STP port numbers and ensuring
 * there are no duplicates. */
set_stp_port(struct ofport *ofport_,
             const struct ofproto_port_stp_settings *s)
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!s || !s->enable) {
            ofport->stp_port = NULL;
            stp_port_disable(sp);
            update_stp_port_state(ofport);
    } else if (sp && stp_port_no(sp) != s->port_num
               && ofport == stp_port_get_aux(sp)) {
        /* The port-id changed, so disable the old one if it's not
         * already in use by another port. */
        stp_port_disable(sp);

    sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
    stp_port_enable(sp);

    stp_port_set_aux(sp, ofport);
    stp_port_set_priority(sp, s->priority);
    stp_port_set_path_cost(sp, s->path_cost);

    update_stp_port_state(ofport);

get_stp_port_status(struct ofport *ofport_,
                    struct ofproto_port_stp_status *s)
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct stp_port *sp = ofport->stp_port;

    if (!ofproto->stp || !sp) {

    s->port_id = stp_port_get_id(sp);
    s->state = stp_port_get_state(sp);
    s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
    s->role = stp_port_get_role(sp);
    stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
stp_run(struct ofproto_dpif *ofproto)
        long long int now = time_msec();
        long long int elapsed = now - ofproto->stp_last_tick;
        struct stp_port *sp;

        stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
        ofproto->stp_last_tick = now;

        while (stp_get_changed_port(ofproto->stp, &sp)) {
            struct ofport_dpif *ofport = stp_port_get_aux(sp);

                update_stp_port_state(ofport);

        if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
            mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);

stp_wait(struct ofproto_dpif *ofproto)
        poll_timer_wait(1000);

/* Returns true if STP should process 'flow'. */
stp_should_process_flow(const struct flow *flow)
    return eth_addr_equals(flow->dl_dst, eth_addr_stp);

stp_process_packet(const struct ofport_dpif *ofport,
                   const struct ofpbuf *packet)
    struct ofpbuf payload = *packet;
    struct eth_header *eth = payload.data;
    struct stp_port *sp = ofport->stp_port;
    /* Sink packets on ports that have STP disabled when the bridge has
     * STP enabled. */
    if (!sp || stp_port_get_state(sp) == STP_DISABLED) {

    /* Trim off padding on payload. */
    if (payload.size > ntohs(eth->eth_type) + ETH_HEADER_LEN) {
        payload.size = ntohs(eth->eth_type) + ETH_HEADER_LEN;

    if (ofpbuf_try_pull(&payload, ETH_HEADER_LEN + LLC_HEADER_LEN)) {
        stp_received_bpdu(sp, payload.data, payload.size);
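
/* Usage sketch (illustrative): the packet-in path is expected to combine the
 * two helpers above roughly as follows before normal flow processing:
 *
 *     if (stp_should_process_flow(&flow)) {
 *         if (packet) {
 *             stp_process_packet(ofport, packet);
 *         }
 *         return;   // 01:80:C2:00:00:00 traffic never hits the flow table
 *     }
 *
 * The surrounding control flow is an assumption; only the helper names and
 * the eth_addr_stp destination check come from this file. */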
static struct priority_to_dscp *
get_priority(const struct ofport_dpif *ofport, uint32_t priority)
    struct priority_to_dscp *pdscp;

    hash = hash_int(priority, 0);
    HMAP_FOR_EACH_IN_BUCKET (pdscp, hmap_node, hash, &ofport->priorities) {
        if (pdscp->priority == priority) {

ofport_clear_priorities(struct ofport_dpif *ofport)
    struct priority_to_dscp *pdscp, *next;

    HMAP_FOR_EACH_SAFE (pdscp, next, hmap_node, &ofport->priorities) {
        hmap_remove(&ofport->priorities, &pdscp->hmap_node);

set_queues(struct ofport *ofport_,
           const struct ofproto_port_queue *qdscp_list,
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
    struct hmap new = HMAP_INITIALIZER(&new);

    for (i = 0; i < n_qdscp; i++) {
        struct priority_to_dscp *pdscp;

        dscp = (qdscp_list[i].dscp << 2) & IP_DSCP_MASK;
        if (dpif_queue_to_priority(ofproto->dpif, qdscp_list[i].queue,

        pdscp = get_priority(ofport, priority);
            hmap_remove(&ofport->priorities, &pdscp->hmap_node);
            pdscp = xmalloc(sizeof *pdscp);
            pdscp->priority = priority;
            ofproto->need_revalidate = true;

        if (pdscp->dscp != dscp) {
            ofproto->need_revalidate = true;

        hmap_insert(&new, &pdscp->hmap_node, hash_int(pdscp->priority, 0));

    if (!hmap_is_empty(&ofport->priorities)) {
        ofport_clear_priorities(ofport);
        ofproto->need_revalidate = true;

    hmap_swap(&new, &ofport->priorities);
/* Expires all MAC learning entries associated with 'bundle' and forces its
 * ofproto to revalidate every flow.
 *
 * Normally MAC learning entries are removed only from the ofproto associated
 * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
 * are removed from every ofproto.  When patch ports and SLB bonds are in use
 * and a VM migration happens and the gratuitous ARPs are somehow lost, this
 * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
 * with the host from which it migrated. */
bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
    struct ofproto_dpif *ofproto = bundle->ofproto;
    struct mac_learning *ml = ofproto->ml;
    struct mac_entry *mac, *next_mac;

    ofproto->need_revalidate = true;
    LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
        if (mac->port.p == bundle) {
                struct ofproto_dpif *o;

                HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
                        struct mac_entry *e;

                        e = mac_learning_lookup(o->ml, mac->mac, mac->vlan,
                            tag_set_add(&o->revalidate_set, e->tag);
                            mac_learning_expire(o->ml, e);

            mac_learning_expire(ml, mac);

static struct ofbundle *
bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
    struct ofbundle *bundle;

    HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
                             &ofproto->bundles) {
        if (bundle->aux == aux) {

/* Looks up each of the 'n_auxes' pointers in 'auxes' as bundles and adds the
 * ones that are found to 'bundles'. */
bundle_lookup_multiple(struct ofproto_dpif *ofproto,
                       void **auxes, size_t n_auxes,
                       struct hmapx *bundles)
    hmapx_init(bundles);
    for (i = 0; i < n_auxes; i++) {
        struct ofbundle *bundle = bundle_lookup(ofproto, auxes[i]);
            hmapx_add(bundles, bundle);
bundle_update(struct ofbundle *bundle)
    struct ofport_dpif *port;

    bundle->floodable = true;
    LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;

bundle_del_port(struct ofport_dpif *port)
    struct ofbundle *bundle = port->bundle;

    bundle->ofproto->need_revalidate = true;

    list_remove(&port->bundle_node);
    port->bundle = NULL;

        lacp_slave_unregister(bundle->lacp, port);

        bond_slave_unregister(bundle->bond, port);

    bundle_update(bundle);

bundle_add_port(struct ofbundle *bundle, uint32_t ofp_port,
                struct lacp_slave_settings *lacp,
                uint32_t bond_stable_id)
    struct ofport_dpif *port;

    port = get_ofp_port(bundle->ofproto, ofp_port);

    if (port->bundle != bundle) {
        bundle->ofproto->need_revalidate = true;
            bundle_del_port(port);

        port->bundle = bundle;
        list_push_back(&bundle->ports, &port->bundle_node);
        if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
            || !stp_forward_in_state(port->stp_state)) {
            bundle->floodable = false;

        port->bundle->ofproto->need_revalidate = true;
        lacp_slave_register(bundle->lacp, port, lacp);

    port->bond_stable_id = bond_stable_id;

bundle_destroy(struct ofbundle *bundle)
    struct ofproto_dpif *ofproto;
    struct ofport_dpif *port, *next_port;

    ofproto = bundle->ofproto;
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

            if (m->out == bundle) {
            } else if (hmapx_find_and_delete(&m->srcs, bundle)
                       || hmapx_find_and_delete(&m->dsts, bundle)) {
                ofproto->need_revalidate = true;

    LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
        bundle_del_port(port);

    bundle_flush_macs(bundle, true);
    hmap_remove(&ofproto->bundles, &bundle->hmap_node);
    free(bundle->trunks);
    lacp_destroy(bundle->lacp);
    bond_destroy(bundle->bond);
bundle_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_bundle_settings *s)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    bool need_flush = false;
    struct ofport_dpif *port;
    struct ofbundle *bundle;
    unsigned long *trunks;

        bundle_destroy(bundle_lookup(ofproto, aux));

    assert(s->n_slaves == 1 || s->bond != NULL);
    assert((s->lacp != NULL) == (s->lacp_slaves != NULL));

    bundle = bundle_lookup(ofproto, aux);
        bundle = xmalloc(sizeof *bundle);

        bundle->ofproto = ofproto;
        hmap_insert(&ofproto->bundles, &bundle->hmap_node,
                    hash_pointer(aux, 0));
        bundle->name = NULL;

        list_init(&bundle->ports);
        bundle->vlan_mode = PORT_VLAN_TRUNK;
        bundle->trunks = NULL;
        bundle->use_priority_tags = s->use_priority_tags;
        bundle->lacp = NULL;
        bundle->bond = NULL;

        bundle->floodable = true;

        bundle->src_mirrors = 0;
        bundle->dst_mirrors = 0;
        bundle->mirror_out = 0;

    if (!bundle->name || strcmp(s->name, bundle->name)) {
        bundle->name = xstrdup(s->name);

        if (!bundle->lacp) {
            ofproto->need_revalidate = true;
            bundle->lacp = lacp_create();
        lacp_configure(bundle->lacp, s->lacp);
        lacp_destroy(bundle->lacp);
        bundle->lacp = NULL;

    /* Update set of ports. */
    for (i = 0; i < s->n_slaves; i++) {
        if (!bundle_add_port(bundle, s->slaves[i],
                             s->lacp ? &s->lacp_slaves[i] : NULL,
                             s->bond_stable_ids ? s->bond_stable_ids[i] : 0)) {
    if (!ok || list_size(&bundle->ports) != s->n_slaves) {
        struct ofport_dpif *next_port;

        LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
            for (i = 0; i < s->n_slaves; i++) {
                if (s->slaves[i] == port->up.ofp_port) {

            bundle_del_port(port);
    assert(list_size(&bundle->ports) <= s->n_slaves);

    if (list_is_empty(&bundle->ports)) {
        bundle_destroy(bundle);

    /* Set VLAN tagging mode. */
    if (s->vlan_mode != bundle->vlan_mode
        || s->use_priority_tags != bundle->use_priority_tags) {
        bundle->vlan_mode = s->vlan_mode;
        bundle->use_priority_tags = s->use_priority_tags;

    vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
            : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
    if (vlan != bundle->vlan) {
        bundle->vlan = vlan;

    /* Get trunked VLANs. */
    switch (s->vlan_mode) {
    case PORT_VLAN_ACCESS:

    case PORT_VLAN_TRUNK:
        trunks = (unsigned long *) s->trunks;

    case PORT_VLAN_NATIVE_UNTAGGED:
    case PORT_VLAN_NATIVE_TAGGED:
        if (vlan != 0 && (!s->trunks
                          || !bitmap_is_set(s->trunks, vlan)
                          || bitmap_is_set(s->trunks, 0))) {
            /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
                trunks = bitmap_clone(s->trunks, 4096);
                trunks = bitmap_allocate1(4096);
            bitmap_set1(trunks, vlan);
            bitmap_set0(trunks, 0);
            trunks = (unsigned long *) s->trunks;

    if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
        free(bundle->trunks);
        if (trunks == s->trunks) {
            bundle->trunks = vlan_bitmap_clone(trunks);
            bundle->trunks = trunks;
    if (trunks != s->trunks) {

    if (!list_is_short(&bundle->ports)) {
        bundle->ofproto->has_bonded_bundles = true;
            if (bond_reconfigure(bundle->bond, s->bond)) {
                ofproto->need_revalidate = true;
            bundle->bond = bond_create(s->bond);
            ofproto->need_revalidate = true;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_register(bundle->bond, port, port->bond_stable_id,
        bond_destroy(bundle->bond);
        bundle->bond = NULL;

    /* If we changed something that would affect MAC learning, un-learn
     * everything on this port and force flow revalidation. */
        bundle_flush_macs(bundle, false);
bundle_remove(struct ofport *port_)
    struct ofport_dpif *port = ofport_dpif_cast(port_);
    struct ofbundle *bundle = port->bundle;

        bundle_del_port(port);
        if (list_is_empty(&bundle->ports)) {
            bundle_destroy(bundle);
        } else if (list_is_short(&bundle->ports)) {
            bond_destroy(bundle->bond);
            bundle->bond = NULL;

send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
    struct ofport_dpif *port = port_;
    uint8_t ea[ETH_ADDR_LEN];

    error = netdev_get_etheraddr(port->up.netdev, ea);
        struct ofpbuf packet;

        ofpbuf_init(&packet, 0);
        packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
        memcpy(packet_pdu, pdu, pdu_size);

        send_packet(port, &packet);
        ofpbuf_uninit(&packet);
        VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
                    "%s (%s)", port->bundle->name,
                    netdev_get_name(port->up.netdev), strerror(error));

bundle_send_learning_packets(struct ofbundle *bundle)
    struct ofproto_dpif *ofproto = bundle->ofproto;
    int error, n_packets, n_errors;
    struct mac_entry *e;

    error = n_packets = n_errors = 0;
    LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
        if (e->port.p != bundle) {
            struct ofpbuf *learning_packet;
            struct ofport_dpif *port;

            /* The assignment to "port" is unnecessary but makes "grep"ing for
             * struct ofport_dpif more effective. */
            learning_packet = bond_compose_learning_packet(bundle->bond,
            ret = send_packet(port, learning_packet);
            ofpbuf_delete(learning_packet);

        static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
        VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
                     "packets, last error was: %s",
                     bundle->name, n_errors, n_packets, strerror(error));
        VLOG_DBG("bond %s: sent %d gratuitous learning packets",
                 bundle->name, n_packets);
bundle_run(struct ofbundle *bundle)
        lacp_run(bundle->lacp, send_pdu_cb);
        struct ofport_dpif *port;

        LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
            bond_slave_set_may_enable(bundle->bond, port, port->may_enable);

        bond_run(bundle->bond, &bundle->ofproto->revalidate_set,
                 lacp_status(bundle->lacp));
        if (bond_should_send_learning_packets(bundle->bond)) {
            bundle_send_learning_packets(bundle);

bundle_wait(struct ofbundle *bundle)
        lacp_wait(bundle->lacp);
        bond_wait(bundle->bond);

mirror_scan(struct ofproto_dpif *ofproto)
    for (idx = 0; idx < MAX_MIRRORS; idx++) {
        if (!ofproto->mirrors[idx]) {

static struct ofmirror *
mirror_lookup(struct ofproto_dpif *ofproto, void *aux)
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *mirror = ofproto->mirrors[i];
        if (mirror && mirror->aux == aux) {

/* Update the 'dup_mirrors' member of each of the ofmirrors in 'ofproto'. */
mirror_update_dups(struct ofproto_dpif *ofproto)
    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m = ofproto->mirrors[i];

            m->dup_mirrors = MIRROR_MASK_C(1) << i;

    for (i = 0; i < MAX_MIRRORS; i++) {
        struct ofmirror *m1 = ofproto->mirrors[i];

        for (j = i + 1; j < MAX_MIRRORS; j++) {
            struct ofmirror *m2 = ofproto->mirrors[j];

            if (m2 && m1->out == m2->out && m1->out_vlan == m2->out_vlan) {
                m1->dup_mirrors |= MIRROR_MASK_C(1) << j;
                m2->dup_mirrors |= m1->dup_mirrors;

mirror_set(struct ofproto *ofproto_, void *aux,
           const struct ofproto_mirror_settings *s)
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    mirror_mask_t mirror_bit;
    struct ofbundle *bundle;
    struct ofmirror *mirror;
    struct ofbundle *out;
    struct hmapx srcs;          /* Contains "struct ofbundle *"s. */
    struct hmapx dsts;          /* Contains "struct ofbundle *"s. */

    mirror = mirror_lookup(ofproto, aux);
        mirror_destroy(mirror);

        idx = mirror_scan(ofproto);
            VLOG_WARN("bridge %s: maximum of %d port mirrors reached, "
                      ofproto->up.name, MAX_MIRRORS, s->name);

        mirror = ofproto->mirrors[idx] = xzalloc(sizeof *mirror);
        mirror->ofproto = ofproto;
        mirror->out_vlan = -1;
        mirror->name = NULL;

    if (!mirror->name || strcmp(s->name, mirror->name)) {
        mirror->name = xstrdup(s->name);

    /* Get the new configuration. */
    if (s->out_bundle) {
        out = bundle_lookup(ofproto, s->out_bundle);
            mirror_destroy(mirror);
        out_vlan = s->out_vlan;

    bundle_lookup_multiple(ofproto, s->srcs, s->n_srcs, &srcs);
    bundle_lookup_multiple(ofproto, s->dsts, s->n_dsts, &dsts);

    /* If the configuration has not changed, do nothing. */
    if (hmapx_equals(&srcs, &mirror->srcs)
        && hmapx_equals(&dsts, &mirror->dsts)
        && vlan_bitmap_equal(mirror->vlans, s->src_vlans)
        && mirror->out == out
        && mirror->out_vlan == out_vlan)
        hmapx_destroy(&srcs);
        hmapx_destroy(&dsts);

    hmapx_swap(&srcs, &mirror->srcs);
    hmapx_destroy(&srcs);

    hmapx_swap(&dsts, &mirror->dsts);
    hmapx_destroy(&dsts);

    free(mirror->vlans);
    mirror->vlans = vlan_bitmap_clone(s->src_vlans);

    mirror->out_vlan = out_vlan;

    /* Update bundles. */
    mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
    HMAP_FOR_EACH (bundle, hmap_node, &mirror->ofproto->bundles) {
        if (hmapx_contains(&mirror->srcs, bundle)) {
            bundle->src_mirrors |= mirror_bit;
            bundle->src_mirrors &= ~mirror_bit;

        if (hmapx_contains(&mirror->dsts, bundle)) {
            bundle->dst_mirrors |= mirror_bit;
            bundle->dst_mirrors &= ~mirror_bit;

        if (mirror->out == bundle) {
            bundle->mirror_out |= mirror_bit;
            bundle->mirror_out &= ~mirror_bit;

    ofproto->need_revalidate = true;
    ofproto->has_mirrors = true;
    mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
    mirror_update_dups(ofproto);
2170 mirror_destroy(struct ofmirror *mirror)
2172 struct ofproto_dpif *ofproto;
2173 mirror_mask_t mirror_bit;
2174 struct ofbundle *bundle;
2181 ofproto = mirror->ofproto;
2182 ofproto->need_revalidate = true;
2183 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
2185 mirror_bit = MIRROR_MASK_C(1) << mirror->idx;
2186 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
2187 bundle->src_mirrors &= ~mirror_bit;
2188 bundle->dst_mirrors &= ~mirror_bit;
2189 bundle->mirror_out &= ~mirror_bit;
2192 hmapx_destroy(&mirror->srcs);
2193 hmapx_destroy(&mirror->dsts);
2194 free(mirror->vlans);
2196 ofproto->mirrors[mirror->idx] = NULL;
2200 mirror_update_dups(ofproto);
2202 ofproto->has_mirrors = false;
2203 for (i = 0; i < MAX_MIRRORS; i++) {
2204 if (ofproto->mirrors[i]) {
2205 ofproto->has_mirrors = true;
2212 mirror_get_stats(struct ofproto *ofproto_, void *aux,
2213 uint64_t *packets, uint64_t *bytes)
2215 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2216 struct ofmirror *mirror = mirror_lookup(ofproto, aux);
2219 *packets = *bytes = UINT64_MAX;
2223 *packets = mirror->packet_count;
2224 *bytes = mirror->byte_count;
2230 set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
2232 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2233 if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
2234 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
2240 is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
2242 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2243 struct ofbundle *bundle = bundle_lookup(ofproto, aux);
2244 return bundle && bundle->mirror_out != 0;
2248 forward_bpdu_changed(struct ofproto *ofproto_)
2250 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2251 /* Revalidate cached flows whenever the 'forward_bpdu' option changes. */
2252 ofproto->need_revalidate = true;
2256 set_mac_idle_time(struct ofproto *ofproto_, unsigned int idle_time)
2258 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2259 mac_learning_set_idle_time(ofproto->ml, idle_time);
2264 static struct ofport_dpif *
2265 get_ofp_port(struct ofproto_dpif *ofproto, uint16_t ofp_port)
2267 struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
2268 return ofport ? ofport_dpif_cast(ofport) : NULL;
2271 static struct ofport_dpif *
2272 get_odp_port(struct ofproto_dpif *ofproto, uint32_t odp_port)
2274 return get_ofp_port(ofproto, odp_port_to_ofp_port(odp_port));
2278 ofproto_port_from_dpif_port(struct ofproto_port *ofproto_port,
2279 struct dpif_port *dpif_port)
2281 ofproto_port->name = dpif_port->name;
2282 ofproto_port->type = dpif_port->type;
2283 ofproto_port->ofp_port = odp_port_to_ofp_port(dpif_port->port_no);
2287 port_run(struct ofport_dpif *ofport)
2289 long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
2290 bool carrier_changed = carrier_seq != ofport->carrier_seq;
2291 bool enable = netdev_get_carrier(ofport->up.netdev);
2293 ofport->carrier_seq = carrier_seq;
2296 cfm_run(ofport->cfm);
2298 if (cfm_should_send_ccm(ofport->cfm)) {
2299 struct ofpbuf packet;
2301 ofpbuf_init(&packet, 0);
2302 cfm_compose_ccm(ofport->cfm, &packet, ofport->up.pp.hw_addr);
2303 send_packet(ofport, &packet);
2304 ofpbuf_uninit(&packet);
2307 enable = enable && !cfm_get_fault(ofport->cfm)
2308 && cfm_get_opup(ofport->cfm);
2311 if (ofport->bundle) {
2312 enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
2313 if (carrier_changed) {
2314 lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
2318 if (ofport->may_enable != enable) {
2319 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2321 if (ofproto->has_bundle_action) {
2322 ofproto->need_revalidate = true;
2326 ofport->may_enable = enable;
2330 port_wait(struct ofport_dpif *ofport)
2333 cfm_wait(ofport->cfm);
2338 port_query_by_name(const struct ofproto *ofproto_, const char *devname,
2339 struct ofproto_port *ofproto_port)
2341 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2342 struct dpif_port dpif_port;
2345 error = dpif_port_query_by_name(ofproto->dpif, devname, &dpif_port);
2347 ofproto_port_from_dpif_port(ofproto_port, &dpif_port);
2353 port_add(struct ofproto *ofproto_, struct netdev *netdev, uint16_t *ofp_portp)
2355 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2359 error = dpif_port_add(ofproto->dpif, netdev, &odp_port);
2361 *ofp_portp = odp_port_to_ofp_port(odp_port);
2367 port_del(struct ofproto *ofproto_, uint16_t ofp_port)
2369 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2372 error = dpif_port_del(ofproto->dpif, ofp_port_to_odp_port(ofp_port));
2374 struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
2376 /* The caller is going to close ofport->up.netdev. If this is a
2377 * bonded port, then the bond is using that netdev, so remove it
2378 * from the bond. The client will need to reconfigure everything
2379 * after deleting ports, at which point the slave will be re-added. */
2380 bundle_remove(&ofport->up);
2387 port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
2389 struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2392 error = netdev_get_stats(ofport->up.netdev, stats);
2394 if (!error && ofport->odp_port == OVSP_LOCAL) {
2395 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2397 /* ofproto->stats.tx_packets represents packets that we created
2398 * internally and sent to some port (e.g. packets sent with
2399 * send_packet()). Account for them as if they had come from
2400 * OFPP_LOCAL and got forwarded. */
2402 if (stats->rx_packets != UINT64_MAX) {
2403 stats->rx_packets += ofproto->stats.tx_packets;
2406 if (stats->rx_bytes != UINT64_MAX) {
2407 stats->rx_bytes += ofproto->stats.tx_bytes;
2410 /* ofproto->stats.rx_packets represents packets that were received on
2411 * some port and that we processed internally and dropped (e.g. for STP).
2412 * Account for them as if they had been forwarded to OFPP_LOCAL. */
2414 if (stats->tx_packets != UINT64_MAX) {
2415 stats->tx_packets += ofproto->stats.rx_packets;
2418 if (stats->tx_bytes != UINT64_MAX) {
2419 stats->tx_bytes += ofproto->stats.rx_bytes;
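/* The adjustment above can be summarized with a small sketch in standard C
 * (all names here are hypothetical, not part of this file): internally
 * generated packets are folded into the local port's receive counters and
 * internally consumed packets into its transmit counters, preserving
 * UINT64_MAX as the "counter unavailable" sentinel. */
#if 0
#include <stdint.h>

struct counters { uint64_t rx_packets, tx_packets; };

static void
fold_internal_traffic(struct counters *shown, const struct counters *internal)
{
    if (shown->rx_packets != UINT64_MAX) {
        shown->rx_packets += internal->tx_packets; /* generated -> "received" */
    }
    if (shown->tx_packets != UINT64_MAX) {
        shown->tx_packets += internal->rx_packets; /* consumed -> "sent" */
    }
}
#endif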
2426 /* Account packets for LOCAL port. */
2428 ofproto_update_local_port_stats(const struct ofproto *ofproto_,
2429 size_t tx_size, size_t rx_size)
2431 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2434 ofproto->stats.rx_packets++;
2435 ofproto->stats.rx_bytes += rx_size;
2438 ofproto->stats.tx_packets++;
2439 ofproto->stats.tx_bytes += tx_size;
2443 struct port_dump_state {
2444 struct dpif_port_dump dump;
2449 port_dump_start(const struct ofproto *ofproto_, void **statep)
2451 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2452 struct port_dump_state *state;
2454 *statep = state = xmalloc(sizeof *state);
2455 dpif_port_dump_start(&state->dump, ofproto->dpif);
2456 state->done = false;
2461 port_dump_next(const struct ofproto *ofproto_ OVS_UNUSED, void *state_,
2462 struct ofproto_port *port)
2464 struct port_dump_state *state = state_;
2465 struct dpif_port dpif_port;
2467 if (dpif_port_dump_next(&state->dump, &dpif_port)) {
2468 ofproto_port_from_dpif_port(port, &dpif_port);
2471 int error = dpif_port_dump_done(&state->dump);
2473 return error ? error : EOF;
2478 port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
2480 struct port_dump_state *state = state_;
2483 dpif_port_dump_done(&state->dump);
2490 port_poll(const struct ofproto *ofproto_, char **devnamep)
2492 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2493 return dpif_port_poll(ofproto->dpif, devnamep);
2497 port_poll_wait(const struct ofproto *ofproto_)
2499 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2500 dpif_port_poll_wait(ofproto->dpif);
2504 port_is_lacp_current(const struct ofport *ofport_)
2506 const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2507 return (ofport->bundle && ofport->bundle->lacp
2508 ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
2512 /* Upcall handling. */
2514 /* Flow miss batching.
2516 * Some dpifs implement operations faster when you hand them off in a batch.
2517 * To allow batching, "struct flow_miss" queues the dpif-related work needed
2518 * for a given flow. Each "struct flow_miss" corresponds to sending one or
2519 * more packets, plus possibly installing the flow in the dpif.
2521 * So far we only batch the operations that affect flow setup time the most.
2522 * It's possible to batch more than that, but the benefit might be minimal. */
2524 struct hmap_node hmap_node;
2526 enum odp_key_fitness key_fitness;
2527 const struct nlattr *key;
2529 ovs_be16 initial_tci;
2530 struct list packets;
2533 struct flow_miss_op {
2534 struct dpif_op dpif_op;
2535 struct subfacet *subfacet; /* Subfacet the op is for, or NULL. */
2536 void *garbage; /* Pointer to pass to free(), NULL if none. */
2537 uint64_t stub[1024 / 8]; /* Temporary buffer. */
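/* A minimal sketch, in standard C, of the batching pattern described above.
 * Everything here ('struct op', do_ops(), BATCH_MAX) is hypothetical; the
 * point is only that per-call overhead is paid once per batch rather than
 * once per flow. */
#if 0
#include <stddef.h>

enum { BATCH_MAX = 50 };

struct op { int arg; };                 /* hypothetical unit of work */

static void
do_ops(struct op **ops, size_t n)       /* stand-in for a batched call */
{
    (void) ops;
    (void) n;
}

static void
run_batched(struct op *pending, size_t n_pending)
{
    struct op *batch[BATCH_MAX];
    size_t i, n = 0;

    for (i = 0; i < n_pending; i++) {
        batch[n++] = &pending[i];
        if (n == BATCH_MAX) {
            do_ops(batch, n);           /* flush a full batch */
            n = 0;
        }
    }
    if (n) {
        do_ops(batch, n);               /* flush the remainder */
    }
}
#endif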
2540 /* Sends an OFPT_PACKET_IN message for 'packet' of type OFPR_NO_MATCH to each
2541 * OpenFlow controller as necessary according to their individual
2542 * configurations. */
2544 send_packet_in_miss(struct ofproto_dpif *ofproto, const struct ofpbuf *packet,
2545 const struct flow *flow)
2547 struct ofputil_packet_in pin;
2549 pin.packet = packet->data;
2550 pin.packet_len = packet->size;
2551 pin.reason = OFPR_NO_MATCH;
2552 pin.controller_id = 0;
2557 pin.send_len = 0; /* not used for flow table misses */
2559 flow_get_metadata(flow, &pin.fmd);
2561 /* Registers aren't meaningful on a miss. */
2562 memset(pin.fmd.reg_masks, 0, sizeof pin.fmd.reg_masks);
2564 connmgr_send_packet_in(ofproto->up.connmgr, &pin);
2568 process_special(struct ofproto_dpif *ofproto, const struct flow *flow,
2569 const struct ofpbuf *packet)
2571 struct ofport_dpif *ofport = get_ofp_port(ofproto, flow->in_port);
2577 if (ofport->cfm && cfm_should_process_flow(ofport->cfm, flow)) {
2579 cfm_process_heartbeat(ofport->cfm, packet);
2582 } else if (ofport->bundle && ofport->bundle->lacp
2583 && flow->dl_type == htons(ETH_TYPE_LACP)) {
2585 lacp_process_packet(ofport->bundle->lacp, ofport, packet);
2588 } else if (ofproto->stp && stp_should_process_flow(flow)) {
2590 stp_process_packet(ofport, packet);
2597 static struct flow_miss *
2598 flow_miss_find(struct hmap *todo, const struct flow *flow, uint32_t hash)
2600 struct flow_miss *miss;
2602 HMAP_FOR_EACH_WITH_HASH (miss, hmap_node, hash, todo) {
2603 if (flow_equal(&miss->flow, flow)) {
2611 /* Partially initializes 'op' as an "execute" operation for 'miss' and
2612 * 'packet'. The caller must initialize op->actions and op->actions_len. If
2613 * 'miss' is associated with a subfacet the caller must also initialize the
2614 * returned op->subfacet, and if anything needs to be freed after processing
2615 * the op, the caller must initialize op->garbage also. */
2617 init_flow_miss_execute_op(struct flow_miss *miss, struct ofpbuf *packet,
2618 struct flow_miss_op *op)
2620 if (miss->flow.vlan_tci != miss->initial_tci) {
2621 /* This packet was received on a VLAN splinter port. We
2622 * added a VLAN to the packet to make the packet resemble
2623 * the flow, but the actions were composed assuming that
2624 * the packet contained no VLAN. So, we must remove the
2625 * VLAN header from the packet before trying to execute the
2626 * actions. */
2627 eth_pop_vlan(packet);
2630 op->subfacet = NULL;
2632 op->dpif_op.type = DPIF_OP_EXECUTE;
2633 op->dpif_op.u.execute.key = miss->key;
2634 op->dpif_op.u.execute.key_len = miss->key_len;
2635 op->dpif_op.u.execute.packet = packet;
2638 /* Helper for handle_flow_miss_without_facet() and
2639 * handle_flow_miss_with_facet(). */
2641 handle_flow_miss_common(struct rule_dpif *rule,
2642 struct ofpbuf *packet, const struct flow *flow)
2644 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2646 ofproto->n_matches++;
2648 if (rule->up.cr.priority == FAIL_OPEN_PRIORITY) {
2650 * Extra-special case for fail-open mode.
2652 * We are in fail-open mode and the packet matched the fail-open
2653 * rule, but we are connected to a controller too. We should send
2654 * the packet up to the controller in the hope that it will try to
2655 * set up a flow and thereby allow us to exit fail-open.
2657 * See the top-level comment in fail-open.c for more information.
2659 send_packet_in_miss(ofproto, packet, flow);
2663 /* Figures out whether a flow that missed in 'ofproto', whose details are in
2664 * 'miss', is likely to be worth tracking in detail in userspace and (usually)
2665 * installing a datapath flow. The answer is usually "yes" (a return value of
2666 * true). However, for short flows the cost of bookkeeping is much higher than
2667 * the benefits, so when the datapath holds a large number of flows we impose
2668 * some heuristics to decide which flows are likely to be worth tracking. */
2670 flow_miss_should_make_facet(struct ofproto_dpif *ofproto,
2671 struct flow_miss *miss, uint32_t hash)
2673 if (!ofproto->governor) {
2676 n_subfacets = hmap_count(&ofproto->subfacets);
2677 if (n_subfacets * 2 <= ofproto->up.flow_eviction_threshold) {
2681 ofproto->governor = governor_create(ofproto->up.name);
2684 return governor_should_install_flow(ofproto->governor, hash,
2685 list_size(&miss->packets));
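/* The heuristic above boils down to the following sketch (standard C;
 * governor_admits() is a hypothetical placeholder for the governor's real
 * policy): with fewer than half the eviction threshold of cached flows,
 * every flow gets a facet; past that, admission becomes selective. */
#if 0
#include <stdbool.h>
#include <stddef.h>

static bool
governor_admits(unsigned int flow_hash)     /* placeholder policy */
{
    return (flow_hash & 1) != 0;
}

static bool
should_make_facet(size_t n_cached, size_t eviction_threshold,
                  unsigned int flow_hash)
{
    if (n_cached * 2 <= eviction_threshold) {
        return true;                        /* plenty of headroom */
    }
    return governor_admits(flow_hash);
}
#endif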
2688 /* Handles 'miss', which matches 'rule', without creating a facet or subfacet
2689 * or creating any datapath flow. May add an "execute" operation to 'ops' and
2690 * increment '*n_ops'. */
2692 handle_flow_miss_without_facet(struct flow_miss *miss,
2693 struct rule_dpif *rule,
2694 struct flow_miss_op *ops, size_t *n_ops)
2696 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
2697 struct action_xlate_ctx ctx;
2698 struct ofpbuf *packet;
2700 LIST_FOR_EACH (packet, list_node, &miss->packets) {
2701 struct flow_miss_op *op = &ops[*n_ops];
2702 struct dpif_flow_stats stats;
2703 struct ofpbuf odp_actions;
2705 COVERAGE_INC(facet_suppress);
2707 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
2709 dpif_flow_stats_extract(&miss->flow, packet, &stats);
2710 rule_credit_stats(rule, &stats);
2712 action_xlate_ctx_init(&ctx, ofproto, &miss->flow, miss->initial_tci,
2714 ctx.resubmit_stats = &stats;
2715 xlate_actions(&ctx, rule->up.actions, rule->up.n_actions,
2718 if (odp_actions.size) {
2719 struct dpif_execute *execute = &op->dpif_op.u.execute;
2721 init_flow_miss_execute_op(miss, packet, op);
2722 execute->actions = odp_actions.data;
2723 execute->actions_len = odp_actions.size;
2724 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
2728 ofpbuf_uninit(&odp_actions);
2733 /* Handles 'miss', which matches 'facet'. May add any required datapath
2734 * operations to 'ops', incrementing '*n_ops' for each new op. */
2736 handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
2737 struct flow_miss_op *ops, size_t *n_ops)
2739 struct subfacet *subfacet;
2740 struct ofpbuf *packet;
2742 subfacet = subfacet_create(facet,
2743 miss->key_fitness, miss->key, miss->key_len,
2746 LIST_FOR_EACH (packet, list_node, &miss->packets) {
2747 struct flow_miss_op *op = &ops[*n_ops];
2748 struct dpif_flow_stats stats;
2749 struct ofpbuf odp_actions;
2751 handle_flow_miss_common(facet->rule, packet, &miss->flow);
2753 ofpbuf_use_stub(&odp_actions, op->stub, sizeof op->stub);
2754 if (!facet->may_install || !subfacet->actions) {
2755 subfacet_make_actions(subfacet, packet, &odp_actions);
2758 dpif_flow_stats_extract(&facet->flow, packet, &stats);
2759 subfacet_update_stats(subfacet, &stats);
2761 if (subfacet->actions_len) {
2762 struct dpif_execute *execute = &op->dpif_op.u.execute;
2764 init_flow_miss_execute_op(miss, packet, op);
2765 op->subfacet = subfacet;
2766 if (facet->may_install) {
2767 execute->actions = subfacet->actions;
2768 execute->actions_len = subfacet->actions_len;
2769 ofpbuf_uninit(&odp_actions);
2771 execute->actions = odp_actions.data;
2772 execute->actions_len = odp_actions.size;
2773 op->garbage = ofpbuf_get_uninit_pointer(&odp_actions);
2778 ofpbuf_uninit(&odp_actions);
2782 if (facet->may_install && subfacet->key_fitness != ODP_FIT_TOO_LITTLE) {
2783 struct flow_miss_op *op = &ops[(*n_ops)++];
2784 struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
2786 op->subfacet = subfacet;
2788 op->dpif_op.type = DPIF_OP_FLOW_PUT;
2789 put->flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
2790 put->key = miss->key;
2791 put->key_len = miss->key_len;
2792 put->actions = subfacet->actions;
2793 put->actions_len = subfacet->actions_len;
2798 /* Handles flow miss 'miss' on 'ofproto'. The flow does not match any flow in
2799 * the OpenFlow flow table. */
2801 handle_flow_miss_no_rule(struct ofproto_dpif *ofproto, struct flow_miss *miss)
2803 uint16_t in_port = miss->flow.in_port;
2804 struct ofport_dpif *port = get_ofp_port(ofproto, in_port);
2807 VLOG_WARN_RL(&rl, "packet-in on unknown port %"PRIu16, in_port);
2810 if (port && port->up.pp.config & OFPUTIL_PC_NO_PACKET_IN) {
2811 /* XXX install 'drop' flow entry */
2812 COVERAGE_INC(ofproto_dpif_no_packet_in);
2814 const struct ofpbuf *packet;
2816 LIST_FOR_EACH (packet, list_node, &miss->packets) {
2817 send_packet_in_miss(ofproto, packet, &miss->flow);
2822 /* Handles flow miss 'miss' on 'ofproto'. May add any required datapath
2823 * operations to 'ops', incrementing '*n_ops' for each new op. */
2825 handle_flow_miss(struct ofproto_dpif *ofproto, struct flow_miss *miss,
2826 struct flow_miss_op *ops, size_t *n_ops)
2828 struct facet *facet;
2831 /* The caller must ensure that miss->hmap_node.hash contains
2832 * flow_hash(miss->flow, 0). */
2833 hash = miss->hmap_node.hash;
2835 facet = facet_lookup_valid(ofproto, &miss->flow, hash);
2837 struct rule_dpif *rule = rule_dpif_lookup(ofproto, &miss->flow, 0);
2839 handle_flow_miss_no_rule(ofproto, miss);
2841 } else if (!flow_miss_should_make_facet(ofproto, miss, hash)) {
2842 handle_flow_miss_without_facet(miss, rule, ops, n_ops);
2846 facet = facet_create(rule, &miss->flow, hash);
2848 handle_flow_miss_with_facet(miss, facet, ops, n_ops);
2851 /* Like odp_flow_key_to_flow(), this function converts the 'key_len' bytes of
2852 * OVS_KEY_ATTR_* attributes in 'key' to a flow structure in 'flow' and returns
2853 * an ODP_FIT_* value that indicates how well 'key' fits our expectations for
2854 * what a flow key should contain.
2856 * This function also includes some logic to help make VLAN splinters
2857 * transparent to the rest of the upcall processing logic. In particular, if
2858 * the extracted in_port is a VLAN splinter port, it replaces flow->in_port by
2859 * the "real" port, sets flow->vlan_tci correctly for the VLAN of the VLAN
2860 * splinter port, and pushes a VLAN header onto 'packet' (if it is nonnull).
2862 * Sets '*initial_tci' to the VLAN TCI with which the packet was really
2863 * received, that is, the actual VLAN TCI extracted by odp_flow_key_to_flow().
2864 * (This differs from the value returned in flow->vlan_tci only for packets
2865 * received on VLAN splinters.)
2867 static enum odp_key_fitness
2868 ofproto_dpif_extract_flow_key(const struct ofproto_dpif *ofproto,
2869 const struct nlattr *key, size_t key_len,
2870 struct flow *flow, ovs_be16 *initial_tci,
2871 struct ofpbuf *packet)
2873 enum odp_key_fitness fitness;
2875 fitness = odp_flow_key_to_flow(key, key_len, flow);
2876 if (fitness == ODP_FIT_ERROR) {
2879 *initial_tci = flow->vlan_tci;
2881 if (vsp_adjust_flow(ofproto, flow)) {
2883 /* Make the packet resemble the flow, so that it gets sent to an
2884 * OpenFlow controller properly, so that it looks correct for
2885 * sFlow, and so that flow_extract() will get the correct vlan_tci
2886 * if it is called on 'packet'.
2888 * The allocated space inside 'packet' probably also contains
2889 * 'key', that is, both 'packet' and 'key' are probably part of a
2890 * struct dpif_upcall (see the large comment on that structure
2891 * definition), so pushing data on 'packet' is in general not a
2892 * good idea since it could overwrite 'key' or free it as a side
2893 * effect. However, it's OK in this special case because we know
2894 * that 'packet' is inside a Netlink attribute: pushing 4 bytes
2895 * will just overwrite the 4-byte "struct nlattr", which is fine
2896 * since we don't need that header anymore. */
2897 eth_push_vlan(packet, flow->vlan_tci);
2900 /* Let the caller know that we can't reproduce 'key' from 'flow'. */
2901 if (fitness == ODP_FIT_PERFECT) {
2902 fitness = ODP_FIT_TOO_MUCH;
2910 handle_miss_upcalls(struct ofproto_dpif *ofproto, struct dpif_upcall *upcalls,
2913 struct dpif_upcall *upcall;
2914 struct flow_miss *miss;
2915 struct flow_miss misses[FLOW_MISS_MAX_BATCH];
2916 struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH * 2];
2917 struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH * 2];
2927 /* Construct the to-do list.
2929 * This just amounts to extracting the flow from each packet and sticking
2930 * the packets that have the same flow in the same "flow_miss" structure so
2931 * that we can process them together. */
2934 for (upcall = upcalls; upcall < &upcalls[n_upcalls]; upcall++) {
2935 struct flow_miss *miss = &misses[n_misses];
2936 struct flow_miss *existing_miss;
2939 /* Obtain metadata and check userspace/kernel agreement on flow match,
2940 * then set 'flow''s header pointers. */
2941 miss->key_fitness = ofproto_dpif_extract_flow_key(
2942 ofproto, upcall->key, upcall->key_len,
2943 &miss->flow, &miss->initial_tci, upcall->packet);
2944 if (miss->key_fitness == ODP_FIT_ERROR) {
2947 flow_extract(upcall->packet, miss->flow.skb_priority,
2948 miss->flow.tun_id, miss->flow.in_port, &miss->flow);
2950 /* Handle 802.1ag, LACP, and STP specially. */
2951 if (process_special(ofproto, &miss->flow, upcall->packet)) {
2952 ofproto_update_local_port_stats(&ofproto->up,
2953 0, upcall->packet->size);
2954 ofproto->n_matches++;
2958 /* Add other packets to a to-do list. */
2959 hash = flow_hash(&miss->flow, 0);
2960 existing_miss = flow_miss_find(&todo, &miss->flow, hash);
2961 if (!existing_miss) {
2962 hmap_insert(&todo, &miss->hmap_node, hash);
2963 miss->key = upcall->key;
2964 miss->key_len = upcall->key_len;
2965 list_init(&miss->packets);
2969 miss = existing_miss;
2971 list_push_back(&miss->packets, &upcall->packet->list_node);
2974 /* Process each element in the to-do list, constructing the set of
2975 * operations to batch. */
2977 HMAP_FOR_EACH (miss, hmap_node, &todo) {
2978 handle_flow_miss(ofproto, miss, flow_miss_ops, &n_ops);
2980 assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
2982 /* Execute batch. */
2983 for (i = 0; i < n_ops; i++) {
2984 dpif_ops[i] = &flow_miss_ops[i].dpif_op;
2986 dpif_operate(ofproto->dpif, dpif_ops, n_ops);
2988 /* Free memory and update facets. */
2989 for (i = 0; i < n_ops; i++) {
2990 struct flow_miss_op *op = &flow_miss_ops[i];
2992 switch (op->dpif_op.type) {
2993 case DPIF_OP_EXECUTE:
2996 case DPIF_OP_FLOW_PUT:
2997 if (!op->dpif_op.error) {
2998 op->subfacet->installed = true;
3002 case DPIF_OP_FLOW_DEL:
3008 hmap_destroy(&todo);
3012 handle_userspace_upcall(struct ofproto_dpif *ofproto,
3013 struct dpif_upcall *upcall)
3015 struct user_action_cookie cookie;
3016 enum odp_key_fitness fitness;
3017 ovs_be16 initial_tci;
3020 memcpy(&cookie, &upcall->userdata, sizeof(cookie));
3022 fitness = ofproto_dpif_extract_flow_key(ofproto, upcall->key,
3023 upcall->key_len, &flow,
3024 &initial_tci, upcall->packet);
3025 if (fitness == ODP_FIT_ERROR) {
3029 if (cookie.type == USER_ACTION_COOKIE_SFLOW) {
3030 if (ofproto->sflow) {
3031 dpif_sflow_received(ofproto->sflow, upcall->packet, &flow,
3035 VLOG_WARN_RL(&rl, "invalid user cookie: 0x%"PRIx64, upcall->userdata);
3040 handle_upcalls(struct ofproto_dpif *ofproto, unsigned int max_batch)
3042 struct dpif_upcall misses[FLOW_MISS_MAX_BATCH];
3043 struct ofpbuf miss_bufs[FLOW_MISS_MAX_BATCH];
3044 uint64_t miss_buf_stubs[FLOW_MISS_MAX_BATCH][4096 / 8];
3049 assert(max_batch <= FLOW_MISS_MAX_BATCH);
3053 for (n_processed = 0; n_processed < max_batch; n_processed++) {
3054 struct dpif_upcall *upcall = &misses[n_misses];
3055 struct ofpbuf *buf = &miss_bufs[n_misses];
3058 ofpbuf_use_stub(buf, miss_buf_stubs[n_misses],
3059 sizeof miss_buf_stubs[n_misses]);
3060 error = dpif_recv(ofproto->dpif, upcall, buf);
3066 switch (upcall->type) {
3067 case DPIF_UC_ACTION:
3068 handle_userspace_upcall(ofproto, upcall);
3073 /* Handle it later. */
3077 case DPIF_N_UC_TYPES:
3079 VLOG_WARN_RL(&rl, "upcall has unexpected type %"PRIu32,
3085 handle_miss_upcalls(ofproto, misses, n_misses);
3086 for (i = 0; i < n_misses; i++) {
3087 ofpbuf_uninit(&miss_bufs[i]);
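/* The miss_buf_stubs[] usage above relies on the stub-buffer pattern: start
 * in caller-provided storage and move to the heap only if the data outgrows
 * it, so the common small case costs no allocation. A self-contained sketch
 * in standard C (types and names hypothetical; error handling omitted): */
#if 0
#include <stdlib.h>
#include <string.h>

struct buf { void *base; size_t size, allocated; int on_heap; };

static void
buf_use_stub(struct buf *b, void *stub, size_t stub_size)
{
    b->base = stub;
    b->size = 0;
    b->allocated = stub_size;
    b->on_heap = 0;
}

static void
buf_put(struct buf *b, const void *data, size_t n)
{
    if (b->size + n > b->allocated) {
        size_t new_alloc = (b->size + n) * 2;
        void *p = malloc(new_alloc);        /* (NULL check omitted) */
        memcpy(p, b->base, b->size);
        if (b->on_heap) {
            free(b->base);
        }
        b->base = p;
        b->allocated = new_alloc;
        b->on_heap = 1;
    }
    memcpy((char *) b->base + b->size, data, n);
    b->size += n;
}

static void
buf_uninit(struct buf *b)
{
    if (b->on_heap) {
        free(b->base);
    }
}
#endif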
3093 /* Flow expiration. */
3095 static int subfacet_max_idle(const struct ofproto_dpif *);
3096 static void update_stats(struct ofproto_dpif *);
3097 static void rule_expire(struct rule_dpif *);
3098 static void expire_subfacets(struct ofproto_dpif *, int dp_max_idle);
3100 /* This function is called periodically by run(). Its job is to collect
3101 * updates for the flows that have been installed into the datapath, most
3102 * importantly when they last were used, and then use that information to
3103 * expire flows that have not been used recently.
3105 * Returns the number of milliseconds after which it should be called again. */
3107 expire(struct ofproto_dpif *ofproto)
3109 struct rule_dpif *rule, *next_rule;
3110 struct oftable *table;
3113 /* Update stats for each flow in the datapath. */
3114 update_stats(ofproto);
3116 /* Expire subfacets that have been idle too long. */
3117 dp_max_idle = subfacet_max_idle(ofproto);
3118 expire_subfacets(ofproto, dp_max_idle);
3120 /* Expire OpenFlow flows whose idle_timeout or hard_timeout has passed. */
3121 OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
3122 struct cls_cursor cursor;
3124 cls_cursor_init(&cursor, &table->cls, NULL);
3125 CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
3130 /* All outstanding data in existing flows has been accounted, so it's a
3131 * good time to do bond rebalancing. */
3132 if (ofproto->has_bonded_bundles) {
3133 struct ofbundle *bundle;
3135 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
3137 bond_rebalance(bundle->bond, &ofproto->revalidate_set);
3142 return MIN(dp_max_idle, 1000);
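/* Two small helpers (standard C, hypothetical names) capture the arithmetic
 * expire() relies on: a flow is evicted once its idle time exceeds the
 * computed maximum, and the caller is asked back after at most a second. */
#if 0
#include <stdbool.h>

static bool
should_evict(long long int now, long long int last_used, int max_idle_msec)
{
    return now - last_used > max_idle_msec;
}

static int
next_wakeup_msec(int max_idle_msec)
{
    return max_idle_msec < 1000 ? max_idle_msec : 1000;
}
#endif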
3145 /* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
3147 * This function also pushes statistics updates to rules which each facet
3148 * resubmits into. Generally these statistics will be accurate. However, if a
3149 * facet changes the rule it resubmits into at some time in between
3150 * update_stats() runs, it is possible that statistics accrued to the
3151 * old rule will be incorrectly attributed to the new rule. This could be
3152 * avoided by calling update_stats() whenever rules are created or
3153 * deleted. However, the performance impact of making so many calls to the
3154 * datapath does not justify the benefit of having perfectly accurate statistics.
3157 update_stats(struct ofproto_dpif *p)
3159 const struct dpif_flow_stats *stats;
3160 struct dpif_flow_dump dump;
3161 const struct nlattr *key;
3164 dpif_flow_dump_start(&dump, p->dpif);
3165 while (dpif_flow_dump_next(&dump, &key, &key_len, NULL, NULL, &stats)) {
3166 struct subfacet *subfacet;
3168 subfacet = subfacet_find(p, key, key_len);
3169 if (subfacet && subfacet->installed) {
3170 struct facet *facet = subfacet->facet;
3172 if (stats->n_packets >= subfacet->dp_packet_count) {
3173 uint64_t extra = stats->n_packets - subfacet->dp_packet_count;
3174 facet->packet_count += extra;
3176 VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
3179 if (stats->n_bytes >= subfacet->dp_byte_count) {
3180 facet->byte_count += stats->n_bytes - subfacet->dp_byte_count;
3182 VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
3185 subfacet->dp_packet_count = stats->n_packets;
3186 subfacet->dp_byte_count = stats->n_bytes;
3188 facet->tcp_flags |= stats->tcp_flags;
3190 subfacet_update_time(subfacet, stats->used);
3191 if (facet->accounted_bytes < facet->byte_count) {
3193 facet_account(facet);
3194 facet->accounted_bytes = facet->byte_count;
3196 facet_push_stats(facet);
3198 if (!VLOG_DROP_WARN(&rl)) {
3202 odp_flow_key_format(key, key_len, &s);
3203 VLOG_WARN("unexpected flow from datapath %s", ds_cstr(&s));
3207 COVERAGE_INC(facet_unexpected);
3208 /* There's a flow in the datapath that we know nothing about, or a
3209 * flow that shouldn't be installed but was anyway. Delete it. */
3210 dpif_flow_del(p->dpif, key, key_len, NULL);
3213 dpif_flow_dump_done(&dump);
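/* The counter handling above follows a common pattern for absolute datapath
 * counters: fold in only the growth since the previous dump and treat a
 * counter that moved backwards as suspect. A sketch in standard C
 * (fold_packet_count() is hypothetical): */
#if 0
#include <stdint.h>
#include <stdio.h>

static void
fold_packet_count(uint64_t dp_count, uint64_t *last_dp_count, uint64_t *total)
{
    if (dp_count >= *last_dp_count) {
        *total += dp_count - *last_dp_count;    /* credit only the delta */
    } else {
        fprintf(stderr, "unexpected packet count from the datapath\n");
    }
    *last_dp_count = dp_count;
}
#endif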
3216 /* Calculates and returns the number of milliseconds of idle time after which
3217 * subfacets should expire from the datapath. When a subfacet expires, we fold
3218 * its statistics into its facet, and when a facet's last subfacet expires, we
3219 * fold its statistics into its rule. */
3221 subfacet_max_idle(const struct ofproto_dpif *ofproto)
3224 * Idle time histogram.
3226 * Most of the time a switch has a relatively small number of subfacets.
3227 * When this is the case we might as well keep statistics for all of them
3228 * in userspace and cache them in the kernel datapath for performance as
3231 * As the number of subfacets increases, the memory required to maintain
3232 * statistics about them in userspace and in the kernel becomes
3233 * significant. However, with a large number of subfacets it is likely
3234 * that only a few of them are "heavy hitters" that consume a large amount
3235 * of bandwidth. At this point, only heavy hitters are worth caching in
3236 * the kernel and maintaining in userspace; other subfacets we can
3239 * The technique used to compute the idle time is to build a histogram with
3240 * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each. Each subfacet
3241 * that is installed in the kernel gets dropped in the appropriate bucket.
3242 * After the histogram has been built, we compute the cutoff so that only
3243 * the most-recently-used 1% of subfacets (but at least
3244 * ofproto->up.flow_eviction_threshold flows) are kept cached. At least
3245 * the most-recently-used bucket of subfacets is kept, so actually an
3246 * arbitrary number of subfacets can be kept in any given expiration run
3247 * (though the next run will delete most of those unless they receive
3248 * additional data).
3250 * This requires a second pass through the subfacets, in addition to the
3251 * pass made by update_stats(), because the former function never looks at
3252 * uninstallable subfacets.
3254 enum { BUCKET_WIDTH = ROUND_UP(100, TIME_UPDATE_INTERVAL) };
3255 enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
3256 int buckets[N_BUCKETS] = { 0 };
3257 int total, subtotal, bucket;
3258 struct subfacet *subfacet;
3262 total = hmap_count(&ofproto->subfacets);
3263 if (total <= ofproto->up.flow_eviction_threshold) {
3264 return N_BUCKETS * BUCKET_WIDTH;
3267 /* Build histogram. */
3269 HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->subfacets) {
3270 long long int idle = now - subfacet->used;
3271 int bucket = (idle <= 0 ? 0
3272 : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
3273 : (unsigned int) idle / BUCKET_WIDTH);
3277 /* Find the first bucket whose flows should be expired. */
3278 subtotal = bucket = 0;
3280 subtotal += buckets[bucket++];
3281 } while (bucket < N_BUCKETS &&
3282 subtotal < MAX(ofproto->up.flow_eviction_threshold, total / 100));
3284 if (VLOG_IS_DBG_ENABLED()) {
3288 ds_put_cstr(&s, "keep");
3289 for (i = 0; i < N_BUCKETS; i++) {
3291 ds_put_cstr(&s, ", drop");
3294 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
3297 VLOG_INFO("%s: %s (msec:count)", ofproto->up.name, ds_cstr(&s));
3301 return bucket * BUCKET_WIDTH;
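/* The cutoff computation above, reduced to a standalone sketch in standard C
 * (constants chosen arbitrarily): keep buckets, newest first, until at least
 * max(threshold, total/100) subfacets are covered; everything idler than the
 * returned value expires. */
#if 0
enum { BUCKET_WIDTH = 100, N_BUCKETS = 50 };

static int
max_idle_from_histogram(const int buckets[N_BUCKETS], int total, int threshold)
{
    int keep = threshold > total / 100 ? threshold : total / 100;
    int subtotal = 0;
    int bucket = 0;

    do {
        subtotal += buckets[bucket++];
    } while (bucket < N_BUCKETS && subtotal < keep);

    return bucket * BUCKET_WIDTH;   /* idle msec after which flows expire */
}
#endif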
3304 enum { EXPIRE_MAX_BATCH = 50 };
3307 expire_batch(struct ofproto_dpif *ofproto, struct subfacet **subfacets, int n)
3309 struct odputil_keybuf keybufs[EXPIRE_MAX_BATCH];
3310 struct dpif_op ops[EXPIRE_MAX_BATCH];
3311 struct dpif_op *opsp[EXPIRE_MAX_BATCH];
3312 struct ofpbuf keys[EXPIRE_MAX_BATCH];
3313 struct dpif_flow_stats stats[EXPIRE_MAX_BATCH];
3316 for (i = 0; i < n; i++) {
3317 ops[i].type = DPIF_OP_FLOW_DEL;
3318 subfacet_get_key(subfacets[i], &keybufs[i], &keys[i]);
3319 ops[i].u.flow_del.key = keys[i].data;
3320 ops[i].u.flow_del.key_len = keys[i].size;
3321 ops[i].u.flow_del.stats = &stats[i];
3325 dpif_operate(ofproto->dpif, opsp, n);
3326 for (i = 0; i < n; i++) {
3327 subfacet_reset_dp_stats(subfacets[i], &stats[i]);
3328 subfacets[i]->installed = false;
3329 subfacet_destroy(subfacets[i]);
3334 expire_subfacets(struct ofproto_dpif *ofproto, int dp_max_idle)
3336 long long int cutoff = time_msec() - dp_max_idle;
3338 struct subfacet *subfacet, *next_subfacet;
3339 struct subfacet *batch[EXPIRE_MAX_BATCH];
3343 HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
3344 &ofproto->subfacets) {
3345 if (subfacet->used < cutoff) {
3346 if (subfacet->installed) {
3347 batch[n_batch++] = subfacet;
3348 if (n_batch >= EXPIRE_MAX_BATCH) {
3349 expire_batch(ofproto, batch, n_batch);
3353 subfacet_destroy(subfacet);
3359 expire_batch(ofproto, batch, n_batch);
3363 /* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
3364 * then delete it entirely. */
3366 rule_expire(struct rule_dpif *rule)
3368 struct facet *facet, *next_facet;
3372 /* Has 'rule' expired? */
3374 if (rule->up.hard_timeout
3375 && now > rule->up.modified + rule->up.hard_timeout * 1000) {
3376 reason = OFPRR_HARD_TIMEOUT;
3377 } else if (rule->up.idle_timeout
3378 && now > rule->up.used + rule->up.idle_timeout * 1000) {
3379 reason = OFPRR_IDLE_TIMEOUT;
3384 COVERAGE_INC(ofproto_dpif_expired);
3386 /* Update stats. (This is a no-op if the rule expired due to an idle
3387 * timeout, because that only happens when the rule has no facets left.) */
3388 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
3389 facet_remove(facet);
3392 /* Get rid of the rule. */
3393 ofproto_rule_expire(&rule->up, reason);
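/* The timeout arithmetic in rule_expire() is easy to get wrong because
 * OpenFlow timeouts are in seconds while the clock is in milliseconds, and a
 * zero timeout means "never". A sketch in standard C (hypothetical names): */
#if 0
#include <stdbool.h>

static bool
hard_timed_out(long long int now_msec, long long int modified_msec,
               int hard_timeout_sec)
{
    return hard_timeout_sec
           && now_msec > modified_msec + hard_timeout_sec * 1000LL;
}
#endif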
3398 /* Creates and returns a new facet owned by 'rule', given a 'flow'.
3400 * The caller must already have determined that no facet with an identical
3401 * 'flow' exists in 'ofproto' and that 'flow' is the best match for 'rule' in
3402 * the ofproto's classifier table.
3404 * 'hash' must be the return value of flow_hash(flow, 0).
3406 * The facet will initially have no subfacets. The caller should create (at
3407 * least) one subfacet with subfacet_create(). */
3408 static struct facet *
3409 facet_create(struct rule_dpif *rule, const struct flow *flow, uint32_t hash)
3411 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3412 struct facet *facet;
3414 facet = xzalloc(sizeof *facet);
3415 facet->used = time_msec();
3416 hmap_insert(&ofproto->facets, &facet->hmap_node, hash);
3417 list_push_back(&rule->facets, &facet->list_node);
3419 facet->flow = *flow;
3420 list_init(&facet->subfacets);
3421 netflow_flow_init(&facet->nf_flow);
3422 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
3428 facet_free(struct facet *facet)
3433 /* Executes, within 'ofproto', the 'actions_len' bytes of datapath actions in
3434 * 'odp_actions' on 'packet', whose flow is given by 'flow'.
3436 * Takes ownership of 'packet'. */
3438 execute_odp_actions(struct ofproto_dpif *ofproto, const struct flow *flow,
3439 const struct nlattr *odp_actions, size_t actions_len,
3440 struct ofpbuf *packet)
3442 struct odputil_keybuf keybuf;
3446 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
3447 odp_flow_key_from_flow(&key, flow);
3449 error = dpif_execute(ofproto->dpif, key.data, key.size,
3450 odp_actions, actions_len, packet);
3452 ofpbuf_delete(packet);
3456 /* Remove 'facet' from 'ofproto' and free up the associated memory:
3458 * - If 'facet' was installed in the datapath, uninstalls it and updates its
3459 * rule's statistics, via subfacet_uninstall().
3461 * - Removes 'facet' from its rule and from ofproto->facets.
3464 facet_remove(struct facet *facet)
3466 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3467 struct subfacet *subfacet, *next_subfacet;
3469 assert(!list_is_empty(&facet->subfacets));
3471 /* First uninstall all of the subfacets to get final statistics. */
3472 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3473 subfacet_uninstall(subfacet);
3476 /* Flush the final stats to the rule.
3478 * This might require us to have at least one subfacet around so that we
3479 * can use its actions for accounting in facet_account(), which is why we
3480 * have uninstalled but not yet destroyed the subfacets. */
3481 facet_flush_stats(facet);
3483 /* Now we're really all done so destroy everything. */
3484 LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
3485 &facet->subfacets) {
3486 subfacet_destroy__(subfacet);
3488 hmap_remove(&ofproto->facets, &facet->hmap_node);
3489 list_remove(&facet->list_node);
3493 /* Feed information from 'facet' back into the learning table to keep it in
3494 * sync with what is actually flowing through the datapath. */
3496 facet_learn(struct facet *facet)
3498 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3499 struct action_xlate_ctx ctx;
3501 if (!facet->has_learn
3502 && !facet->has_normal
3503 && (!facet->has_fin_timeout
3504 || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
3508 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
3509 facet->flow.vlan_tci,
3510 facet->rule, facet->tcp_flags, NULL);
3511 ctx.may_learn = true;
3512 xlate_actions_for_side_effects(&ctx, facet->rule->up.actions,
3513 facet->rule->up.n_actions);
3517 facet_account(struct facet *facet)
3519 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3520 struct subfacet *subfacet;
3521 const struct nlattr *a;
3526 if (!facet->has_normal || !ofproto->has_bonded_bundles) {
3529 n_bytes = facet->byte_count - facet->accounted_bytes;
3531 /* This loop feeds byte counters to bond_account() for rebalancing to use
3532 * as a basis. We also need to track the actual VLAN on which the packet
3533 * is going to be sent to ensure that it matches the one passed to
3534 * bond_choose_output_slave(). (Otherwise, we will account to the wrong
3537 * We use the actions from an arbitrary subfacet because they should all
3538 * be equally valid for our purpose. */
3539 subfacet = CONTAINER_OF(list_front(&facet->subfacets),
3540 struct subfacet, list_node);
3541 vlan_tci = facet->flow.vlan_tci;
3542 NL_ATTR_FOR_EACH_UNSAFE (a, left,
3543 subfacet->actions, subfacet->actions_len) {
3544 const struct ovs_action_push_vlan *vlan;
3545 struct ofport_dpif *port;
3547 switch (nl_attr_type(a)) {
3548 case OVS_ACTION_ATTR_OUTPUT:
3549 port = get_odp_port(ofproto, nl_attr_get_u32(a));
3550 if (port && port->bundle && port->bundle->bond) {
3551 bond_account(port->bundle->bond, &facet->flow,
3552 vlan_tci_to_vid(vlan_tci), n_bytes);
3556 case OVS_ACTION_ATTR_POP_VLAN:
3557 vlan_tci = htons(0);
3560 case OVS_ACTION_ATTR_PUSH_VLAN:
3561 vlan = nl_attr_get(a);
3562 vlan_tci = vlan->vlan_tci;
3568 /* Returns true if the only action for 'facet' is to send to the controller.
3569 * (We don't report NetFlow expiration messages for such facets because they
3570 * are just part of the control logic for the network, not real traffic). */
3572 facet_is_controller_flow(struct facet *facet)
3575 && facet->rule->up.n_actions == 1
3576 && action_outputs_to_port(&facet->rule->up.actions[0],
3577 htons(OFPP_CONTROLLER)));
3580 /* Folds all of 'facet''s statistics into its rule. Also updates the
3581 * accounting ofhook and emits a NetFlow expiration if appropriate. All of
3582 * 'facet''s statistics in the datapath should have been zeroed and folded into
3583 * its packet and byte counts before this function is called. */
3585 facet_flush_stats(struct facet *facet)
3587 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3588 struct subfacet *subfacet;
3590 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3591 assert(!subfacet->dp_byte_count);
3592 assert(!subfacet->dp_packet_count);
3595 facet_push_stats(facet);
3596 if (facet->accounted_bytes < facet->byte_count) {
3597 facet_account(facet);
3598 facet->accounted_bytes = facet->byte_count;
3601 if (ofproto->netflow && !facet_is_controller_flow(facet)) {
3602 struct ofexpired expired;
3603 expired.flow = facet->flow;
3604 expired.packet_count = facet->packet_count;
3605 expired.byte_count = facet->byte_count;
3606 expired.used = facet->used;
3607 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
3610 facet->rule->packet_count += facet->packet_count;
3611 facet->rule->byte_count += facet->byte_count;
3613 /* Reset counters to prevent double counting if 'facet' ever gets
3614 * reinstalled. */
3615 facet_reset_counters(facet);
3617 netflow_flow_clear(&facet->nf_flow);
3618 facet->tcp_flags = 0;
3621 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
3622 * Returns it if found, otherwise a null pointer.
3624 * 'hash' must be the return value of flow_hash(flow, 0).
3626 * The returned facet might need revalidation; use facet_lookup_valid()
3627 * instead if that is important. */
3628 static struct facet *
3629 facet_find(struct ofproto_dpif *ofproto,
3630 const struct flow *flow, uint32_t hash)
3632 struct facet *facet;
3634 HMAP_FOR_EACH_WITH_HASH (facet, hmap_node, hash, &ofproto->facets) {
3635 if (flow_equal(flow, &facet->flow)) {
3643 /* Searches 'ofproto''s table of facets for one exactly equal to 'flow'.
3644 * Returns it if found, otherwise a null pointer.
3646 * 'hash' must be the return value of flow_hash(flow, 0).
3648 * The returned facet is guaranteed to be valid. */
3649 static struct facet *
3650 facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow,
3653 struct facet *facet = facet_find(ofproto, flow, hash);
3655 /* The facet we found might not be valid, since we could be in need of
3656 * revalidation. If it is not valid, don't return it. */
3658 && (ofproto->need_revalidate
3659 || tag_set_intersects(&ofproto->revalidate_set, facet->tags))
3660 && !facet_revalidate(facet)) {
3661 COVERAGE_INC(facet_invalidated);
3669 facet_check_consistency(struct facet *facet)
3671 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3673 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3675 uint64_t odp_actions_stub[1024 / 8];
3676 struct ofpbuf odp_actions;
3678 struct rule_dpif *rule;
3679 struct subfacet *subfacet;
3680 bool may_log = false;
3683 /* Check the rule for consistency. */
3684 rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
3686 if (!VLOG_DROP_WARN(&rl)) {
3687 char *s = flow_to_string(&facet->flow);
3688 VLOG_WARN("%s: facet should not exist", s);
3692 } else if (rule != facet->rule) {
3693 may_log = !VLOG_DROP_WARN(&rl);
3699 flow_format(&s, &facet->flow);
3700 ds_put_format(&s, ": facet associated with wrong rule (was "
3701 "table=%"PRIu8",", facet->rule->up.table_id);
3702 cls_rule_format(&facet->rule->up.cr, &s);
3703 ds_put_format(&s, ") (should have been table=%"PRIu8",",
3705 cls_rule_format(&rule->up.cr, &s);
3706 ds_put_char(&s, ')');
3708 VLOG_WARN("%s", ds_cstr(&s));
3715 /* Check the datapath actions for consistency. */
3716 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
3717 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3718 struct action_xlate_ctx ctx;
3719 bool actions_changed;
3720 bool should_install;
3722 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
3723 subfacet->initial_tci, rule, 0, NULL);
3724 xlate_actions(&ctx, rule->up.actions, rule->up.n_actions,
3727 should_install = (ctx.may_set_up_flow
3728 && subfacet->key_fitness != ODP_FIT_TOO_LITTLE);
3729 if (!should_install && !subfacet->installed) {
3730 /* The actions for uninstallable flows may vary from one packet to
3731 * the next, so don't compare the actions. */
3735 actions_changed = (subfacet->actions_len != odp_actions.size
3736 || memcmp(subfacet->actions, odp_actions.data,
3737 subfacet->actions_len));
3738 if (should_install != subfacet->installed || actions_changed) {
3740 may_log = !VLOG_DROP_WARN(&rl);
3745 struct odputil_keybuf keybuf;
3750 subfacet_get_key(subfacet, &keybuf, &key);
3751 odp_flow_key_format(key.data, key.size, &s);
3753 ds_put_cstr(&s, ": inconsistency in subfacet");
3754 if (should_install != subfacet->installed) {
3755 enum odp_key_fitness fitness = subfacet->key_fitness;
3757 ds_put_format(&s, " (should%s have been installed)",
3758 should_install ? "" : " not");
3759 ds_put_format(&s, " (may_set_up_flow=%s, fitness=%s)",
3760 ctx.may_set_up_flow ? "true" : "false",
3761 odp_key_fitness_to_string(fitness));
3763 if (actions_changed) {
3764 ds_put_cstr(&s, " (actions were: ");
3765 format_odp_actions(&s, subfacet->actions,
3766 subfacet->actions_len);
3767 ds_put_cstr(&s, ") (correct actions: ");
3768 format_odp_actions(&s, odp_actions.data, odp_actions.size);
3769 ds_put_char(&s, ')');
3771 ds_put_cstr(&s, " (actions: ");
3772 format_odp_actions(&s, subfacet->actions,
3773 subfacet->actions_len);
3774 ds_put_char(&s, ')');
3776 VLOG_WARN("%s", ds_cstr(&s));
3781 ofpbuf_uninit(&odp_actions);
3786 /* Re-searches the classifier for 'facet':
3788 * - If the rule found is different from 'facet''s current rule, moves
3789 * 'facet' to the new rule and recompiles its actions.
3791 * - If the rule found is the same as 'facet''s current rule, leaves 'facet'
3792 * where it is and recompiles its actions anyway.
3794 * - If there is none, destroys 'facet'.
3796 * Returns true if 'facet' still exists, false if it has been destroyed. */
3798 facet_revalidate(struct facet *facet)
3800 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3801 struct actions {
3802 struct nlattr *odp_actions;
3803 size_t actions_len;
3804 };
3805 struct actions *new_actions;
3807 struct action_xlate_ctx ctx;
3808 uint64_t odp_actions_stub[1024 / 8];
3809 struct ofpbuf odp_actions;
3811 struct rule_dpif *new_rule;
3812 struct subfacet *subfacet;
3813 bool actions_changed;
3816 COVERAGE_INC(facet_revalidate);
3818 /* Determine the new rule. */
3819 new_rule = rule_dpif_lookup(ofproto, &facet->flow, 0);
3821 /* No new rule, so delete the facet. */
3822 facet_remove(facet);
3826 /* Calculate new datapath actions.
3828 * We do not modify any 'facet' state yet, because we might need to, e.g.,
3829 * emit a NetFlow expiration and, if so, we need to have the old state
3830 * around to properly compose it. */
3832 /* If the datapath actions changed or the installability changed,
3833 * then we need to talk to the datapath. */
3836 memset(&ctx, 0, sizeof ctx);
3837 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
3838 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3839 bool should_install;
3841 action_xlate_ctx_init(&ctx, ofproto, &facet->flow,
3842 subfacet->initial_tci, new_rule, 0, NULL);
3843 xlate_actions(&ctx, new_rule->up.actions, new_rule->up.n_actions,
3845 actions_changed = (subfacet->actions_len != odp_actions.size
3846 || memcmp(subfacet->actions, odp_actions.data,
3847 subfacet->actions_len));
3849 should_install = (ctx.may_set_up_flow
3850 && subfacet->key_fitness != ODP_FIT_TOO_LITTLE);
3851 if (actions_changed || should_install != subfacet->installed) {
3852 if (should_install) {
3853 struct dpif_flow_stats stats;
3855 subfacet_install(subfacet,
3856 odp_actions.data, odp_actions.size, &stats);
3857 subfacet_update_stats(subfacet, &stats);
3859 subfacet_uninstall(subfacet);
3863 new_actions = xcalloc(list_size(&facet->subfacets),
3864 sizeof *new_actions);
3866 new_actions[i].odp_actions = xmemdup(odp_actions.data,
3868 new_actions[i].actions_len = odp_actions.size;
3873 ofpbuf_uninit(&odp_actions);
3876 facet_flush_stats(facet);
3879 /* Update 'facet' now that we've taken care of all the old state. */
3880 facet->tags = ctx.tags;
3881 facet->nf_flow.output_iface = ctx.nf_output_iface;
3882 facet->may_install = ctx.may_set_up_flow;
3883 facet->has_learn = ctx.has_learn;
3884 facet->has_normal = ctx.has_normal;
3885 facet->has_fin_timeout = ctx.has_fin_timeout;
3886 facet->mirrors = ctx.mirrors;
3889 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3890 if (new_actions[i].odp_actions) {
3891 free(subfacet->actions);
3892 subfacet->actions = new_actions[i].odp_actions;
3893 subfacet->actions_len = new_actions[i].actions_len;
3899 if (facet->rule != new_rule) {
3900 COVERAGE_INC(facet_changed_rule);
3901 list_remove(&facet->list_node);
3902 list_push_back(&new_rule->facets, &facet->list_node);
3903 facet->rule = new_rule;
3904 facet->used = new_rule->up.created;
3905 facet->prev_used = facet->used;
3911 /* Updates 'facet''s used time. Caller is responsible for calling
3912 * facet_push_stats() to update the flows which 'facet' resubmits into. */
3914 facet_update_time(struct facet *facet, long long int used)
3916 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
3917 if (used > facet->used) {
3919 ofproto_rule_update_used(&facet->rule->up, used);
3920 netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, used);
3925 facet_reset_counters(struct facet *facet)
3927 facet->packet_count = 0;
3928 facet->byte_count = 0;
3929 facet->prev_packet_count = 0;
3930 facet->prev_byte_count = 0;
3931 facet->accounted_bytes = 0;
3935 facet_push_stats(struct facet *facet)
3937 struct dpif_flow_stats stats;
3939 assert(facet->packet_count >= facet->prev_packet_count);
3940 assert(facet->byte_count >= facet->prev_byte_count);
3941 assert(facet->used >= facet->prev_used);
3943 stats.n_packets = facet->packet_count - facet->prev_packet_count;
3944 stats.n_bytes = facet->byte_count - facet->prev_byte_count;
3945 stats.used = facet->used;
3946 stats.tcp_flags = 0;
3948 if (stats.n_packets || stats.n_bytes || facet->used > facet->prev_used) {
3949 facet->prev_packet_count = facet->packet_count;
3950 facet->prev_byte_count = facet->byte_count;
3951 facet->prev_used = facet->used;
3953 flow_push_stats(facet->rule, &facet->flow, &stats);
3955 update_mirror_stats(ofproto_dpif_cast(facet->rule->up.ofproto),
3956 facet->mirrors, stats.n_packets, stats.n_bytes);
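/* facet_push_stats() uses "previous counter" snapshots so the same traffic is
 * never credited twice. The technique in isolation (standard C, hypothetical
 * struct): push only the growth since the last push, then advance the
 * snapshot. */
#if 0
#include <stdint.h>

struct stats { uint64_t packets, prev_packets; };

static uint64_t
take_packet_delta(struct stats *s)
{
    uint64_t delta = s->packets - s->prev_packets;  /* counters only grow */
    s->prev_packets = s->packets;
    return delta;
}
#endif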
3961 rule_credit_stats(struct rule_dpif *rule, const struct dpif_flow_stats *stats)
3963 rule->packet_count += stats->n_packets;
3964 rule->byte_count += stats->n_bytes;
3965 ofproto_rule_update_used(&rule->up, stats->used);
3968 /* Pushes flow statistics to the rules which 'flow' resubmits into given
3969 * 'rule''s actions and mirrors. */
3971 flow_push_stats(struct rule_dpif *rule,
3972 const struct flow *flow, const struct dpif_flow_stats *stats)
3974 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
3975 struct action_xlate_ctx ctx;
3977 ofproto_rule_update_used(&rule->up, stats->used);
3979 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
3981 ctx.resubmit_stats = stats;
3982 xlate_actions_for_side_effects(&ctx, rule->up.actions, rule->up.n_actions);
3987 static struct subfacet *
3988 subfacet_find__(struct ofproto_dpif *ofproto,
3989 const struct nlattr *key, size_t key_len, uint32_t key_hash,
3990 const struct flow *flow)
3992 struct subfacet *subfacet;
3994 HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
3995 &ofproto->subfacets) {
3997 ? (subfacet->key_len == key_len
3998 && !memcmp(key, subfacet->key, key_len))
3999 : flow_equal(flow, &subfacet->facet->flow)) {
4007 /* Searches 'facet' (within 'ofproto') for a subfacet with the specified
4008 * 'key_fitness', 'key', and 'key_len'. Returns the existing subfacet if
4009 * there is one, otherwise creates and returns a new subfacet.
4011 * If the returned subfacet is new, then subfacet->actions will be NULL, in
4012 * which case the caller must populate the actions with
4013 * subfacet_make_actions(). */
4014 static struct subfacet *
4015 subfacet_create(struct facet *facet, enum odp_key_fitness key_fitness,
4016 const struct nlattr *key, size_t key_len, ovs_be16 initial_tci)
4018 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4019 uint32_t key_hash = odp_flow_key_hash(key, key_len);
4020 struct subfacet *subfacet;
4022 subfacet = subfacet_find__(ofproto, key, key_len, key_hash, &facet->flow);
4024 if (subfacet->facet == facet) {
4028 /* This shouldn't happen. */
4029 VLOG_ERR_RL(&rl, "subfacet with wrong facet");
4030 subfacet_destroy(subfacet);
4033 subfacet = (list_is_empty(&facet->subfacets)
4034 ? &facet->one_subfacet
4035 : xmalloc(sizeof *subfacet));
4036 hmap_insert(&ofproto->subfacets, &subfacet->hmap_node, key_hash);
4037 list_push_back(&facet->subfacets, &subfacet->list_node);
4038 subfacet->facet = facet;
4039 subfacet->key_fitness = key_fitness;
4040 if (key_fitness != ODP_FIT_PERFECT) {
4041 subfacet->key = xmemdup(key, key_len);
4042 subfacet->key_len = key_len;
4044 subfacet->key = NULL;
4045 subfacet->key_len = 0;
4047 subfacet->used = time_msec();
4048 subfacet->dp_packet_count = 0;
4049 subfacet->dp_byte_count = 0;
4050 subfacet->actions_len = 0;
4051 subfacet->actions = NULL;
4052 subfacet->installed = false;
4053 subfacet->initial_tci = initial_tci;
4058 /* Searches 'ofproto' for a subfacet with the given 'key' and 'key_len'.
4059 * Returns the subfacet if one exists, otherwise NULL. */
4060 static struct subfacet *
4061 subfacet_find(struct ofproto_dpif *ofproto,
4062 const struct nlattr *key, size_t key_len)
4064 uint32_t key_hash = odp_flow_key_hash(key, key_len);
4065 enum odp_key_fitness fitness;
4068 fitness = odp_flow_key_to_flow(key, key_len, &flow);
4069 if (fitness == ODP_FIT_ERROR) {
4073 return subfacet_find__(ofproto, key, key_len, key_hash, &flow);
4076 /* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
4077 * its facet within 'ofproto', and frees it. */
4079 subfacet_destroy__(struct subfacet *subfacet)
4081 struct facet *facet = subfacet->facet;
4082 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4084 subfacet_uninstall(subfacet);
4085 hmap_remove(&ofproto->subfacets, &subfacet->hmap_node);
4086 list_remove(&subfacet->list_node);
4087 free(subfacet->key);
4088 free(subfacet->actions);
4089 if (subfacet != &facet->one_subfacet) {
4094 /* Destroys 'subfacet', as with subfacet_destroy__(), and then, if it was the
4095 * last remaining subfacet in its facet, destroys the facet too. */
4097 subfacet_destroy(struct subfacet *subfacet)
4099 struct facet *facet = subfacet->facet;
4101 if (list_is_singleton(&facet->subfacets)) {
4102 /* facet_remove() needs at least one subfacet (it will remove it). */
4103 facet_remove(facet);
4105 subfacet_destroy__(subfacet);
4109 /* Initializes 'key' with the sequence of OVS_KEY_ATTR_* Netlink attributes
4110 * that can be used to refer to 'subfacet'. The caller must provide 'keybuf'
4111 * for use as temporary storage. */
4113 subfacet_get_key(struct subfacet *subfacet, struct odputil_keybuf *keybuf,
4116 if (!subfacet->key) {
4117 ofpbuf_use_stack(key, keybuf, sizeof *keybuf);
4118 odp_flow_key_from_flow(key, &subfacet->facet->flow);
4120 ofpbuf_use_const(key, subfacet->key, subfacet->key_len);
4124 /* Composes the datapath actions for 'subfacet' based on its rule's actions.
4125 * Translates the actions into 'odp_actions', which the caller must have
4126 * initialized and is responsible for uninitializing. */
4128 subfacet_make_actions(struct subfacet *subfacet, const struct ofpbuf *packet,
4129 struct ofpbuf *odp_actions)
4131 struct facet *facet = subfacet->facet;
4132 struct rule_dpif *rule = facet->rule;
4133 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4135 struct action_xlate_ctx ctx;
4137 action_xlate_ctx_init(&ctx, ofproto, &facet->flow, subfacet->initial_tci,
4139 xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, odp_actions);
4140 facet->tags = ctx.tags;
4141 facet->may_install = ctx.may_set_up_flow;
4142 facet->has_learn = ctx.has_learn;
4143 facet->has_normal = ctx.has_normal;
4144 facet->has_fin_timeout = ctx.has_fin_timeout;
4145 facet->nf_flow.output_iface = ctx.nf_output_iface;
4146 facet->mirrors = ctx.mirrors;
4148 if (subfacet->actions_len != odp_actions->size
4149 || memcmp(subfacet->actions, odp_actions->data, odp_actions->size)) {
4150 free(subfacet->actions);
4151 subfacet->actions_len = odp_actions->size;
4152 subfacet->actions = xmemdup(odp_actions->data, odp_actions->size);
4156 /* Updates 'subfacet''s datapath flow, setting its actions to 'actions_len'
4157 * bytes of actions in 'actions'. If 'stats' is non-null, statistics counters
4158 * in the datapath will be zeroed and 'stats' will be updated with the traffic
4159 * received since 'subfacet' was last updated.
4161 * Returns 0 if successful, otherwise a positive errno value. */
4163 subfacet_install(struct subfacet *subfacet,
4164 const struct nlattr *actions, size_t actions_len,
4165 struct dpif_flow_stats *stats)
4167 struct facet *facet = subfacet->facet;
4168 struct ofproto_dpif *ofproto = ofproto_dpif_cast(facet->rule->up.ofproto);
4169 struct odputil_keybuf keybuf;
4170 enum dpif_flow_put_flags flags;
4174 flags = DPIF_FP_CREATE | DPIF_FP_MODIFY;
4176 flags |= DPIF_FP_ZERO_STATS;
4179 subfacet_get_key(subfacet, &keybuf, &key);
4180 ret = dpif_flow_put(ofproto->dpif, flags, key.data, key.size,
4181 actions, actions_len, stats);
4184 subfacet_reset_dp_stats(subfacet, stats);
4190 /* If 'subfacet' is installed in the datapath, uninstalls it. */
4192 subfacet_uninstall(struct subfacet *subfacet)
4194 if (subfacet->installed) {
4195 struct rule_dpif *rule = subfacet->facet->rule;
4196 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4197 struct odputil_keybuf keybuf;
4198 struct dpif_flow_stats stats;
4202 subfacet_get_key(subfacet, &keybuf, &key);
4203 error = dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
4204 subfacet_reset_dp_stats(subfacet, &stats);
4206 subfacet_update_stats(subfacet, &stats);
4208 subfacet->installed = false;
4210 assert(subfacet->dp_packet_count == 0);
4211 assert(subfacet->dp_byte_count == 0);
4215 /* Resets 'subfacet''s datapath statistics counters. This should be called
4216 * when 'subfacet''s statistics are cleared in the datapath. If 'stats' is
4217 * non-null, it should contain the statistics returned by dpif when 'subfacet'
4218  * was reset in the datapath. 'stats' will be modified to include only the
4219  * statistics accumulated since 'subfacet' was last updated. */
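/* For example (an illustrative sketch): if the datapath reports
 * stats->n_packets == 10 while subfacet->dp_packet_count == 7, then 7 packets
 * were already credited and 'stats' is adjusted to report only the 3 new
 * ones; the cached dp_packet_count and dp_byte_count are then reset to 0. */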
4221 subfacet_reset_dp_stats(struct subfacet *subfacet,
4222 struct dpif_flow_stats *stats)
4225 && subfacet->dp_packet_count <= stats->n_packets
4226 && subfacet->dp_byte_count <= stats->n_bytes) {
4227 stats->n_packets -= subfacet->dp_packet_count;
4228 stats->n_bytes -= subfacet->dp_byte_count;
4231 subfacet->dp_packet_count = 0;
4232 subfacet->dp_byte_count = 0;
4235 /* Updates 'subfacet''s used time. The caller is responsible for calling
4236 * facet_push_stats() to update the flows which 'subfacet' resubmits into. */
4238 subfacet_update_time(struct subfacet *subfacet, long long int used)
4240 if (used > subfacet->used) {
4241 subfacet->used = used;
4242 facet_update_time(subfacet->facet, used);
4246 /* Folds the statistics from 'stats' into the counters in 'subfacet'.
4248 * Because of the meaning of a subfacet's counters, it only makes sense to do
4249 * this if 'stats' are not tracked in the datapath, that is, if 'stats'
4250 * represents a packet that was sent by hand or if it represents statistics
4251 * that have been cleared out of the datapath. */
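/* Illustrative sketch: subfacet_uninstall() above follows exactly this
 * pattern, folding in the final counters that dpif_flow_del() returns:
 *
 *     dpif_flow_del(ofproto->dpif, key.data, key.size, &stats);
 *     subfacet_reset_dp_stats(subfacet, &stats);
 *     subfacet_update_stats(subfacet, &stats);
 */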
4253 subfacet_update_stats(struct subfacet *subfacet,
4254 const struct dpif_flow_stats *stats)
4256 if (stats->n_packets || stats->used > subfacet->used) {
4257 struct facet *facet = subfacet->facet;
4259 subfacet_update_time(subfacet, stats->used);
4260 facet->packet_count += stats->n_packets;
4261 facet->byte_count += stats->n_bytes;
4262 facet->tcp_flags |= stats->tcp_flags;
4263 facet_push_stats(facet);
4264 netflow_flow_update_flags(&facet->nf_flow, stats->tcp_flags);
4270 static struct rule_dpif *
4271 rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
4274 struct cls_rule *cls_rule;
4275 struct classifier *cls;
4277 if (table_id >= N_TABLES) {
4281 cls = &ofproto->up.tables[table_id].cls;
4282 if (flow->nw_frag & FLOW_NW_FRAG_ANY
4283 && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
4284 /* For OFPC_FRAG_NORMAL frag_handling, we must pretend that transport
4285  * ports are unavailable. */
4286 struct flow ofpc_normal_flow = *flow;
4287 ofpc_normal_flow.tp_src = htons(0);
4288 ofpc_normal_flow.tp_dst = htons(0);
4289 cls_rule = classifier_lookup(cls, &ofpc_normal_flow);
4291 cls_rule = classifier_lookup(cls, flow);
4293 return rule_dpif_cast(rule_from_cls_rule(cls_rule));
4297 complete_operation(struct rule_dpif *rule)
4299 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4301 rule_invalidate(rule);
4303 struct dpif_completion *c = xmalloc(sizeof *c);
4304 c->op = rule->up.pending;
4305 list_push_back(&ofproto->completions, &c->list_node);
4307 ofoperation_complete(rule->up.pending, 0);
4311 static struct rule *
4314 struct rule_dpif *rule = xmalloc(sizeof *rule);
4319 rule_dealloc(struct rule *rule_)
4321 struct rule_dpif *rule = rule_dpif_cast(rule_);
4326 rule_construct(struct rule *rule_)
4328 struct rule_dpif *rule = rule_dpif_cast(rule_);
4329 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4330 struct rule_dpif *victim;
4334 error = validate_actions(rule->up.actions, rule->up.n_actions,
4335 &rule->up.cr.flow, ofproto->max_ports);
4340 rule->packet_count = 0;
4341 rule->byte_count = 0;
4343 victim = rule_dpif_cast(ofoperation_get_victim(rule->up.pending));
4344 if (victim && !list_is_empty(&victim->facets)) {
4345 struct facet *facet;
4347 rule->facets = victim->facets;
4348 list_moved(&rule->facets);
4349 LIST_FOR_EACH (facet, list_node, &rule->facets) {
4350 /* XXX: We're only clearing our local counters here. It's possible
4351 * that quite a few packets are unaccounted for in the datapath
4352 * statistics. These will be accounted to the new rule instead of
4353 * cleared as required. This could be fixed by clearing out the
4354  * datapath statistics for this facet, but currently it doesn't seem worth it. */
4356 facet_reset_counters(facet);
4360 /* Must avoid list_moved() in this case. */
4361 list_init(&rule->facets);
4364 table_id = rule->up.table_id;
4365 rule->tag = (victim ? victim->tag
4367 : rule_calculate_tag(&rule->up.cr.flow, &rule->up.cr.wc,
4368 ofproto->tables[table_id].basis));
4370 complete_operation(rule);
4375 rule_destruct(struct rule *rule_)
4377 struct rule_dpif *rule = rule_dpif_cast(rule_);
4378 struct facet *facet, *next_facet;
4380 LIST_FOR_EACH_SAFE (facet, next_facet, list_node, &rule->facets) {
4381 facet_revalidate(facet);
4384 complete_operation(rule);
4388 rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
4390 struct rule_dpif *rule = rule_dpif_cast(rule_);
4391 struct facet *facet;
4393 /* Start from historical data for 'rule' itself that are no longer tracked
4394 * in facets. This counts, for example, facets that have expired. */
4395 *packets = rule->packet_count;
4396 *bytes = rule->byte_count;
4398 /* Add any statistics that are tracked by facets. This includes
4399 * statistical data recently updated by ofproto_update_stats() as well as
4400 * stats for packets that were executed "by hand" via dpif_execute(). */
4401 LIST_FOR_EACH (facet, list_node, &rule->facets) {
4402 *packets += facet->packet_count;
4403 *bytes += facet->byte_count;
4408 rule_execute(struct rule *rule_, const struct flow *flow,
4409 struct ofpbuf *packet)
4411 struct rule_dpif *rule = rule_dpif_cast(rule_);
4412 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4414 struct dpif_flow_stats stats;
4416 struct action_xlate_ctx ctx;
4417 uint64_t odp_actions_stub[1024 / 8];
4418 struct ofpbuf odp_actions;
4420 dpif_flow_stats_extract(flow, packet, &stats);
4421 rule_credit_stats(rule, &stats);
4423 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
4424 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci,
4425 rule, stats.tcp_flags, packet);
4426 ctx.resubmit_stats = &stats;
4427 xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, &odp_actions);
4429 execute_odp_actions(ofproto, flow, odp_actions.data,
4430 odp_actions.size, packet);
4432 ofpbuf_uninit(&odp_actions);
4438 rule_modify_actions(struct rule *rule_)
4440 struct rule_dpif *rule = rule_dpif_cast(rule_);
4441 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4444 error = validate_actions(rule->up.actions, rule->up.n_actions,
4445 &rule->up.cr.flow, ofproto->max_ports);
4447 ofoperation_complete(rule->up.pending, error);
4451 complete_operation(rule);
4454 /* Sends 'packet' out 'ofport'.
4455 * May modify 'packet'.
4456 * Returns 0 if successful, otherwise a positive errno value. */
4458 send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
4460 const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
4461 struct ofpbuf key, odp_actions;
4462 struct odputil_keybuf keybuf;
4467 flow_extract((struct ofpbuf *) packet, 0, 0, 0, &flow);
4468 odp_port = vsp_realdev_to_vlandev(ofproto, ofport->odp_port,
4470 if (odp_port != ofport->odp_port) {
4471 eth_pop_vlan(packet);
4472 flow.vlan_tci = htons(0);
4475 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
4476 odp_flow_key_from_flow(&key, &flow);
4478 ofpbuf_init(&odp_actions, 32);
4479 compose_sflow_action(ofproto, &odp_actions, &flow, odp_port);
4481 nl_msg_put_u32(&odp_actions, OVS_ACTION_ATTR_OUTPUT, odp_port);
4482 error = dpif_execute(ofproto->dpif,
4484 odp_actions.data, odp_actions.size,
4486 ofpbuf_uninit(&odp_actions);
4489 VLOG_WARN_RL(&rl, "%s: failed to send packet on port %"PRIu32" (%s)",
4490 ofproto->up.name, odp_port, strerror(error));
4492 ofproto_update_local_port_stats(ofport->up.ofproto, packet->size, 0);
4496 /* OpenFlow to datapath action translation. */
4498 static void do_xlate_actions(const union ofp_action *in, size_t n_in,
4499 struct action_xlate_ctx *ctx);
4500 static void xlate_normal(struct action_xlate_ctx *);
4503 put_userspace_action(const struct ofproto_dpif *ofproto,
4504 struct ofpbuf *odp_actions,
4505 const struct flow *flow,
4506 const struct user_action_cookie *cookie)
4510 pid = dpif_port_get_pid(ofproto->dpif,
4511 ofp_port_to_odp_port(flow->in_port));
4513 return odp_put_userspace_action(pid, cookie, odp_actions);
4516 /* Compose SAMPLE action for sFlow. */
4518 compose_sflow_action(const struct ofproto_dpif *ofproto,
4519 struct ofpbuf *odp_actions,
4520 const struct flow *flow,
4523 uint32_t port_ifindex;
4524 uint32_t probability;
4525 struct user_action_cookie cookie;
4526 size_t sample_offset, actions_offset;
4527 int cookie_offset, n_output;
4529 if (!ofproto->sflow || flow->in_port == OFPP_NONE) {
4533 if (odp_port == OVSP_NONE) {
4537 port_ifindex = dpif_sflow_odp_port_to_ifindex(ofproto->sflow, odp_port);
4541 sample_offset = nl_msg_start_nested(odp_actions, OVS_ACTION_ATTR_SAMPLE);
4543 /* Number of packets out of UINT_MAX to sample. */
4544 probability = dpif_sflow_get_probability(ofproto->sflow);
4545 nl_msg_put_u32(odp_actions, OVS_SAMPLE_ATTR_PROBABILITY, probability);
4547 actions_offset = nl_msg_start_nested(odp_actions, OVS_SAMPLE_ATTR_ACTIONS);
4549 cookie.type = USER_ACTION_COOKIE_SFLOW;
4550 cookie.data = port_ifindex;
4551 cookie.n_output = n_output;
4552 cookie.vlan_tci = 0;
4553 cookie_offset = put_userspace_action(ofproto, odp_actions, flow, &cookie);
4555 nl_msg_end_nested(odp_actions, actions_offset);
4556 nl_msg_end_nested(odp_actions, sample_offset);
4557 return cookie_offset;
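/* The nested attribute layout that compose_sflow_action() produces looks
 * like this (a sketch of the nesting, not literal output):
 *
 *     OVS_ACTION_ATTR_SAMPLE
 *         OVS_SAMPLE_ATTR_PROBABILITY   (packets out of UINT_MAX to sample)
 *         OVS_SAMPLE_ATTR_ACTIONS
 *             OVS_ACTION_ATTR_USERSPACE (carries the sFlow cookie)
 */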
4560 /* The SAMPLE action must be the first action in any given list of actions.
4561  * At this point we do not yet have all of the information required to build
4562  * it, so we build the sample action as completely as possible. */
4564 add_sflow_action(struct action_xlate_ctx *ctx)
4566 ctx->user_cookie_offset = compose_sflow_action(ctx->ofproto,
4568 &ctx->flow, OVSP_NONE);
4569 ctx->sflow_odp_port = 0;
4570 ctx->sflow_n_outputs = 0;
4573 /* Fixes the SAMPLE action according to data collected while composing ODP
4574  * actions. We need to fix the SAMPLE action's OVS_SAMPLE_ATTR_ACTIONS
4575  * attribute, i.e. the nested USERSPACE action's user cookie, which is required for sFlow. */
4577 fix_sflow_action(struct action_xlate_ctx *ctx)
4579 const struct flow *base = &ctx->base_flow;
4580 struct user_action_cookie *cookie;
4582 if (!ctx->user_cookie_offset) {
4586 cookie = ofpbuf_at(ctx->odp_actions, ctx->user_cookie_offset,
4588 assert(cookie != NULL);
4589 assert(cookie->type == USER_ACTION_COOKIE_SFLOW);
4591 if (ctx->sflow_n_outputs) {
4592 cookie->data = dpif_sflow_odp_port_to_ifindex(ctx->ofproto->sflow,
4593 ctx->sflow_odp_port);
4595 if (ctx->sflow_n_outputs >= 255) {
4596 cookie->n_output = 255;
4598 cookie->n_output = ctx->sflow_n_outputs;
4600 cookie->vlan_tci = base->vlan_tci;
4604 compose_output_action__(struct action_xlate_ctx *ctx, uint16_t ofp_port,
4607 const struct ofport_dpif *ofport = get_ofp_port(ctx->ofproto, ofp_port);
4608 uint16_t odp_port = ofp_port_to_odp_port(ofp_port);
4609 ovs_be16 flow_vlan_tci = ctx->flow.vlan_tci;
4610 uint8_t flow_nw_tos = ctx->flow.nw_tos;
4614 struct priority_to_dscp *pdscp;
4616 if (ofport->up.pp.config & OFPUTIL_PC_NO_FWD
4617 || (check_stp && !stp_forward_in_state(ofport->stp_state))) {
4621 pdscp = get_priority(ofport, ctx->flow.skb_priority);
4623 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
4624 ctx->flow.nw_tos |= pdscp->dscp;
4627 /* We may not have an ofport record for this port, but it doesn't hurt
4628 * to allow forwarding to it anyhow. Maybe such a port will appear
4629 * later and we're pre-populating the flow table. */
4632 out_port = vsp_realdev_to_vlandev(ctx->ofproto, odp_port,
4633 ctx->flow.vlan_tci);
4634 if (out_port != odp_port) {
4635 ctx->flow.vlan_tci = htons(0);
4637 commit_odp_actions(&ctx->flow, &ctx->base_flow, ctx->odp_actions);
4638 nl_msg_put_u32(ctx->odp_actions, OVS_ACTION_ATTR_OUTPUT, out_port);
4640 ctx->sflow_odp_port = odp_port;
4641 ctx->sflow_n_outputs++;
4642 ctx->nf_output_iface = ofp_port;
4643 ctx->flow.vlan_tci = flow_vlan_tci;
4644 ctx->flow.nw_tos = flow_nw_tos;
4648 compose_output_action(struct action_xlate_ctx *ctx, uint16_t ofp_port)
4650 compose_output_action__(ctx, ofp_port, true);
4654 xlate_table_action(struct action_xlate_ctx *ctx,
4655 uint16_t in_port, uint8_t table_id)
4657 if (ctx->recurse < MAX_RESUBMIT_RECURSION) {
4658 struct ofproto_dpif *ofproto = ctx->ofproto;
4659 struct rule_dpif *rule;
4660 uint16_t old_in_port;
4661 uint8_t old_table_id;
4663 old_table_id = ctx->table_id;
4664 ctx->table_id = table_id;
4666 /* Look up a flow with 'in_port' as the input port. */
4667 old_in_port = ctx->flow.in_port;
4668 ctx->flow.in_port = in_port;
4669 rule = rule_dpif_lookup(ofproto, &ctx->flow, table_id);
4672 if (table_id > 0 && table_id < N_TABLES) {
4673 struct table_dpif *table = &ofproto->tables[table_id];
4674 if (table->other_table) {
4675 ctx->tags |= (rule && rule->tag
4677 : rule_calculate_tag(&ctx->flow,
4678 &table->other_table->wc,
4683 /* Restore the original input port. Otherwise OFPP_NORMAL and
4684 * OFPP_IN_PORT will have surprising behavior. */
4685 ctx->flow.in_port = old_in_port;
4687 if (ctx->resubmit_hook) {
4688 ctx->resubmit_hook(ctx, rule);
4692 struct rule_dpif *old_rule = ctx->rule;
4694 if (ctx->resubmit_stats) {
4695 rule_credit_stats(rule, ctx->resubmit_stats);
4700 do_xlate_actions(rule->up.actions, rule->up.n_actions, ctx);
4701 ctx->rule = old_rule;
4705 ctx->table_id = old_table_id;
4707 static struct vlog_rate_limit recurse_rl = VLOG_RATE_LIMIT_INIT(1, 1);
4709 VLOG_ERR_RL(&recurse_rl, "resubmit actions recursed over %d times",
4710 MAX_RESUBMIT_RECURSION);
4711 ctx->max_resubmit_trigger = true;
4716 xlate_resubmit_table(struct action_xlate_ctx *ctx,
4717 const struct nx_action_resubmit *nar)
4722 in_port = (nar->in_port == htons(OFPP_IN_PORT)
4724 : ntohs(nar->in_port));
4725 table_id = nar->table == 255 ? ctx->table_id : nar->table;
4727 xlate_table_action(ctx, in_port, table_id);
4731 flood_packets(struct action_xlate_ctx *ctx, bool all)
4733 struct ofport_dpif *ofport;
4735 HMAP_FOR_EACH (ofport, up.hmap_node, &ctx->ofproto->up.ports) {
4736 uint16_t ofp_port = ofport->up.ofp_port;
4738 if (ofp_port == ctx->flow.in_port) {
4743 compose_output_action__(ctx, ofp_port, false);
4744 } else if (!(ofport->up.pp.config & OFPUTIL_PC_NO_FLOOD)) {
4745 compose_output_action(ctx, ofp_port);
4749 ctx->nf_output_iface = NF_OUT_FLOOD;
4753 execute_controller_action(struct action_xlate_ctx *ctx, int len,
4754 enum ofp_packet_in_reason reason,
4755 uint16_t controller_id)
4757 struct ofputil_packet_in pin;
4758 struct ofpbuf *packet;
4760 ctx->may_set_up_flow = false;
4765 packet = ofpbuf_clone(ctx->packet);
4767 if (packet->l2 && packet->l3) {
4768 struct eth_header *eh;
4770 eth_pop_vlan(packet);
4773 /* If the Ethernet type is less than ETH_TYPE_MIN, it's likely an 802.2
4774 * LLC frame. Calculating the Ethernet type of these frames is more
4775 * trouble than seems appropriate for a simple assertion. */
4776 assert(ntohs(eh->eth_type) < ETH_TYPE_MIN
4777 || eh->eth_type == ctx->flow.dl_type);
4779 memcpy(eh->eth_src, ctx->flow.dl_src, sizeof eh->eth_src);
4780 memcpy(eh->eth_dst, ctx->flow.dl_dst, sizeof eh->eth_dst);
4782 if (ctx->flow.vlan_tci & htons(VLAN_CFI)) {
4783 eth_push_vlan(packet, ctx->flow.vlan_tci);
4787 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
4788 packet_set_ipv4(packet, ctx->flow.nw_src, ctx->flow.nw_dst,
4789 ctx->flow.nw_tos, ctx->flow.nw_ttl);
4793 if (ctx->flow.nw_proto == IPPROTO_TCP) {
4794 packet_set_tcp_port(packet, ctx->flow.tp_src,
4796 } else if (ctx->flow.nw_proto == IPPROTO_UDP) {
4797 packet_set_udp_port(packet, ctx->flow.tp_src,
4804 pin.packet = packet->data;
4805 pin.packet_len = packet->size;
4806 pin.reason = reason;
4807 pin.controller_id = controller_id;
4808 pin.table_id = ctx->table_id;
4809 pin.cookie = ctx->rule ? ctx->rule->up.flow_cookie : 0;
4812 flow_get_metadata(&ctx->flow, &pin.fmd);
4814 connmgr_send_packet_in(ctx->ofproto->up.connmgr, &pin);
4815 ofpbuf_delete(packet);
4819 compose_dec_ttl(struct action_xlate_ctx *ctx)
4821 if (ctx->flow.dl_type != htons(ETH_TYPE_IP) &&
4822 ctx->flow.dl_type != htons(ETH_TYPE_IPV6)) {
4826 if (ctx->flow.nw_ttl > 1) {
4830 execute_controller_action(ctx, UINT16_MAX, OFPR_INVALID_TTL, 0);
4832 /* Stop processing for the current table. */
4838 xlate_output_action__(struct action_xlate_ctx *ctx,
4839 uint16_t port, uint16_t max_len)
4841 uint16_t prev_nf_output_iface = ctx->nf_output_iface;
4843 ctx->nf_output_iface = NF_OUT_DROP;
4847 compose_output_action(ctx, ctx->flow.in_port);
4850 xlate_table_action(ctx, ctx->flow.in_port, ctx->table_id);
4856 flood_packets(ctx, false);
4859 flood_packets(ctx, true);
4861 case OFPP_CONTROLLER:
4862 execute_controller_action(ctx, max_len, OFPR_ACTION, 0);
4868 if (port != ctx->flow.in_port) {
4869 compose_output_action(ctx, port);
4874 if (prev_nf_output_iface == NF_OUT_FLOOD) {
4875 ctx->nf_output_iface = NF_OUT_FLOOD;
4876 } else if (ctx->nf_output_iface == NF_OUT_DROP) {
4877 ctx->nf_output_iface = prev_nf_output_iface;
4878 } else if (prev_nf_output_iface != NF_OUT_DROP &&
4879 ctx->nf_output_iface != NF_OUT_FLOOD) {
4880 ctx->nf_output_iface = NF_OUT_MULTI;
4885 xlate_output_reg_action(struct action_xlate_ctx *ctx,
4886 const struct nx_action_output_reg *naor)
4888 struct mf_subfield src;
4891 nxm_decode(&src, naor->src, naor->ofs_nbits);
4892 ofp_port = mf_get_subfield(&src, &ctx->flow);
4894 if (ofp_port <= UINT16_MAX) {
4895 xlate_output_action__(ctx, ofp_port, ntohs(naor->max_len));
4900 xlate_output_action(struct action_xlate_ctx *ctx,
4901 const struct ofp_action_output *oao)
4903 xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
4907 xlate_enqueue_action(struct action_xlate_ctx *ctx,
4908 const struct ofp_action_enqueue *oae)
4911 uint32_t flow_priority, priority;
4914 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(oae->queue_id),
4917 /* Fall back to ordinary output action. */
4918 xlate_output_action__(ctx, ntohs(oae->port), 0);
4922 /* Figure out datapath output port. */
4923 ofp_port = ntohs(oae->port);
4924 if (ofp_port == OFPP_IN_PORT) {
4925 ofp_port = ctx->flow.in_port;
4926 } else if (ofp_port == ctx->flow.in_port) {
4930 /* Add datapath actions. */
4931 flow_priority = ctx->flow.skb_priority;
4932 ctx->flow.skb_priority = priority;
4933 compose_output_action(ctx, ofp_port);
4934 ctx->flow.skb_priority = flow_priority;
4936 /* Update NetFlow output port. */
4937 if (ctx->nf_output_iface == NF_OUT_DROP) {
4938 ctx->nf_output_iface = ofp_port;
4939 } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
4940 ctx->nf_output_iface = NF_OUT_MULTI;
4945 xlate_set_queue_action(struct action_xlate_ctx *ctx,
4946 const struct nx_action_set_queue *nasq)
4951 error = dpif_queue_to_priority(ctx->ofproto->dpif, ntohl(nasq->queue_id),
4954 /* Couldn't translate queue to a priority, so ignore. A warning
4955 * has already been logged. */
4959 ctx->flow.skb_priority = priority;
4962 struct xlate_reg_state {
4968 xlate_autopath(struct action_xlate_ctx *ctx,
4969 const struct nx_action_autopath *naa)
4971 uint16_t ofp_port = ntohl(naa->id);
4972 struct ofport_dpif *port = get_ofp_port(ctx->ofproto, ofp_port);
4974 if (!port || !port->bundle) {
4975 ofp_port = OFPP_NONE;
4976 } else if (port->bundle->bond) {
4977 /* Autopath does not support VLAN hashing. */
4978 struct ofport_dpif *slave = bond_choose_output_slave(
4979 port->bundle->bond, &ctx->flow, 0, &ctx->tags);
4981 ofp_port = slave->up.ofp_port;
4984 autopath_execute(naa, &ctx->flow, ofp_port);
4988 slave_enabled_cb(uint16_t ofp_port, void *ofproto_)
4990 struct ofproto_dpif *ofproto = ofproto_;
4991 struct ofport_dpif *port;
5001 case OFPP_CONTROLLER: /* Not supported by the bundle action. */
5004 port = get_ofp_port(ofproto, ofp_port);
5005 return port ? port->may_enable : false;
5010 xlate_learn_action(struct action_xlate_ctx *ctx,
5011 const struct nx_action_learn *learn)
5013 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 1);
5014 struct ofputil_flow_mod fm;
5017 learn_execute(learn, &ctx->flow, &fm);
5019 error = ofproto_flow_mod(&ctx->ofproto->up, &fm);
5020 if (error && !VLOG_DROP_WARN(&rl)) {
5021 VLOG_WARN("learning action failed to modify flow table (%s)",
5022 ofperr_get_name(error));
5028 /* Reduces '*timeout' to no more than 'max'. A value of zero in either case
5029 * means "infinite". */
5031 reduce_timeout(uint16_t max, uint16_t *timeout)
5033 if (max && (!*timeout || *timeout > max)) {
5039 xlate_fin_timeout(struct action_xlate_ctx *ctx,
5040 const struct nx_action_fin_timeout *naft)
5042 if (ctx->tcp_flags & (TCP_FIN | TCP_RST) && ctx->rule) {
5043 struct rule_dpif *rule = ctx->rule;
5045 reduce_timeout(ntohs(naft->fin_idle_timeout), &rule->up.idle_timeout);
5046 reduce_timeout(ntohs(naft->fin_hard_timeout), &rule->up.hard_timeout);
5051 may_receive(const struct ofport_dpif *port, struct action_xlate_ctx *ctx)
5053 if (port->up.pp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp)
5054 ? OFPUTIL_PC_NO_RECV_STP
5055 : OFPUTIL_PC_NO_RECV)) {
5059 /* Only drop packets here if both forwarding and learning are
5060  * disabled. If just learning is enabled, we need to let
5061  * OFPP_NORMAL and the learning action look at the packet
5062  * before we can drop it. */
5063 if (!stp_forward_in_state(port->stp_state)
5064 && !stp_learn_in_state(port->stp_state)) {
5072 do_xlate_actions(const union ofp_action *in, size_t n_in,
5073 struct action_xlate_ctx *ctx)
5075 const struct ofport_dpif *port;
5076 const union ofp_action *ia;
5077 bool was_evictable = true;
5080 port = get_ofp_port(ctx->ofproto, ctx->flow.in_port);
5081 if (port && !may_receive(port, ctx)) {
5082 /* Drop this flow. */
5087 /* Don't let the rule we're working on get evicted underneath us. */
5088 was_evictable = ctx->rule->up.evictable;
5089 ctx->rule->up.evictable = false;
5091 OFPUTIL_ACTION_FOR_EACH_UNSAFE (ia, left, in, n_in) {
5092 const struct ofp_action_dl_addr *oada;
5093 const struct nx_action_resubmit *nar;
5094 const struct nx_action_set_tunnel *nast;
5095 const struct nx_action_set_queue *nasq;
5096 const struct nx_action_multipath *nam;
5097 const struct nx_action_autopath *naa;
5098 const struct nx_action_bundle *nab;
5099 const struct nx_action_output_reg *naor;
5100 const struct nx_action_controller *nac;
5101 enum ofputil_action_code code;
5108 code = ofputil_decode_action_unsafe(ia);
5110 case OFPUTIL_OFPAT10_OUTPUT:
5111 xlate_output_action(ctx, &ia->output);
5114 case OFPUTIL_OFPAT10_SET_VLAN_VID:
5115 ctx->flow.vlan_tci &= ~htons(VLAN_VID_MASK);
5116 ctx->flow.vlan_tci |= ia->vlan_vid.vlan_vid | htons(VLAN_CFI);
5119 case OFPUTIL_OFPAT10_SET_VLAN_PCP:
5120 ctx->flow.vlan_tci &= ~htons(VLAN_PCP_MASK);
5121 ctx->flow.vlan_tci |= htons(
5122 (ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) | VLAN_CFI);
5125 case OFPUTIL_OFPAT10_STRIP_VLAN:
5126 ctx->flow.vlan_tci = htons(0);
5129 case OFPUTIL_OFPAT10_SET_DL_SRC:
5130 oada = ((struct ofp_action_dl_addr *) ia);
5131 memcpy(ctx->flow.dl_src, oada->dl_addr, ETH_ADDR_LEN);
5134 case OFPUTIL_OFPAT10_SET_DL_DST:
5135 oada = ((struct ofp_action_dl_addr *) ia);
5136 memcpy(ctx->flow.dl_dst, oada->dl_addr, ETH_ADDR_LEN);
5139 case OFPUTIL_OFPAT10_SET_NW_SRC:
5140 ctx->flow.nw_src = ia->nw_addr.nw_addr;
5143 case OFPUTIL_OFPAT10_SET_NW_DST:
5144 ctx->flow.nw_dst = ia->nw_addr.nw_addr;
5147 case OFPUTIL_OFPAT10_SET_NW_TOS:
5148 /* OpenFlow 1.0 only supports IPv4. */
5149 if (ctx->flow.dl_type == htons(ETH_TYPE_IP)) {
5150 ctx->flow.nw_tos &= ~IP_DSCP_MASK;
5151 ctx->flow.nw_tos |= ia->nw_tos.nw_tos & IP_DSCP_MASK;
5155 case OFPUTIL_OFPAT10_SET_TP_SRC:
5156 ctx->flow.tp_src = ia->tp_port.tp_port;
5159 case OFPUTIL_OFPAT10_SET_TP_DST:
5160 ctx->flow.tp_dst = ia->tp_port.tp_port;
5163 case OFPUTIL_OFPAT10_ENQUEUE:
5164 xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
5167 case OFPUTIL_NXAST_RESUBMIT:
5168 nar = (const struct nx_action_resubmit *) ia;
5169 xlate_table_action(ctx, ntohs(nar->in_port), ctx->table_id);
5172 case OFPUTIL_NXAST_RESUBMIT_TABLE:
5173 xlate_resubmit_table(ctx, (const struct nx_action_resubmit *) ia);
5176 case OFPUTIL_NXAST_SET_TUNNEL:
5177 nast = (const struct nx_action_set_tunnel *) ia;
5178 tun_id = htonll(ntohl(nast->tun_id));
5179 ctx->flow.tun_id = tun_id;
5182 case OFPUTIL_NXAST_SET_QUEUE:
5183 nasq = (const struct nx_action_set_queue *) ia;
5184 xlate_set_queue_action(ctx, nasq);
5187 case OFPUTIL_NXAST_POP_QUEUE:
5188 ctx->flow.skb_priority = ctx->orig_skb_priority;
5191 case OFPUTIL_NXAST_REG_MOVE:
5192 nxm_execute_reg_move((const struct nx_action_reg_move *) ia,
5196 case OFPUTIL_NXAST_REG_LOAD:
5197 nxm_execute_reg_load((const struct nx_action_reg_load *) ia,
5201 case OFPUTIL_NXAST_NOTE:
5202 /* Nothing to do. */
5205 case OFPUTIL_NXAST_SET_TUNNEL64:
5206 tun_id = ((const struct nx_action_set_tunnel64 *) ia)->tun_id;
5207 ctx->flow.tun_id = tun_id;
5210 case OFPUTIL_NXAST_MULTIPATH:
5211 nam = (const struct nx_action_multipath *) ia;
5212 multipath_execute(nam, &ctx->flow);
5215 case OFPUTIL_NXAST_AUTOPATH:
5216 naa = (const struct nx_action_autopath *) ia;
5217 xlate_autopath(ctx, naa);
5220 case OFPUTIL_NXAST_BUNDLE:
5221 ctx->ofproto->has_bundle_action = true;
5222 nab = (const struct nx_action_bundle *) ia;
5223 xlate_output_action__(ctx, bundle_execute(nab, &ctx->flow,
5228 case OFPUTIL_NXAST_BUNDLE_LOAD:
5229 ctx->ofproto->has_bundle_action = true;
5230 nab = (const struct nx_action_bundle *) ia;
5231 bundle_execute_load(nab, &ctx->flow, slave_enabled_cb,
5235 case OFPUTIL_NXAST_OUTPUT_REG:
5236 naor = (const struct nx_action_output_reg *) ia;
5237 xlate_output_reg_action(ctx, naor);
5240 case OFPUTIL_NXAST_LEARN:
5241 ctx->has_learn = true;
5242 if (ctx->may_learn) {
5243 xlate_learn_action(ctx, (const struct nx_action_learn *) ia);
5247 case OFPUTIL_NXAST_DEC_TTL:
5248 if (compose_dec_ttl(ctx)) {
5253 case OFPUTIL_NXAST_EXIT:
5257 case OFPUTIL_NXAST_FIN_TIMEOUT:
5258 ctx->has_fin_timeout = true;
5259 xlate_fin_timeout(ctx, (const struct nx_action_fin_timeout *) ia);
5262 case OFPUTIL_NXAST_CONTROLLER:
5263 nac = (const struct nx_action_controller *) ia;
5264 execute_controller_action(ctx, ntohs(nac->max_len), nac->reason,
5265 ntohs(nac->controller_id));
5271 /* We've let OFPP_NORMAL and the learning action look at the packet,
5272 * so drop it now if forwarding is disabled. */
5273 if (port && !stp_forward_in_state(port->stp_state)) {
5274 ofpbuf_clear(ctx->odp_actions);
5275 add_sflow_action(ctx);
5278 ctx->rule->up.evictable = was_evictable;
5283 action_xlate_ctx_init(struct action_xlate_ctx *ctx,
5284 struct ofproto_dpif *ofproto, const struct flow *flow,
5285 ovs_be16 initial_tci, struct rule_dpif *rule,
5286 uint8_t tcp_flags, const struct ofpbuf *packet)
5288 ctx->ofproto = ofproto;
5290 ctx->base_flow = ctx->flow;
5291 ctx->base_flow.tun_id = 0;
5292 ctx->base_flow.vlan_tci = initial_tci;
5294 ctx->packet = packet;
5295 ctx->may_learn = packet != NULL;
5296 ctx->tcp_flags = tcp_flags;
5297 ctx->resubmit_hook = NULL;
5298 ctx->resubmit_stats = NULL;
5301 /* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions in
5302 * 'odp_actions', using 'ctx'. */
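/* Typical usage, as in rule_execute() above (a sketch):
 *
 *     uint64_t stub[1024 / 8];
 *     struct ofpbuf odp_actions;
 *     struct action_xlate_ctx ctx;
 *
 *     ofpbuf_use_stub(&odp_actions, stub, sizeof stub);
 *     action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, rule,
 *                           stats.tcp_flags, packet);
 *     xlate_actions(&ctx, rule->up.actions, rule->up.n_actions, &odp_actions);
 *     ...
 *     ofpbuf_uninit(&odp_actions);
 */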
5304 xlate_actions(struct action_xlate_ctx *ctx,
5305 const union ofp_action *in, size_t n_in,
5306 struct ofpbuf *odp_actions)
5308 /* Normally false. Set to true if we ever hit MAX_RESUBMIT_RECURSION, so
5309 * that in the future we always keep a copy of the original flow for
5310 * tracing purposes. */
5311 static bool hit_resubmit_limit;
5313 COVERAGE_INC(ofproto_dpif_xlate);
5315 ofpbuf_clear(odp_actions);
5316 ofpbuf_reserve(odp_actions, NL_A_U32_SIZE);
5318 ctx->odp_actions = odp_actions;
5320 ctx->may_set_up_flow = true;
5321 ctx->has_learn = false;
5322 ctx->has_normal = false;
5323 ctx->has_fin_timeout = false;
5324 ctx->nf_output_iface = NF_OUT_DROP;
5327 ctx->max_resubmit_trigger = false;
5328 ctx->orig_skb_priority = ctx->flow.skb_priority;
5332 if (ctx->ofproto->has_mirrors || hit_resubmit_limit) {
5333 /* Do this conditionally because the copy is expensive enough that it
5334 * shows up in profiles.
5336 * We keep orig_flow in 'ctx' only because I couldn't make GCC 4.4
5337 * believe that I wasn't using it without initializing it if I kept it
5338 * in a local variable. */
5339 ctx->orig_flow = ctx->flow;
5342 if (ctx->flow.nw_frag & FLOW_NW_FRAG_ANY) {
5343 switch (ctx->ofproto->up.frag_handling) {
5344 case OFPC_FRAG_NORMAL:
5345 /* We must pretend that transport ports are unavailable. */
5346 ctx->flow.tp_src = ctx->base_flow.tp_src = htons(0);
5347 ctx->flow.tp_dst = ctx->base_flow.tp_dst = htons(0);
5350 case OFPC_FRAG_DROP:
5353 case OFPC_FRAG_REASM:
5356 case OFPC_FRAG_NX_MATCH:
5357 /* Nothing to do. */
5360 case OFPC_INVALID_TTL_TO_CONTROLLER:
5365 if (process_special(ctx->ofproto, &ctx->flow, ctx->packet)) {
5366 ctx->may_set_up_flow = false;
5368 static struct vlog_rate_limit trace_rl = VLOG_RATE_LIMIT_INIT(1, 1);
5369 ovs_be16 initial_tci = ctx->base_flow.vlan_tci;
5371 add_sflow_action(ctx);
5372 do_xlate_actions(in, n_in, ctx);
5374 if (ctx->max_resubmit_trigger && !ctx->resubmit_hook) {
5375 if (!hit_resubmit_limit) {
5376 /* We didn't record the original flow. Make sure we do from now on. */
5378 hit_resubmit_limit = true;
5379 } else if (!VLOG_DROP_ERR(&trace_rl)) {
5380 struct ds ds = DS_EMPTY_INITIALIZER;
5382 ofproto_trace(ctx->ofproto, &ctx->orig_flow, ctx->packet,
5384 VLOG_ERR("Trace triggered by excessive resubmit "
5385 "recursion:\n%s", ds_cstr(&ds));
5390 if (!connmgr_may_set_up_flow(ctx->ofproto->up.connmgr, &ctx->flow,
5391 ctx->odp_actions->data,
5392 ctx->odp_actions->size)) {
5393 ctx->may_set_up_flow = false;
5395 && connmgr_msg_in_hook(ctx->ofproto->up.connmgr, &ctx->flow,
5397 compose_output_action(ctx, OFPP_LOCAL);
5400 if (ctx->ofproto->has_mirrors) {
5401 add_mirror_actions(ctx, &ctx->orig_flow);
5403 fix_sflow_action(ctx);
5407 /* Translates the 'n_in' "union ofp_action"s in 'in' into datapath actions,
5408 * using 'ctx', and discards the datapath actions. */
5410 xlate_actions_for_side_effects(struct action_xlate_ctx *ctx,
5411 const union ofp_action *in, size_t n_in)
5413 uint64_t odp_actions_stub[1024 / 8];
5414 struct ofpbuf odp_actions;
5416 ofpbuf_use_stub(&odp_actions, odp_actions_stub, sizeof odp_actions_stub);
5417 xlate_actions(ctx, in, n_in, &odp_actions);
5418 ofpbuf_uninit(&odp_actions);
5421 /* OFPP_NORMAL implementation. */
5423 static struct ofport_dpif *ofbundle_get_a_port(const struct ofbundle *);
5425 /* Given 'vid', the VID obtained from the 802.1Q header that was received as
5426 * part of a packet (specify 0 if there was no 802.1Q header), and 'in_bundle',
5427  * the bundle on which the packet was received, returns the VLAN to which the packet belongs.
5430 * Both 'vid' and the return value are in the range 0...4095. */
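/* For example (illustrative): on a PORT_VLAN_ACCESS bundle configured with
 * VLAN 10, an admissible packet maps to VLAN 10; on a
 * PORT_VLAN_NATIVE_UNTAGGED bundle whose native VLAN is 7, a VID of 0 maps to
 * VLAN 7 while any other VID maps to itself. */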
5432 input_vid_to_vlan(const struct ofbundle *in_bundle, uint16_t vid)
5434 switch (in_bundle->vlan_mode) {
5435 case PORT_VLAN_ACCESS:
5436 return in_bundle->vlan;
5439 case PORT_VLAN_TRUNK:
5442 case PORT_VLAN_NATIVE_UNTAGGED:
5443 case PORT_VLAN_NATIVE_TAGGED:
5444 return vid ? vid : in_bundle->vlan;
5451 /* Checks whether a packet with the given 'vid' may ingress on 'in_bundle'.
5452  * If so, returns true. Otherwise, returns false and, if 'warn' is true, logs an appropriate warning.
5455 * 'vid' should be the VID obtained from the 802.1Q header that was received as
5456  * part of a packet (specify 0 if there was no 802.1Q header), in the range 0...4095. */
5459 input_vid_is_valid(uint16_t vid, struct ofbundle *in_bundle, bool warn)
5461 /* Allow any VID on the OFPP_NONE port. */
5462 if (in_bundle == &ofpp_none_bundle) {
5466 switch (in_bundle->vlan_mode) {
5467 case PORT_VLAN_ACCESS:
5470 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5471 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" tagged "
5472 "packet received on port %s configured as VLAN "
5473 "%"PRIu16" access port",
5474 in_bundle->ofproto->up.name, vid,
5475 in_bundle->name, in_bundle->vlan);
5481 case PORT_VLAN_NATIVE_UNTAGGED:
5482 case PORT_VLAN_NATIVE_TAGGED:
5484 /* Port must always carry its native VLAN. */
5488 case PORT_VLAN_TRUNK:
5489 if (!ofbundle_includes_vlan(in_bundle, vid)) {
5491 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5492 VLOG_WARN_RL(&rl, "bridge %s: dropping VLAN %"PRIu16" packet "
5493 "received on port %s not configured for trunking "
5495 in_bundle->ofproto->up.name, vid,
5496 in_bundle->name, vid);
5508 /* Given 'vlan', the VLAN that a packet belongs to, and
5509 * 'out_bundle', a bundle on which the packet is to be output, returns the VID
5510 * that should be included in the 802.1Q header. (If the return value is 0,
5511  * then the 802.1Q header should only be included in the packet if there is a nonzero PCP.)
5514 * Both 'vlan' and the return value are in the range 0...4095. */
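/* For example (illustrative): a PORT_VLAN_NATIVE_UNTAGGED bundle whose
 * native VLAN is 7 outputs VLAN 7 untagged (VID 0) and tags every other VLAN
 * with its own VID; a PORT_VLAN_TRUNK bundle tags every VLAN with its own
 * VID. */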
5516 output_vlan_to_vid(const struct ofbundle *out_bundle, uint16_t vlan)
5518 switch (out_bundle->vlan_mode) {
5519 case PORT_VLAN_ACCESS:
5522 case PORT_VLAN_TRUNK:
5523 case PORT_VLAN_NATIVE_TAGGED:
5526 case PORT_VLAN_NATIVE_UNTAGGED:
5527 return vlan == out_bundle->vlan ? 0 : vlan;
5535 output_normal(struct action_xlate_ctx *ctx, const struct ofbundle *out_bundle,
5538 struct ofport_dpif *port;
5540 ovs_be16 tci, old_tci;
5542 vid = output_vlan_to_vid(out_bundle, vlan);
5543 if (!out_bundle->bond) {
5544 port = ofbundle_get_a_port(out_bundle);
5546 port = bond_choose_output_slave(out_bundle->bond, &ctx->flow,
5549 /* No slaves enabled, so drop packet. */
5554 old_tci = ctx->flow.vlan_tci;
5556 if (tci || out_bundle->use_priority_tags) {
5557 tci |= ctx->flow.vlan_tci & htons(VLAN_PCP_MASK);
5559 tci |= htons(VLAN_CFI);
5562 ctx->flow.vlan_tci = tci;
5564 compose_output_action(ctx, port->up.ofp_port);
5565 ctx->flow.vlan_tci = old_tci;
5569 mirror_mask_ffs(mirror_mask_t mask)
5571 BUILD_ASSERT_DECL(sizeof(unsigned int) >= sizeof(mask));
5576 ofbundle_trunks_vlan(const struct ofbundle *bundle, uint16_t vlan)
5578 return (bundle->vlan_mode != PORT_VLAN_ACCESS
5579 && (!bundle->trunks || bitmap_is_set(bundle->trunks, vlan)));
5583 ofbundle_includes_vlan(const struct ofbundle *bundle, uint16_t vlan)
5585 return vlan == bundle->vlan || ofbundle_trunks_vlan(bundle, vlan);
5588 /* Returns an arbitrary interface within 'bundle'. */
5589 static struct ofport_dpif *
5590 ofbundle_get_a_port(const struct ofbundle *bundle)
5592 return CONTAINER_OF(list_front(&bundle->ports),
5593 struct ofport_dpif, bundle_node);
5597 vlan_is_mirrored(const struct ofmirror *m, int vlan)
5599 return !m->vlans || bitmap_is_set(m->vlans, vlan);
5602 /* Returns true if a packet with Ethernet destination MAC 'dst' may be mirrored
5603 * to a VLAN. In general most packets may be mirrored but we want to drop
5604 * protocols that may confuse switches. */
5606 eth_dst_may_rspan(const uint8_t dst[ETH_ADDR_LEN])
5608 /* If you change this function's behavior, please update corresponding
5609 * documentation in vswitch.xml at the same time. */
5610 if (dst[0] != 0x01) {
5611 /* All the currently banned MACs happen to start with 01, so this is a
5612  * quick way to eliminate most of the good ones. */
5614 if (eth_addr_is_reserved(dst)) {
5615 /* Drop STP, IEEE pause frames, and other reserved protocols
5616 * (01-80-c2-00-00-0x). */
5620 if (dst[0] == 0x01 && dst[1] == 0x00 && dst[2] == 0x0c) {
5622 if ((dst[3] & 0xfe) == 0xcc &&
5623 (dst[4] & 0xfe) == 0xcc &&
5624 (dst[5] & 0xfe) == 0xcc) {
5625 /* Drop the following protocols plus others following the same pattern:
5628 CDP, VTP, DTP, PAgP (01-00-0c-cc-cc-cc)
5629 Spanning Tree PVSTP+ (01-00-0c-cc-cc-cd)
5630 STP Uplink Fast (01-00-0c-cd-cd-cd) */
5634 if (!(dst[3] | dst[4] | dst[5])) {
5635 /* Drop Inter Switch Link packets (01-00-0c-00-00-00). */
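/* So, for example (illustrative): 01:80:c2:00:00:00 (STP) and
 * 01:00:0c:cc:cc:cc (CDP/VTP/DTP/PAgP) are not mirrored, while an ordinary
 * multicast destination such as 01:00:5e:00:00:01 may be. */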
5644 add_mirror_actions(struct action_xlate_ctx *ctx, const struct flow *orig_flow)
5646 struct ofproto_dpif *ofproto = ctx->ofproto;
5647 mirror_mask_t mirrors;
5648 struct ofbundle *in_bundle;
5651 const struct nlattr *a;
5654 in_bundle = lookup_input_bundle(ctx->ofproto, orig_flow->in_port,
5655 ctx->packet != NULL, NULL);
5659 mirrors = in_bundle->src_mirrors;
5661 /* Drop frames on bundles reserved for mirroring. */
5662 if (in_bundle->mirror_out) {
5663 if (ctx->packet != NULL) {
5664 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5665 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
5666 "%s, which is reserved exclusively for mirroring",
5667 ctx->ofproto->up.name, in_bundle->name);
5673 vid = vlan_tci_to_vid(orig_flow->vlan_tci);
5674 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
5677 vlan = input_vid_to_vlan(in_bundle, vid);
5679 /* Look at the output ports to check for destination selections. */
5681 NL_ATTR_FOR_EACH (a, left, ctx->odp_actions->data,
5682 ctx->odp_actions->size) {
5683 enum ovs_action_attr type = nl_attr_type(a);
5684 struct ofport_dpif *ofport;
5686 if (type != OVS_ACTION_ATTR_OUTPUT) {
5690 ofport = get_odp_port(ofproto, nl_attr_get_u32(a));
5691 if (ofport && ofport->bundle) {
5692 mirrors |= ofport->bundle->dst_mirrors;
5700 /* Restore the original packet before adding the mirror actions. */
5701 ctx->flow = *orig_flow;
5706 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
5708 if (!vlan_is_mirrored(m, vlan)) {
5709 mirrors &= mirrors - 1;
5713 mirrors &= ~m->dup_mirrors;
5714 ctx->mirrors |= m->dup_mirrors;
5716 output_normal(ctx, m->out, vlan);
5717 } else if (eth_dst_may_rspan(orig_flow->dl_dst)
5718 && vlan != m->out_vlan) {
5719 struct ofbundle *bundle;
5721 HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
5722 if (ofbundle_includes_vlan(bundle, m->out_vlan)
5723 && !bundle->mirror_out) {
5724 output_normal(ctx, bundle, m->out_vlan);
5732 update_mirror_stats(struct ofproto_dpif *ofproto, mirror_mask_t mirrors,
5733 uint64_t packets, uint64_t bytes)
5739 for (; mirrors; mirrors &= mirrors - 1) {
5742 m = ofproto->mirrors[mirror_mask_ffs(mirrors) - 1];
5745 /* In normal circumstances 'm' will not be NULL. However,
5746 * if mirrors are reconfigured, we can temporarily get out
5747 * of sync in facet_revalidate(). We could "correct" the
5748 * mirror list before reaching here, but doing that would
5749  * not properly account for the traffic stats we've currently
5750  * accumulated for the previous mirror configuration. */
5754 m->packet_count += packets;
5755 m->byte_count += bytes;
5759 /* A VM broadcasts a gratuitous ARP to indicate that it has resumed after
5760 * migration. Older Citrix-patched Linux DomU used gratuitous ARP replies to
5761 * indicate this; newer upstream kernels use gratuitous ARP requests. */
5763 is_gratuitous_arp(const struct flow *flow)
5765 return (flow->dl_type == htons(ETH_TYPE_ARP)
5766 && eth_addr_is_broadcast(flow->dl_dst)
5767 && (flow->nw_proto == ARP_OP_REPLY
5768 || (flow->nw_proto == ARP_OP_REQUEST
5769 && flow->nw_src == flow->nw_dst)));
5773 update_learning_table(struct ofproto_dpif *ofproto,
5774 const struct flow *flow, int vlan,
5775 struct ofbundle *in_bundle)
5777 struct mac_entry *mac;
5779 /* Don't learn the OFPP_NONE port. */
5780 if (in_bundle == &ofpp_none_bundle) {
5784 if (!mac_learning_may_learn(ofproto->ml, flow->dl_src, vlan)) {
5788 mac = mac_learning_insert(ofproto->ml, flow->dl_src, vlan);
5789 if (is_gratuitous_arp(flow)) {
5790 /* We don't want to learn from gratuitous ARP packets that are
5791 * reflected back over bond slaves so we lock the learning table. */
5792 if (!in_bundle->bond) {
5793 mac_entry_set_grat_arp_lock(mac);
5794 } else if (mac_entry_is_grat_arp_locked(mac)) {
5799 if (mac_entry_is_new(mac) || mac->port.p != in_bundle) {
5800 /* The log messages here could actually be useful in debugging,
5801 * so keep the rate limit relatively high. */
5802 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
5803 VLOG_DBG_RL(&rl, "bridge %s: learned that "ETH_ADDR_FMT" is "
5804 "on port %s in VLAN %d",
5805 ofproto->up.name, ETH_ADDR_ARGS(flow->dl_src),
5806 in_bundle->name, vlan);
5808 mac->port.p = in_bundle;
5809 tag_set_add(&ofproto->revalidate_set,
5810 mac_learning_changed(ofproto->ml, mac));
5814 static struct ofbundle *
5815 lookup_input_bundle(struct ofproto_dpif *ofproto, uint16_t in_port, bool warn,
5816 struct ofport_dpif **in_ofportp)
5818 struct ofport_dpif *ofport;
5820 /* Find the port and bundle for the received packet. */
5821 ofport = get_ofp_port(ofproto, in_port);
5823 *in_ofportp = ofport;
5825 if (ofport && ofport->bundle) {
5826 return ofport->bundle;
5829 /* Special-case OFPP_NONE, which a controller may use as the ingress
5830 * port for traffic that it is sourcing. */
5831 if (in_port == OFPP_NONE) {
5832 return &ofpp_none_bundle;
5835 /* Odd. A few possible reasons here:
5837  * - We deleted a port but there are still a few packets queued up from it.
5840 * - Someone externally added a port (e.g. "ovs-dpctl add-if") that
5841 * we don't know about.
5843 * - The ofproto client didn't configure the port as part of a bundle.
5846 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5848 VLOG_WARN_RL(&rl, "bridge %s: received packet on unknown "
5849 "port %"PRIu16, ofproto->up.name, in_port);
5854 /* Determines whether packets in 'flow' within 'ofproto' should be forwarded or
5855  * dropped. Returns true if they may be forwarded, false if they should be dropped.
5858 * 'in_port' must be the ofport_dpif that corresponds to flow->in_port.
5859 * 'in_port' must be part of a bundle (e.g. in_port->bundle must be nonnull).
5861 * 'vlan' must be the VLAN that corresponds to flow->vlan_tci on 'in_port', as
5862 * returned by input_vid_to_vlan(). It must be a valid VLAN for 'in_port', as
5863 * checked by input_vid_is_valid().
5865 * May also add tags to '*tags', although the current implementation only does
5866 * so in one special case.
5869 is_admissible(struct ofproto_dpif *ofproto, const struct flow *flow,
5870 struct ofport_dpif *in_port, uint16_t vlan, tag_type *tags)
5872 struct ofbundle *in_bundle = in_port->bundle;
5874 /* Drop frames for reserved multicast addresses
5875  * only if the forward_bpdu option is absent. */
5876 if (eth_addr_is_reserved(flow->dl_dst) && !ofproto->up.forward_bpdu) {
5880 if (in_bundle->bond) {
5881 struct mac_entry *mac;
5883 switch (bond_check_admissibility(in_bundle->bond, in_port,
5884 flow->dl_dst, tags)) {
5891 case BV_DROP_IF_MOVED:
5892 mac = mac_learning_lookup(ofproto->ml, flow->dl_src, vlan, NULL);
5893 if (mac && mac->port.p != in_bundle &&
5894 (!is_gratuitous_arp(flow)
5895 || mac_entry_is_grat_arp_locked(mac))) {
5906 xlate_normal(struct action_xlate_ctx *ctx)
5908 struct ofport_dpif *in_port;
5909 struct ofbundle *in_bundle;
5910 struct mac_entry *mac;
5914 ctx->has_normal = true;
5916 in_bundle = lookup_input_bundle(ctx->ofproto, ctx->flow.in_port,
5917 ctx->packet != NULL, &in_port);
5922 /* Drop malformed frames. */
5923 if (ctx->flow.dl_type == htons(ETH_TYPE_VLAN) &&
5924 !(ctx->flow.vlan_tci & htons(VLAN_CFI))) {
5925 if (ctx->packet != NULL) {
5926 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5927 VLOG_WARN_RL(&rl, "bridge %s: dropping packet with partial "
5928 "VLAN tag received on port %s",
5929 ctx->ofproto->up.name, in_bundle->name);
5934 /* Drop frames on bundles reserved for mirroring. */
5935 if (in_bundle->mirror_out) {
5936 if (ctx->packet != NULL) {
5937 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
5938 VLOG_WARN_RL(&rl, "bridge %s: dropping packet received on port "
5939 "%s, which is reserved exclusively for mirroring",
5940 ctx->ofproto->up.name, in_bundle->name);
5946 vid = vlan_tci_to_vid(ctx->flow.vlan_tci);
5947 if (!input_vid_is_valid(vid, in_bundle, ctx->packet != NULL)) {
5950 vlan = input_vid_to_vlan(in_bundle, vid);
5952 /* Check other admissibility requirements. */
5954 !is_admissible(ctx->ofproto, &ctx->flow, in_port, vlan, &ctx->tags)) {
5958 /* Learn source MAC. */
5959 if (ctx->may_learn) {
5960 update_learning_table(ctx->ofproto, &ctx->flow, vlan, in_bundle);
5963 /* Determine output bundle. */
5964 mac = mac_learning_lookup(ctx->ofproto->ml, ctx->flow.dl_dst, vlan,
5967 if (mac->port.p != in_bundle) {
5968 output_normal(ctx, mac->port.p, vlan);
5971 struct ofbundle *bundle;
5973 HMAP_FOR_EACH (bundle, hmap_node, &ctx->ofproto->bundles) {
5974 if (bundle != in_bundle
5975 && ofbundle_includes_vlan(bundle, vlan)
5976 && bundle->floodable
5977 && !bundle->mirror_out) {
5978 output_normal(ctx, bundle, vlan);
5981 ctx->nf_output_iface = NF_OUT_FLOOD;
5985 /* Optimized flow revalidation.
5987 * It's a difficult problem, in general, to tell which facets need to have
5988 * their actions recalculated whenever the OpenFlow flow table changes. We
5989 * don't try to solve that general problem: for most kinds of OpenFlow flow
5990 * table changes, we recalculate the actions for every facet. This is
5991 * relatively expensive, but it's good enough if the OpenFlow flow table
5992 * doesn't change very often.
5994 * However, we can expect one particular kind of OpenFlow flow table change to
5995 * happen frequently: changes caused by MAC learning. To avoid wasting a lot
5996 * of CPU on revalidating every facet whenever MAC learning modifies the flow
5997 * table, we add a special case that applies to flow tables in which every rule
5998 * has the same form (that is, the same wildcards), except that the table is
5999 * also allowed to have a single "catch-all" flow that matches all packets. We
6000 * optimize this case by tagging all of the facets that resubmit into the table
6001 * and invalidating the same tag whenever a flow changes in that table. The
6002 * end result is that we revalidate just the facets that need it (and sometimes
6003 * a few more, but not all of the facets or even all of the facets that
6004 * resubmit to the table modified by MAC learning). */
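/* An illustrative sketch: both the facets that resubmit into such a table and
 * the rules in it derive a tag from only the fields the table matches on,
 * along the lines of
 *
 *     tag = rule_calculate_tag(&flow, &table->other_table->wc, table->basis);
 *
 * so when MAC learning changes one rule, rule_invalidate() below adds just
 * that rule's tag to the revalidate set instead of forcing a full
 * revalidation. */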
6006 /* Calculates the tag to use for 'flow' and wildcards 'wc' when it is inserted
6007 * into an OpenFlow table with the given 'basis'. */
6009 rule_calculate_tag(const struct flow *flow, const struct flow_wildcards *wc,
6012 if (flow_wildcards_is_catchall(wc)) {
6015 struct flow tag_flow = *flow;
6016 flow_zero_wildcards(&tag_flow, wc);
6017 return tag_create_deterministic(flow_hash(&tag_flow, secret));
6021 /* Following a change to OpenFlow table 'table_id' in 'ofproto', update the
6022 * taggability of that table.
6024 * This function must be called after *each* change to a flow table. If you
6025 * skip calling it on some changes then the pointer comparisons at the end can
6026 * be invalid if you get unlucky. For example, if a flow removal causes a
6027 * cls_table to be destroyed and then a flow insertion causes a cls_table with
6028 * different wildcards to be created with the same address, then this function
6029 * will incorrectly skip revalidation. */
6031 table_update_taggable(struct ofproto_dpif *ofproto, uint8_t table_id)
6033 struct table_dpif *table = &ofproto->tables[table_id];
6034 const struct oftable *oftable = &ofproto->up.tables[table_id];
6035 struct cls_table *catchall, *other;
6036 struct cls_table *t;
6038 catchall = other = NULL;
6040 switch (hmap_count(&oftable->cls.tables)) {
6042 /* We could tag this OpenFlow table but it would make the logic a
6043  * little harder and it's a corner case that doesn't seem worth it yet. */
6049 HMAP_FOR_EACH (t, hmap_node, &oftable->cls.tables) {
6050 if (cls_table_is_catchall(t)) {
6052 } else if (!other) {
6055 /* Indicate that we can't tag this by setting both tables to
6056 * NULL. (We know that 'catchall' is already NULL.) */
6063 /* Can't tag this table. */
6067 if (table->catchall_table != catchall || table->other_table != other) {
6068 table->catchall_table = catchall;
6069 table->other_table = other;
6070 ofproto->need_revalidate = true;
6074 /* Given 'rule' that has changed in some way (either it is a rule being
6075 * inserted, a rule being deleted, or a rule whose actions are being
6076 * modified), marks facets for revalidation to ensure that packets will be
6077 * forwarded correctly according to the new state of the flow table.
6079 * This function must be called after *each* change to a flow table. See
6080 * the comment on table_update_taggable() for more information. */
6082 rule_invalidate(const struct rule_dpif *rule)
6084 struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
6086 table_update_taggable(ofproto, rule->up.table_id);
6088 if (!ofproto->need_revalidate) {
6089 struct table_dpif *table = &ofproto->tables[rule->up.table_id];
6091 if (table->other_table && rule->tag) {
6092 tag_set_add(&ofproto->revalidate_set, rule->tag);
6094 ofproto->need_revalidate = true;
6100 set_frag_handling(struct ofproto *ofproto_,
6101 enum ofp_config_flags frag_handling)
6103 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
6105 if (frag_handling != OFPC_FRAG_REASM) {
6106 ofproto->need_revalidate = true;
6114 packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
6115 const struct flow *flow,
6116 const union ofp_action *ofp_actions, size_t n_ofp_actions)
6118 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
6121 if (flow->in_port >= ofproto->max_ports && flow->in_port < OFPP_MAX) {
6122 return OFPERR_NXBRC_BAD_IN_PORT;
6125 error = validate_actions(ofp_actions, n_ofp_actions, flow,
6126 ofproto->max_ports);
6128 struct odputil_keybuf keybuf;
6129 struct dpif_flow_stats stats;
6133 struct action_xlate_ctx ctx;
6134 uint64_t odp_actions_stub[1024 / 8];
6135 struct ofpbuf odp_actions;
6137 ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
6138 odp_flow_key_from_flow(&key, flow);
6140 dpif_flow_stats_extract(flow, packet, &stats);
6142 action_xlate_ctx_init(&ctx, ofproto, flow, flow->vlan_tci, NULL,
6143 packet_get_tcp_flags(packet, flow), packet);
6144 ctx.resubmit_stats = &stats;
6146 ofpbuf_use_stub(&odp_actions,
6147 odp_actions_stub, sizeof odp_actions_stub);
6148 xlate_actions(&ctx, ofp_actions, n_ofp_actions, &odp_actions);
6149 dpif_execute(ofproto->dpif, key.data, key.size,
6150 odp_actions.data, odp_actions.size, packet);
6151 ofpbuf_uninit(&odp_actions);
6159 set_netflow(struct ofproto *ofproto_,
6160 const struct netflow_options *netflow_options)
6162 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
6164 if (netflow_options) {
6165 if (!ofproto->netflow) {
6166 ofproto->netflow = netflow_create();
6168 return netflow_set_options(ofproto->netflow, netflow_options);
6170 netflow_destroy(ofproto->netflow);
6171 ofproto->netflow = NULL;
6177 get_netflow_ids(const struct ofproto *ofproto_,
6178 uint8_t *engine_type, uint8_t *engine_id)
6180 struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
6182 dpif_get_netflow_ids(ofproto->dpif, engine_type, engine_id);
6186 send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
6188 if (!facet_is_controller_flow(facet) &&
6189 netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
6190 struct subfacet *subfacet;
6191 struct ofexpired expired;
6193 LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
6194 if (subfacet->installed) {
6195 struct dpif_flow_stats stats;
6197 subfacet_install(subfacet, subfacet->actions,
6198 subfacet->actions_len, &stats);
6199 subfacet_update_stats(subfacet, &stats);
6203 expired.flow = facet->flow;
6204 expired.packet_count = facet->packet_count;
6205 expired.byte_count = facet->byte_count;
6206 expired.used = facet->used;
6207 netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
6212 send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
6214 struct facet *facet;
6216 HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
6217 send_active_timeout(ofproto, facet);
6221 static struct ofproto_dpif *
6222 ofproto_dpif_lookup(const char *name)
6224 struct ofproto_dpif *ofproto;
6226 HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
6227 hash_string(name, 0), &all_ofproto_dpifs) {
6228 if (!strcmp(ofproto->up.name, name)) {
6236 ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
6237 const char *argv[], void *aux OVS_UNUSED)
6239 struct ofproto_dpif *ofproto;
6242 ofproto = ofproto_dpif_lookup(argv[1]);
6244 unixctl_command_reply_error(conn, "no such bridge");
6247 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
6249 HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
6250 mac_learning_flush(ofproto->ml, &ofproto->revalidate_set);
6254 unixctl_command_reply(conn, "table successfully flushed");
6258 ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
6259 const char *argv[], void *aux OVS_UNUSED)
6261 struct ds ds = DS_EMPTY_INITIALIZER;
6262 const struct ofproto_dpif *ofproto;
6263 const struct mac_entry *e;
6265 ofproto = ofproto_dpif_lookup(argv[1]);
6267 unixctl_command_reply_error(conn, "no such bridge");
6271 ds_put_cstr(&ds, " port VLAN MAC Age\n");
6272 LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
6273 struct ofbundle *bundle = e->port.p;
6274 ds_put_format(&ds, "%5d %4d "ETH_ADDR_FMT" %3d\n",
6275 ofbundle_get_a_port(bundle)->odp_port,
6276 e->vlan, ETH_ADDR_ARGS(e->mac),
6277 mac_entry_age(ofproto->ml, e));
6279 unixctl_command_reply(conn, ds_cstr(&ds));
6284 struct action_xlate_ctx ctx;
6290 trace_format_rule(struct ds *result, uint8_t table_id, int level,
6291 const struct rule_dpif *rule)
6293 ds_put_char_multiple(result, '\t', level);
6295 ds_put_cstr(result, "No match\n");
6299 ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
6300 table_id, ntohll(rule->up.flow_cookie));
6301 cls_rule_format(&rule->up.cr, result);
6302 ds_put_char(result, '\n');
6304 ds_put_char_multiple(result, '\t', level);
6305 ds_put_cstr(result, "OpenFlow ");
6306 ofp_print_actions(result, rule->up.actions, rule->up.n_actions);
6307 ds_put_char(result, '\n');
6311 trace_format_flow(struct ds *result, int level, const char *title,
6312 struct trace_ctx *trace)
6314 ds_put_char_multiple(result, '\t', level);
6315 ds_put_format(result, "%s: ", title);
6316 if (flow_equal(&trace->ctx.flow, &trace->flow)) {
6317 ds_put_cstr(result, "unchanged");
6319 flow_format(result, &trace->ctx.flow);
6320 trace->flow = trace->ctx.flow;
6322 ds_put_char(result, '\n');
6326 trace_format_regs(struct ds *result, int level, const char *title,
6327 struct trace_ctx *trace)
6331 ds_put_char_multiple(result, '\t', level);
6332 ds_put_format(result, "%s:", title);
6333 for (i = 0; i < FLOW_N_REGS; i++) {
6334 ds_put_format(result, " reg%zu=0x%"PRIx32, i, trace->flow.regs[i]);
6336 ds_put_char(result, '\n');
6340 trace_format_odp(struct ds *result, int level, const char *title,
6341 struct trace_ctx *trace)
6343 struct ofpbuf *odp_actions = trace->ctx.odp_actions;
6345 ds_put_char_multiple(result, '\t', level);
6346 ds_put_format(result, "%s: ", title);
6347 format_odp_actions(result, odp_actions->data, odp_actions->size);
6348 ds_put_char(result, '\n');
6352 trace_resubmit(struct action_xlate_ctx *ctx, struct rule_dpif *rule)
6354 struct trace_ctx *trace = CONTAINER_OF(ctx, struct trace_ctx, ctx);
6355 struct ds *result = trace->result;
6357 ds_put_char(result, '\n');
6358 trace_format_flow(result, ctx->recurse + 1, "Resubmitted flow", trace);
6359 trace_format_regs(result, ctx->recurse + 1, "Resubmitted regs", trace);
6360 trace_format_odp(result, ctx->recurse + 1, "Resubmitted odp", trace);
6361 trace_format_rule(result, ctx->table_id, ctx->recurse + 1, rule);
6365 ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
6366 void *aux OVS_UNUSED)
6368 const char *dpname = argv[1];
6369 struct ofproto_dpif *ofproto;
6370 struct ofpbuf odp_key;
6371 struct ofpbuf *packet;
6372 ovs_be16 initial_tci;
6378 ofpbuf_init(&odp_key, 0);
6381 ofproto = ofproto_dpif_lookup(dpname);
6383 unixctl_command_reply_error(conn, "Unknown ofproto (use ofproto/list "
6387 if (argc == 3 || (argc == 4 && !strcmp(argv[3], "-generate"))) {
6388 /* ofproto/trace dpname flow [-generate] */
6389 const char *flow_s = argv[2];
6390 const char *generate_s = argv[3];
6392 /* Allow 'flow_s' to be either a datapath flow or an OpenFlow-like
6393 * flow. We guess which type it is based on whether 'flow_s' contains
6394  * an '(', since a datapath flow always contains '(' but an
6395 * OpenFlow-like flow should not (in fact it's allowed but I believe
6396 * that's not documented anywhere).
6398 * An alternative would be to try to parse 'flow_s' both ways, but then
6399 * it would be tricky giving a sensible error message. After all, do
6400 * you just say "syntax error" or do you present both error messages?
6401 * Both choices seem lousy. */
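        /* For example (values are illustrative, not from a live system), a
         * datapath flow looks like
         *     in_port(1),eth(src=50:54:00:00:00:05,dst=50:54:00:00:00:07),eth_type(0x0806)
         * while an OpenFlow-like flow for the same packet looks like
         *     in_port=1,dl_src=50:54:00:00:00:05,dl_dst=50:54:00:00:00:07,dl_type=0x0806
         */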
        if (strchr(flow_s, '(')) {
            int error;

            /* Convert string to datapath key. */
            ofpbuf_init(&odp_key, 0);
            error = odp_flow_key_from_string(flow_s, NULL, &odp_key);
            if (error) {
                unixctl_command_reply_error(conn, "Bad flow syntax");
                goto exit;
            }

            /* Convert odp_key to flow. */
            error = ofproto_dpif_extract_flow_key(ofproto, odp_key.data,
                                                  odp_key.size, &flow,
                                                  &initial_tci, NULL);
            if (error == ODP_FIT_ERROR) {
                unixctl_command_reply_error(conn, "Invalid flow");
                goto exit;
            }
        } else {
            char *error_s;

            error_s = parse_ofp_exact_flow(&flow, argv[2]);
            if (error_s) {
                unixctl_command_reply_error(conn, error_s);
                free(error_s);
                goto exit;
            }

            initial_tci = flow.vlan_tci;
            vsp_adjust_flow(ofproto, &flow);
        }

        /* Generate a packet, if requested. */
        if (generate_s) {
            packet = ofpbuf_new(0);
            flow_compose(packet, &flow);
        }
    } else if (argc == 6) {
        /* ofproto/trace dpname priority tun_id in_port packet */
        const char *priority_s = argv[2];
        const char *tun_id_s = argv[3];
        const char *in_port_s = argv[4];
        const char *packet_s = argv[5];
        uint16_t in_port = ofp_port_to_odp_port(atoi(in_port_s));
        ovs_be64 tun_id = htonll(strtoull(tun_id_s, NULL, 0));
        uint32_t priority = atoi(priority_s);
        const char *msg;

        msg = eth_from_hex(packet_s, &packet);
        if (msg) {
            unixctl_command_reply_error(conn, msg);
            goto exit;
        }

        ds_put_cstr(&result, "Packet: ");
        s = ofp_packet_to_string(packet->data, packet->size);
        ds_put_cstr(&result, s);
        free(s);

        flow_extract(packet, priority, tun_id, in_port, &flow);
        initial_tci = flow.vlan_tci;
    } else {
        unixctl_command_reply_error(conn, "Bad command syntax");
        goto exit;
    }
    ofproto_trace(ofproto, &flow, packet, initial_tci, &result);
    unixctl_command_reply(conn, ds_cstr(&result));

exit:
    ds_destroy(&result);
    ofpbuf_delete(packet);
    ofpbuf_uninit(&odp_key);
}
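/* Example invocations (the bridge name, flow values, and packet bytes below
 * are assumptions for illustration, not taken from any real configuration):
 *
 *     ovs-appctl ofproto/trace br0 in_port=1,dl_type=0x0806
 *     ovs-appctl ofproto/trace br0 in_port=1,dl_type=0x0806 -generate
 *     ovs-appctl ofproto/trace br0 'in_port(1),eth_type(0x0806)'
 *     ovs-appctl ofproto/trace br0 0 0 1 ffffffffffff5054000000050806
 */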
static void
ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
              const struct ofpbuf *packet, ovs_be16 initial_tci,
              struct ds *ds)
{
    struct rule_dpif *rule;

    ds_put_cstr(ds, "Flow: ");
    flow_format(ds, flow);
    ds_put_char(ds, '\n');

    rule = rule_dpif_lookup(ofproto, flow, 0);
    trace_format_rule(ds, 0, 0, rule);
    if (rule) {
        uint64_t odp_actions_stub[1024 / 8];
        struct ofpbuf odp_actions;

        struct trace_ctx trace;
        uint8_t tcp_flags;

        tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
        trace.result = ds;
        trace.flow = *flow;
        ofpbuf_use_stub(&odp_actions,
                        odp_actions_stub, sizeof odp_actions_stub);
        action_xlate_ctx_init(&trace.ctx, ofproto, flow, initial_tci,
                              rule, tcp_flags, packet);
        trace.ctx.resubmit_hook = trace_resubmit;
        xlate_actions(&trace.ctx, rule->up.actions, rule->up.n_actions,
                      &odp_actions);

        ds_put_char(ds, '\n');
        trace_format_flow(ds, 0, "Final flow", &trace);
        ds_put_cstr(ds, "Datapath actions: ");
        format_odp_actions(ds, odp_actions.data, odp_actions.size);
        ofpbuf_uninit(&odp_actions);

        if (!trace.ctx.may_set_up_flow) {
            if (packet) {
                ds_put_cstr(ds, "\nThis flow is not cachable.");
            } else {
                ds_put_cstr(ds, "\nThe datapath actions are incomplete--"
                            "for complete actions, please supply a packet.");
            }
        }
    }
}
/* Sets 'clogged', which makes the main run loop postpone packet processing
 * until "ofproto/unclog" clears it again.  Intended for testing. */
static void
ofproto_dpif_clog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                  const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = true;
    unixctl_command_reply(conn, NULL);
}

static void
ofproto_dpif_unclog(struct unixctl_conn *conn, int argc OVS_UNUSED,
                    const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
{
    clogged = false;
    unixctl_command_reply(conn, NULL);
}
/* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
 * 'reply' describing the results. */
static void
ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
{
    struct facet *facet;
    int errors;

    errors = 0;
    HMAP_FOR_EACH (facet, hmap_node, &ofproto->facets) {
        if (!facet_check_consistency(facet)) {
            errors++;
        }
    }
    if (errors) {
        ofproto->need_revalidate = true;
    }

    if (errors) {
        ds_put_format(reply, "%s: self-check failed (%d errors)\n",
                      ofproto->up.name, errors);
    } else {
        ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
    }
}
static void
ofproto_dpif_self_check(struct unixctl_conn *conn,
                        int argc, const char *argv[], void *aux OVS_UNUSED)
{
    struct ds reply = DS_EMPTY_INITIALIZER;
    struct ofproto_dpif *ofproto;

    if (argc > 1) {
        ofproto = ofproto_dpif_lookup(argv[1]);
        if (!ofproto) {
            unixctl_command_reply_error(conn, "Unknown ofproto (use "
                                        "ofproto/list for help)");
            return;
        }
        ofproto_dpif_self_check__(ofproto, &reply);
    } else {
        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            ofproto_dpif_self_check__(ofproto, &reply);
        }
    }

    unixctl_command_reply(conn, ds_cstr(&reply));
    ds_destroy(&reply);
}
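/* Sample replies, derived from the format strings above (the bridge name is
 * illustrative):
 *
 *     br0: self-check passed
 *     br0: self-check failed (3 errors)
 */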
static void
ofproto_dpif_unixctl_init(void)
{
    static bool registered;
    if (registered) {
        return;
    }
    registered = true;

    unixctl_command_register(
        "ofproto/trace",
        "bridge {priority tun_id in_port packet | odp_flow [-generate]}",
        2, 5, ofproto_unixctl_trace, NULL);
    unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
                             ofproto_unixctl_fdb_flush, NULL);
    unixctl_command_register("fdb/show", "bridge", 1, 1,
                             ofproto_unixctl_fdb_show, NULL);
    unixctl_command_register("ofproto/clog", "", 0, 0,
                             ofproto_dpif_clog, NULL);
    unixctl_command_register("ofproto/unclog", "", 0, 0,
                             ofproto_dpif_unclog, NULL);
    unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
                             ofproto_dpif_self_check, NULL);
}
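/* Once registered, all of these handlers are reachable through ovs-appctl,
 * for example (the bridge name is an assumption):
 *
 *     ovs-appctl fdb/show br0
 *     ovs-appctl fdb/flush br0
 *     ovs-appctl ofproto/self-check br0
 */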
/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
 *
 * This is deprecated.  It is only for compatibility with broken device
 * drivers in old versions of Linux that do not properly support VLANs when
 * VLAN devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
static int
set_realdev(struct ofport *ofport_, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
    struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);

    if (realdev_ofp_port == ofport->realdev_ofp_port
        && vid == ofport->vlandev_vid) {
        return 0;
    }

    ofproto->need_revalidate = true;

    if (ofport->realdev_ofp_port) {
        vsp_remove(ofport);
    }
    if (realdev_ofp_port && ofport->bundle) {
        /* vlandevs are enslaved to their realdevs, so they are not allowed to
         * themselves be part of a bundle. */
        bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
    }

    ofport->realdev_ofp_port = realdev_ofp_port;
    ofport->vlandev_vid = vid;

    if (realdev_ofp_port) {
        vsp_add(ofport, realdev_ofp_port, vid);
    }

    return 0;
}
static uint32_t
hash_realdev_vid(uint16_t realdev_ofp_port, int vid)
{
    return hash_2words(realdev_ofp_port, vid);
}
/* Returns the ODP port number of the Linux VLAN device that corresponds to
 * 'vlan_tci' on the network device with port number 'realdev_odp_port' in
 * 'ofproto'.  For example, given 'realdev_odp_port' of eth0 and 'vlan_tci' 9,
 * it would return the port number of eth0.9.
 *
 * Unless VLAN splinters are enabled for port 'realdev_odp_port', this
 * function just returns its 'realdev_odp_port' argument. */
uint32_t
vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
                       uint32_t realdev_odp_port, ovs_be16 vlan_tci)
{
    if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
        uint16_t realdev_ofp_port = odp_port_to_ofp_port(realdev_odp_port);
        int vid = vlan_tci_to_vid(vlan_tci);
        const struct vlan_splinter *vsp;

        HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
                                 hash_realdev_vid(realdev_ofp_port, vid),
                                 &ofproto->realdev_vid_map) {
            if (vsp->realdev_ofp_port == realdev_ofp_port
                && vsp->vid == vid) {
                return ofp_port_to_odp_port(vsp->vlandev_ofp_port);
            }
        }
    }
    return realdev_odp_port;
}
static struct vlan_splinter *
vlandev_find(const struct ofproto_dpif *ofproto, uint16_t vlandev_ofp_port)
{
    struct vlan_splinter *vsp;

    HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node, hash_int(vlandev_ofp_port, 0),
                             &ofproto->vlandev_map) {
        if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
            return vsp;
        }
    }

    return NULL;
}
/* Returns the OpenFlow port number of the "real" device underlying the Linux
 * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
 * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
 * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
 * eth0 and store 9 in '*vid'.
 *
 * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
 * VLAN device.  Unless VLAN splinters are enabled, this is what this function
 * always does.  ('vid' may be NULL, in which case the VID is not stored.) */
static uint16_t
vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
                       uint16_t vlandev_ofp_port, int *vid)
{
    if (!hmap_is_empty(&ofproto->vlandev_map)) {
        const struct vlan_splinter *vsp;

        vsp = vlandev_find(ofproto, vlandev_ofp_port);
        if (vsp) {
            if (vid) {
                *vid = vsp->vid;
            }
            return vsp->realdev_ofp_port;
        }
    }
    return 0;
}
/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
static bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
{
    uint16_t realdev;
    int vid;

    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port, &vid);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
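/* Worked example (values chosen for illustration): for 'vid' 9, the
 * assignment above computes (9 & VLAN_VID_MASK) | VLAN_CFI
 * = 0x0009 | 0x1000 = 0x1009, assuming the usual definitions
 * VLAN_VID_MASK == 0x0fff and VLAN_CFI == 0x1000, so 'flow->vlan_tci'
 * becomes htons(0x1009): VID 9 with the CFI bit set. */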
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
}
static void
vsp_add(struct ofport_dpif *port, uint16_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_int(port->up.ofp_port, 0));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
}
const struct ofproto_class ofproto_dpif_class = {
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_modify_actions,
    get_cfm_remote_mpids,
    get_stp_port_status,
    is_mirror_output_bundle,
    forward_bpdu_changed,