bridge: Refresh STP statistics separately from status
ofproto/ofproto-dpif.c
/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>

#include "ofproto/ofproto-dpif.h"
#include "ofproto/ofproto-provider.h"

#include <errno.h>

#include "bfd.h"
#include "bond.h"
#include "bundle.h"
#include "byte-order.h"
#include "connmgr.h"
#include "coverage.h"
#include "cfm.h"
#include "dpif.h"
#include "dynamic-string.h"
#include "fail-open.h"
#include "guarded-list.h"
#include "hmapx.h"
#include "lacp.h"
#include "learn.h"
#include "mac-learning.h"
#include "meta-flow.h"
#include "multipath.h"
#include "netdev-vport.h"
#include "netdev.h"
#include "netlink.h"
#include "nx-match.h"
#include "odp-util.h"
#include "odp-execute.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "ofp-actions.h"
#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofproto-dpif-governor.h"
#include "ofproto-dpif-ipfix.h"
#include "ofproto-dpif-mirror.h"
#include "ofproto-dpif-monitor.h"
#include "ofproto-dpif-sflow.h"
#include "ofproto-dpif-upcall.h"
#include "ofproto-dpif-xlate.h"
#include "poll-loop.h"
#include "simap.h"
#include "smap.h"
#include "timer.h"
#include "tunnel.h"
#include "unaligned.h"
#include "unixctl.h"
#include "vlan-bitmap.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(ofproto_dpif);

COVERAGE_DEFINE(ofproto_dpif_expired);
COVERAGE_DEFINE(facet_revalidate);
COVERAGE_DEFINE(facet_unexpected);
COVERAGE_DEFINE(facet_create);
COVERAGE_DEFINE(facet_remove);
COVERAGE_DEFINE(subfacet_create);
COVERAGE_DEFINE(subfacet_destroy);
COVERAGE_DEFINE(subfacet_install_fail);
COVERAGE_DEFINE(packet_in_overflow);

/* Number of implemented OpenFlow tables. */
enum { N_TABLES = 255 };
enum { TBL_INTERNAL = N_TABLES - 1 };    /* Used for internal hidden rules. */
BUILD_ASSERT_DECL(N_TABLES >= 2 && N_TABLES <= 255);

struct flow_miss;
struct facet;

struct rule_dpif {
    struct rule up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    struct ovs_mutex stats_mutex;
    uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
    uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
};
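
/* Illustrative sketch (hypothetical helper, not the actual implementation):
 * any read of 'packet_count' or 'byte_count' above must hold 'stats_mutex',
 * along these lines:
 *
 *     static void
 *     rule_dpif_read_stats(struct rule_dpif *rule,
 *                          uint64_t *packets, uint64_t *bytes)
 *     {
 *         ovs_mutex_lock(&rule->stats_mutex);
 *         *packets = rule->packet_count;
 *         *bytes = rule->byte_count;
 *         ovs_mutex_unlock(&rule->stats_mutex);
 *     }
 *
 * rule_get_stats(), declared below, is expected to follow this pattern. */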

static void rule_get_stats(struct rule *, uint64_t *packets, uint64_t *bytes);
static struct rule_dpif *rule_dpif_cast(const struct rule *);

struct group_dpif {
    struct ofgroup up;

    /* These statistics:
     *
     *   - Do include packets and bytes from facets that have been deleted or
     *     whose own statistics have been folded into the rule.
     *
     *   - Do include packets and bytes sent "by hand" that were accounted to
     *     the rule without any facet being involved (this is a rare corner
     *     case in rule_execute()).
     *
     *   - Do not include packets or bytes that can be obtained from any
     *     facet's packet_count or byte_count member or that can be obtained
     *     from the datapath by, e.g., dpif_flow_get() for any subfacet.
     */
    struct ovs_mutex stats_mutex;
    uint64_t packet_count OVS_GUARDED;  /* Number of packets received. */
    uint64_t byte_count OVS_GUARDED;    /* Number of bytes received. */
    struct bucket_counter *bucket_stats OVS_GUARDED;  /* Bucket statistics. */
};

struct ofbundle {
    struct hmap_node hmap_node; /* In struct ofproto's "bundles" hmap. */
    struct ofproto_dpif *ofproto; /* Owning ofproto. */
    void *aux;                  /* Key supplied by ofproto's client. */
    char *name;                 /* Identifier for log messages. */

    /* Configuration. */
    struct list ports;          /* Contains "struct ofport"s. */
    enum port_vlan_mode vlan_mode; /* VLAN mode. */
    int vlan;                   /* -1=trunk port, else a 12-bit VLAN ID. */
    unsigned long *trunks;      /* Bitmap of trunked VLANs, if 'vlan' == -1.
                                 * NULL if all VLANs are trunked. */
    struct lacp *lacp;          /* LACP if LACP is enabled, otherwise NULL. */
    struct bond *bond;          /* Nonnull iff more than one port. */
    bool use_priority_tags;     /* Use 802.1p tag for frames in VLAN 0? */

    /* Status. */
    bool floodable;          /* True if no port has OFPUTIL_PC_NO_FLOOD set. */
};
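
/* Illustrative sketch (hypothetical helper, not the actual implementation):
 * how 'vlan' and 'trunks' above combine for a membership test.  An access
 * port carries only its configured VLAN; a trunk ('vlan' == -1) carries every
 * VLAN when 'trunks' is NULL, otherwise only the VLANs set in the bitmap.
 * (The full forwarding logic also depends on 'vlan_mode'; this ignores the
 * native-tagged and native-untagged refinements.)
 *
 *     static bool
 *     ofbundle_carries_vlan(const struct ofbundle *bundle, uint16_t vid)
 *     {
 *         return bundle->vlan == -1
 *                ? !bundle->trunks || bitmap_is_set(bundle->trunks, vid)
 *                : vid == bundle->vlan;
 *     }
 */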

static void bundle_remove(struct ofport *);
static void bundle_update(struct ofbundle *);
static void bundle_destroy(struct ofbundle *);
static void bundle_del_port(struct ofport_dpif *);
static void bundle_run(struct ofbundle *);
static void bundle_wait(struct ofbundle *);

static void stp_run(struct ofproto_dpif *ofproto);
static void stp_wait(struct ofproto_dpif *ofproto);
static int set_stp_port(struct ofport *,
                        const struct ofproto_port_stp_settings *);

static void compose_slow_path(const struct ofproto_dpif *, const struct flow *,
                              enum slow_path_reason,
                              uint64_t *stub, size_t stub_size,
                              const struct nlattr **actionsp,
                              size_t *actions_lenp);

/* A subfacet (see "struct subfacet" below) has three possible installation
 * states:
 *
 *   - SF_NOT_INSTALLED: Not installed in the datapath.  This will only be the
 *     case just after the subfacet is created, just before the subfacet is
 *     destroyed, or if the datapath returns an error when we try to install a
 *     subfacet.
 *
 *   - SF_FAST_PATH: The subfacet's actions are installed in the datapath.
 *
 *   - SF_SLOW_PATH: An action that sends every packet for the subfacet through
 *     ofproto_dpif is installed in the datapath.
 */
enum subfacet_path {
    SF_NOT_INSTALLED,           /* No datapath flow for this subfacet. */
    SF_FAST_PATH,               /* Full actions are installed. */
    SF_SLOW_PATH,               /* Send-to-userspace action is installed. */
};
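
/* Illustrative sketch, not the actual code: the choice between SF_FAST_PATH
 * and SF_SLOW_PATH typically hinges on whether flow translation flagged the
 * flow as needing special processing, along the lines of (assuming a
 * "struct xlate_out *xout" whose 'slow' field is nonzero for slow-pathed
 * flows):
 *
 *     subfacet->path = xout->slow ? SF_SLOW_PATH : SF_FAST_PATH;
 *
 * SF_NOT_INSTALLED covers the windows before installation succeeds and after
 * the datapath rejects or removes the flow. */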

/* A dpif flow and actions associated with a facet.
 *
 * See also the large comment on struct facet. */
struct subfacet {
    /* Owners. */
    struct hmap_node hmap_node; /* In struct dpif_backer's 'subfacets' hmap. */
    struct list list_node;      /* In struct facet's 'subfacets' list. */
    struct facet *facet;        /* Owning facet. */
    struct dpif_backer *backer; /* Owning backer. */

    struct nlattr *key;
    int key_len;

    long long int used;         /* Time last used; time created if not used. */
    long long int created;      /* Time created. */

    uint64_t dp_packet_count;   /* Last known packet count in the datapath. */
    uint64_t dp_byte_count;     /* Last known byte count in the datapath. */

    enum subfacet_path path;    /* Installed in datapath? */
};

#define SUBFACET_DESTROY_MAX_BATCH 50

static struct subfacet *subfacet_create(struct facet *, struct flow_miss *,
                                        uint32_t key_hash);
static struct subfacet *subfacet_find(struct dpif_backer *,
                                      const struct nlattr *key, size_t key_len,
                                      uint32_t key_hash);
static void subfacet_destroy(struct subfacet *);
static void subfacet_destroy__(struct subfacet *);
static void subfacet_destroy_batch(struct dpif_backer *,
                                   struct subfacet **, int n);
static void subfacet_reset_dp_stats(struct subfacet *,
                                    struct dpif_flow_stats *);
static void subfacet_update_stats(struct subfacet *,
                                  const struct dpif_flow_stats *);
static int subfacet_install(struct subfacet *,
                            const struct ofpbuf *odp_actions,
                            struct dpif_flow_stats *);
static void subfacet_uninstall(struct subfacet *);

/* A unique, non-overlapping instantiation of an OpenFlow flow.
 *
 * A facet associates a "struct flow", which represents the Open vSwitch
 * userspace idea of an exact-match flow, with one or more subfacets.
 * While the facet is created based on an exact-match flow, it is stored
 * within the ofproto based on the wildcards that could be expressed
 * based on the flow table and other configuration.  (See the 'wc'
 * description in "struct xlate_out" for more details.)
 *
 * Each subfacet tracks the datapath's idea of the flow equivalent to
 * the facet.  When the kernel module (or other dpif implementation) and
 * Open vSwitch userspace agree on the definition of a flow key, there
 * is exactly one subfacet per facet.  If the dpif implementation
 * supports more-specific flow matching than userspace, however, a facet
 * can have more than one subfacet.  Examples include the dpif
 * implementation not supporting the same wildcards as userspace or some
 * distinction in flow that userspace simply doesn't understand.
 *
 * Flow expiration works in terms of subfacets, so a facet must have at
 * least one subfacet or it will never expire, leaking memory. */
struct facet {
    /* Owner. */
    struct ofproto_dpif *ofproto;

    /* Owned data. */
    struct list subfacets;
    long long int used;         /* Time last used; time created if not used. */

    /* Key. */
    struct flow flow;           /* Flow of the creating subfacet. */
    struct cls_rule cr;         /* In 'ofproto_dpif's facets classifier. */

    /* These statistics:
     *
     *   - Do include packets and bytes sent "by hand", e.g. with
     *     dpif_execute().
     *
     *   - Do include packets and bytes that were obtained from the datapath
     *     when a subfacet's statistics were reset (e.g. dpif_flow_put() with
     *     DPIF_FP_ZERO_STATS).
     *
     *   - Do not include packets or bytes that can be obtained from the
     *     datapath for any existing subfacet.
     */
    uint64_t packet_count;       /* Number of packets received. */
    uint64_t byte_count;         /* Number of bytes received. */

    /* Resubmit statistics. */
    uint64_t prev_packet_count;  /* Number of packets from last stats push. */
    uint64_t prev_byte_count;    /* Number of bytes from last stats push. */
    long long int prev_used;     /* Used time from last stats push. */

    /* Accounting. */
    uint64_t accounted_bytes;    /* Bytes processed by facet_account(). */
    struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
    uint16_t tcp_flags;          /* TCP flags seen for this 'rule'. */

    struct xlate_out xout;

    /* Storage for a single subfacet, to reduce malloc() time and space
     * overhead.  (A facet always has at least one subfacet and in the common
     * case has exactly one subfacet.  However, 'one_subfacet' may not
     * always be valid, since it could have been removed after newer
     * subfacets were pushed onto the 'subfacets' list.) */
    struct subfacet one_subfacet;

    long long int learn_rl;      /* Rate limiter for facet_learn(). */
};
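
/* Illustrative sketch (hypothetical helper): walking a facet's subfacets,
 * e.g. to total the datapath packet counts.  'subfacets' links
 * "struct subfacet"s through their 'list_node' members, so OVS's
 * LIST_FOR_EACH applies:
 *
 *     static uint64_t
 *     facet_dp_packet_count(struct facet *facet)
 *     {
 *         struct subfacet *subfacet;
 *         uint64_t n = 0;
 *
 *         LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
 *             n += subfacet->dp_packet_count;
 *         }
 *         return n;
 *     }
 */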

static struct facet *facet_create(const struct flow_miss *);
static void facet_remove(struct facet *);
static void facet_free(struct facet *);

static struct facet *facet_find(struct ofproto_dpif *, const struct flow *);
static struct facet *facet_lookup_valid(struct ofproto_dpif *,
                                        const struct flow *);
static bool facet_revalidate(struct facet *);
static bool facet_check_consistency(struct facet *);

static void facet_flush_stats(struct facet *);

static void facet_reset_counters(struct facet *);
static void flow_push_stats(struct ofproto_dpif *, struct flow *,
                            struct dpif_flow_stats *, bool may_learn);
static void facet_push_stats(struct facet *, bool may_learn);
static void facet_learn(struct facet *);
static void facet_account(struct facet *);
static void push_all_stats(void);

static bool facet_is_controller_flow(struct facet *);

struct ofport_dpif {
    struct hmap_node odp_port_node; /* In dpif_backer's "odp_to_ofport_map". */
    struct ofport up;

    odp_port_t odp_port;
    struct ofbundle *bundle;    /* Bundle that contains this port, if any. */
    struct list bundle_node;    /* In struct ofbundle's "ports" list. */
    struct cfm *cfm;            /* Connectivity Fault Management, if any. */
    struct bfd *bfd;            /* BFD, if any. */
    bool may_enable;            /* May be enabled in bonds. */
    bool is_tunnel;             /* This port is a tunnel. */
    bool is_layer3;             /* This is a layer 3 port. */
    long long int carrier_seq;  /* Carrier status changes. */
    struct ofport_dpif *peer;   /* Peer if patch port. */

    /* Spanning tree. */
    struct stp_port *stp_port;  /* Spanning Tree Protocol, if any. */
    enum stp_state stp_state;   /* Always STP_DISABLED if STP not in use. */
    long long int stp_state_entered;

    /* Queue to DSCP mapping. */
    struct ofproto_port_queue *qdscp;
    size_t n_qdscp;

    /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
     *
     * This is deprecated.  It is only for compatibility with broken device
     * drivers in old versions of Linux that do not properly support VLANs when
     * VLAN devices are not used.  When broken device drivers are no longer in
     * widespread use, we will delete these interfaces. */
    ofp_port_t realdev_ofp_port;
    int vlandev_vid;
};

/* Linux VLAN device support (e.g. "eth0.10" for VLAN 10).
 *
 * This is deprecated.  It is only for compatibility with broken device drivers
 * in old versions of Linux that do not properly support VLANs when VLAN
 * devices are not used.  When broken device drivers are no longer in
 * widespread use, we will delete these interfaces. */
struct vlan_splinter {
    struct hmap_node realdev_vid_node;
    struct hmap_node vlandev_node;
    ofp_port_t realdev_ofp_port;
    ofp_port_t vlandev_ofp_port;
    int vid;
};

static void vsp_remove(struct ofport_dpif *);
static void vsp_add(struct ofport_dpif *, ofp_port_t realdev_ofp_port, int vid);

static odp_port_t ofp_port_to_odp_port(const struct ofproto_dpif *,
                                       ofp_port_t);

static ofp_port_t odp_port_to_ofp_port(const struct ofproto_dpif *,
                                       odp_port_t);

static struct ofport_dpif *
ofport_dpif_cast(const struct ofport *ofport)
{
    return ofport ? CONTAINER_OF(ofport, struct ofport_dpif, up) : NULL;
}

static void port_run(struct ofport_dpif *);
static int set_bfd(struct ofport *, const struct smap *);
static int set_cfm(struct ofport *, const struct cfm_settings *);
static void ofport_update_peer(struct ofport_dpif *);
static void run_fast_rl(void);
static int run_fast(struct ofproto *);

struct dpif_completion {
    struct list list_node;
    struct ofoperation *op;
};

/* Reasons that we might need to revalidate every facet, and corresponding
 * coverage counters.
 *
 * A value of 0 means that there is no need to revalidate.
 *
 * It would be nice to have some cleaner way to integrate with coverage
 * counters, but with only a few reasons I guess this is good enough for
 * now. */
enum revalidate_reason {
    REV_RECONFIGURE = 1,       /* Switch configuration changed. */
    REV_STP,                   /* Spanning tree protocol port status change. */
    REV_BOND,                  /* Bonding changed. */
    REV_PORT_TOGGLED,          /* Port enabled or disabled by CFM, LACP, ... */
    REV_FLOW_TABLE,            /* Flow table changed. */
    REV_MAC_LEARNING,          /* MAC learning changed. */
    REV_INCONSISTENCY          /* Facet self-check failed. */
};
COVERAGE_DEFINE(rev_reconfigure);
COVERAGE_DEFINE(rev_stp);
COVERAGE_DEFINE(rev_bond);
COVERAGE_DEFINE(rev_port_toggled);
COVERAGE_DEFINE(rev_flow_table);
COVERAGE_DEFINE(rev_mac_learning);
COVERAGE_DEFINE(rev_inconsistency);

struct avg_subfacet_rates {
    double add_rate;   /* Moving average of new flows created per minute. */
    double del_rate;   /* Moving average of flows deleted per minute. */
};
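
/* Illustrative sketch, not necessarily the exact update used here: rates like
 * these are naturally maintained as exponentially weighted moving averages,
 * folding each new per-minute sample into the running value.  The smoothing
 * constant below is chosen arbitrarily for illustration:
 *
 *     static void
 *     ema_update(double *avg, double sample)
 *     {
 *         const double alpha = 0.01;
 *
 *         *avg = (1.0 - alpha) * *avg + alpha * sample;
 *     }
 */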

/* All datapaths of a given type share a single dpif backer instance. */
struct dpif_backer {
    char *type;
    int refcount;
    struct dpif *dpif;
    struct udpif *udpif;
    struct timer next_expiration;

    struct ovs_rwlock odp_to_ofport_lock;
    struct hmap odp_to_ofport_map OVS_GUARDED; /* ODP port to ofport map. */

    struct simap tnl_backers;      /* Set of dpif ports backing tunnels. */

    /* Facet revalidation flags applying to facets which use this backer. */
    enum revalidate_reason need_revalidate; /* Revalidate every facet. */

    struct hmap drop_keys; /* Set of dropped odp keys. */
    bool recv_set_enable; /* Enables or disables receiving packets. */

    struct hmap subfacets;
    struct governor *governor;

    /* Subfacet statistics.
     *
     * These keep track of the total number of subfacets added and deleted and
     * flow life span.  They are useful for computing the flow-rate stats
     * exposed via "ovs-appctl dpif/show".  The goal is to learn about
     * traffic patterns in ways that we can use later to improve Open vSwitch
     * performance in new situations. */
    long long int created;           /* Time this backer was created. */
    unsigned max_n_subfacet;         /* Maximum number of flows. */
    unsigned avg_n_subfacet;         /* Average number of flows. */
    long long int avg_subfacet_life; /* Average life span of subfacets. */

    /* Number of upcall handling threads. */
    unsigned int n_handler_threads;
};

/* All existing dpif_backer instances, indexed by ofproto->up.type. */
static struct shash all_dpif_backers = SHASH_INITIALIZER(&all_dpif_backers);

static void drop_key_clear(struct dpif_backer *);

struct ofproto_dpif {
    struct hmap_node all_ofproto_dpifs_node; /* In 'all_ofproto_dpifs'. */
    struct ofproto up;
    struct dpif_backer *backer;

    /* Special OpenFlow rules. */
    struct rule_dpif *miss_rule; /* Sends flow table misses to controller. */
    struct rule_dpif *no_packet_in_rule; /* Drops flow table misses. */
    struct rule_dpif *drop_frags_rule; /* Used in OFPC_FRAG_DROP mode. */

    /* Bridging. */
    struct netflow *netflow;
    struct dpif_sflow *sflow;
    struct dpif_ipfix *ipfix;
    struct hmap bundles;        /* Contains "struct ofbundle"s. */
    struct mac_learning *ml;
    bool has_bonded_bundles;
    struct mbridge *mbridge;

    /* Facets. */
    struct classifier facets;     /* Contains 'struct facet's. */
    long long int consistency_rl;

    struct ovs_mutex stats_mutex;
    struct netdev_stats stats OVS_GUARDED; /* To account packets generated and
                                            * consumed in userspace. */

    /* Spanning tree. */
    struct stp *stp;
    long long int stp_last_tick;

    /* VLAN splinters. */
    struct ovs_mutex vsp_mutex;
    struct hmap realdev_vid_map OVS_GUARDED; /* (realdev,vid) -> vlandev. */
    struct hmap vlandev_map OVS_GUARDED;     /* vlandev -> (realdev,vid). */

    /* Ports. */
    struct sset ports;             /* Set of standard port names. */
    struct sset ghost_ports;       /* Ports with no datapath port. */
    struct sset port_poll_set;     /* Queued names for port_poll() reply. */
    int port_poll_errno;           /* Last errno for port_poll() reply. */

    /* Per-ofproto dpif stats. */
    uint64_t n_hit;
    uint64_t n_missed;

    /* Work queues. */
    struct guarded_list pins;      /* Contains "struct ofputil_packet_in"s. */
};

/* By default, flows in the datapath are wildcarded (megaflows).  They
 * may be disabled with the "ovs-appctl dpif/disable-megaflows" command. */
static bool enable_megaflows = true;

/* All existing ofproto_dpif instances, indexed by ->up.name. */
static struct hmap all_ofproto_dpifs = HMAP_INITIALIZER(&all_ofproto_dpifs);

static void ofproto_dpif_unixctl_init(void);

static inline struct ofproto_dpif *
ofproto_dpif_cast(const struct ofproto *ofproto)
{
    ovs_assert(ofproto->ofproto_class == &ofproto_dpif_class);
    return CONTAINER_OF(ofproto, struct ofproto_dpif, up);
}

static struct ofport_dpif *get_ofp_port(const struct ofproto_dpif *ofproto,
                                        ofp_port_t ofp_port);
static void ofproto_trace(struct ofproto_dpif *, const struct flow *,
                          const struct ofpbuf *packet,
                          const struct ofpact[], size_t ofpacts_len,
                          struct ds *);

/* Upcalls. */
static void handle_upcalls(struct dpif_backer *);

/* Flow expiration. */
static int expire(struct dpif_backer *);

/* NetFlow. */
static void send_netflow_active_timeouts(struct ofproto_dpif *);

/* Global variables. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);

/* Initial mappings of ports to bridges. */
static struct shash init_ofp_ports = SHASH_INITIALIZER(&init_ofp_ports);

/* Executes 'fm'.  The caller retains ownership of 'fm' and everything in
 * it. */
void
ofproto_dpif_flow_mod(struct ofproto_dpif *ofproto,
                      struct ofputil_flow_mod *fm)
{
    ofproto_flow_mod(&ofproto->up, fm);
}
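
/* Illustrative usage sketch: a caller builds a complete
 * "struct ofputil_flow_mod" first; ownership of 'fm' and its 'ofpacts' stays
 * with the caller.  Compare add_internal_flow() below, which fills in 'fm'
 * the same way:
 *
 *     struct ofputil_flow_mod fm;
 *
 *     match_init_catchall(&fm.match);
 *     fm.priority = 0;
 *     fm.table_id = TBL_INTERNAL;
 *     fm.command = OFPFC_ADD;
 *     fm.new_cookie = htonll(0);
 *     fm.cookie = htonll(0);
 *     fm.cookie_mask = htonll(0);
 *     fm.modify_cookie = false;
 *     fm.idle_timeout = 0;
 *     fm.hard_timeout = 0;
 *     fm.buffer_id = 0;
 *     fm.out_port = 0;
 *     fm.flags = 0;
 *     fm.ofpacts = ofpacts->data;
 *     fm.ofpacts_len = ofpacts->size;
 *
 *     ofproto_dpif_flow_mod(ofproto, &fm);
 */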

/* Appends 'pin' to the queue of "packet ins" to be sent to the controller.
 * Takes ownership of 'pin' and pin->packet. */
void
ofproto_dpif_send_packet_in(struct ofproto_dpif *ofproto,
                            struct ofproto_packet_in *pin)
{
    if (!guarded_list_push_back(&ofproto->pins, &pin->list_node, 1024)) {
        COVERAGE_INC(packet_in_overflow);
        free(CONST_CAST(void *, pin->up.packet));
        free(pin);
    }
}
\f
/* Factory functions. */

static void
init(const struct shash *iface_hints)
{
    struct shash_node *node;

    /* Make a local copy, since we don't own 'iface_hints' elements. */
    SHASH_FOR_EACH(node, iface_hints) {
        const struct iface_hint *orig_hint = node->data;
        struct iface_hint *new_hint = xmalloc(sizeof *new_hint);

        new_hint->br_name = xstrdup(orig_hint->br_name);
        new_hint->br_type = xstrdup(orig_hint->br_type);
        new_hint->ofp_port = orig_hint->ofp_port;

        shash_add(&init_ofp_ports, node->name, new_hint);
    }
}

static void
enumerate_types(struct sset *types)
{
    dp_enumerate_types(types);
}

static int
enumerate_names(const char *type, struct sset *names)
{
    struct ofproto_dpif *ofproto;

    sset_clear(names);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (strcmp(type, ofproto->up.type)) {
            continue;
        }
        sset_add(names, ofproto->up.name);
    }

    return 0;
}

static int
del(const char *type, const char *name)
{
    struct dpif *dpif;
    int error;

    error = dpif_open(name, type, &dpif);
    if (!error) {
        error = dpif_delete(dpif);
        dpif_close(dpif);
    }
    return error;
}
\f
static const char *
port_open_type(const char *datapath_type, const char *port_type)
{
    return dpif_port_open_type(datapath_type, port_type);
}

/* Type functions. */

static void process_dpif_port_changes(struct dpif_backer *);
static void process_dpif_all_ports_changed(struct dpif_backer *);
static void process_dpif_port_change(struct dpif_backer *,
                                     const char *devname);
static void process_dpif_port_error(struct dpif_backer *, int error);

static struct ofproto_dpif *
lookup_ofproto_dpif_by_port_name(const char *name)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (sset_contains(&ofproto->ports, name)) {
            return ofproto;
        }
    }

    return NULL;
}

static int
type_run(const char *type)
{
    static long long int push_timer = LLONG_MIN;
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    dpif_run(backer->dpif);

    /* The most natural place to push facet statistics is when they're pulled
     * from the datapath.  However, when there are many flows in the datapath,
     * this expensive operation can occur so frequently that it reduces our
     * ability to quickly set up flows.  To reduce the cost, we push statistics
     * here instead. */
    if (time_msec() > push_timer) {
        push_timer = time_msec() + 2000;
        push_all_stats();
    }

    /* If vswitchd started with other_config:flow_restore_wait set to "true",
     * and the configuration has now changed to "false", enable receiving
     * packets from the datapath. */
    if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
        int error;

        backer->recv_set_enable = true;

        error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
        if (error) {
            udpif_recv_set(backer->udpif, 0, false);
            VLOG_ERR("Failed to enable receiving packets in dpif.");
            return error;
        }
        udpif_recv_set(backer->udpif, n_handler_threads,
                       backer->recv_set_enable);
        dpif_flow_flush(backer->dpif);
        backer->need_revalidate = REV_RECONFIGURE;
    }

    /* If n_handler_threads has been reconfigured, call udpif_recv_set()
     * to reset the handler threads. */
    if (backer->n_handler_threads != n_handler_threads) {
        udpif_recv_set(backer->udpif, n_handler_threads,
                       backer->recv_set_enable);
        backer->n_handler_threads = n_handler_threads;
    }

    if (backer->need_revalidate) {
        struct ofproto_dpif *ofproto;
        struct simap_node *node;
        struct simap tmp_backers;

        /* Handle tunnel garbage collection. */
        simap_init(&tmp_backers);
        simap_swap(&backer->tnl_backers, &tmp_backers);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct ofport_dpif *iter;

            if (backer != ofproto->backer) {
                continue;
            }

            HMAP_FOR_EACH (iter, up.hmap_node, &ofproto->up.ports) {
                char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
                const char *dp_port;

                if (!iter->is_tunnel) {
                    continue;
                }

                dp_port = netdev_vport_get_dpif_port(iter->up.netdev,
                                                     namebuf, sizeof namebuf);
                node = simap_find(&tmp_backers, dp_port);
                if (node) {
                    simap_put(&backer->tnl_backers, dp_port, node->data);
                    simap_delete(&tmp_backers, node);
                    node = simap_find(&backer->tnl_backers, dp_port);
                } else {
                    node = simap_find(&backer->tnl_backers, dp_port);
                    if (!node) {
                        odp_port_t odp_port = ODPP_NONE;

                        if (!dpif_port_add(backer->dpif, iter->up.netdev,
                                           &odp_port)) {
                            simap_put(&backer->tnl_backers, dp_port,
                                      odp_to_u32(odp_port));
                            node = simap_find(&backer->tnl_backers, dp_port);
                        }
                    }
                }

                iter->odp_port = node ? u32_to_odp(node->data) : ODPP_NONE;
                if (tnl_port_reconfigure(iter, iter->up.netdev,
                                         iter->odp_port)) {
                    backer->need_revalidate = REV_RECONFIGURE;
                }
            }
        }

        SIMAP_FOR_EACH (node, &tmp_backers) {
            dpif_port_del(backer->dpif, u32_to_odp(node->data));
        }
        simap_destroy(&tmp_backers);

        switch (backer->need_revalidate) {
        case REV_RECONFIGURE:   COVERAGE_INC(rev_reconfigure);   break;
        case REV_STP:           COVERAGE_INC(rev_stp);           break;
        case REV_BOND:          COVERAGE_INC(rev_bond);          break;
        case REV_PORT_TOGGLED:  COVERAGE_INC(rev_port_toggled);  break;
        case REV_FLOW_TABLE:    COVERAGE_INC(rev_flow_table);    break;
        case REV_MAC_LEARNING:  COVERAGE_INC(rev_mac_learning);  break;
        case REV_INCONSISTENCY: COVERAGE_INC(rev_inconsistency); break;
        }
        backer->need_revalidate = 0;

        /* Clear the drop_keys in case we should now be accepting some
         * formerly dropped flows. */
        drop_key_clear(backer);

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            struct facet *facet, *next;
            struct ofport_dpif *ofport;
            struct cls_cursor cursor;
            struct ofbundle *bundle;

            if (ofproto->backer != backer) {
                continue;
            }

            ovs_rwlock_wrlock(&xlate_rwlock);
            xlate_ofproto_set(ofproto, ofproto->up.name,
                              ofproto->backer->dpif, ofproto->miss_rule,
                              ofproto->no_packet_in_rule, ofproto->ml,
                              ofproto->stp, ofproto->mbridge,
                              ofproto->sflow, ofproto->ipfix,
                              ofproto->up.frag_handling,
                              ofproto->up.forward_bpdu,
                              connmgr_has_in_band(ofproto->up.connmgr),
                              ofproto->netflow != NULL);

            HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
                xlate_bundle_set(ofproto, bundle, bundle->name,
                                 bundle->vlan_mode, bundle->vlan,
                                 bundle->trunks, bundle->use_priority_tags,
                                 bundle->bond, bundle->lacp,
                                 bundle->floodable);
            }

            HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
                int stp_port = ofport->stp_port
                    ? stp_port_no(ofport->stp_port)
                    : -1;
                xlate_ofport_set(ofproto, ofport->bundle, ofport,
                                 ofport->up.ofp_port, ofport->odp_port,
                                 ofport->up.netdev, ofport->cfm,
                                 ofport->bfd, ofport->peer, stp_port,
                                 ofport->qdscp, ofport->n_qdscp,
                                 ofport->up.pp.config, ofport->up.pp.state,
                                 ofport->is_tunnel, ofport->may_enable);
            }
            ovs_rwlock_unlock(&xlate_rwlock);

            /* Only ofproto-dpif cares about the facet classifier so we just
             * lock cls_cursor_init() to appease the thread safety analysis. */
            ovs_rwlock_rdlock(&ofproto->facets.rwlock);
            cls_cursor_init(&cursor, &ofproto->facets, NULL);
            ovs_rwlock_unlock(&ofproto->facets.rwlock);
            CLS_CURSOR_FOR_EACH_SAFE (facet, next, cr, &cursor) {
                facet_revalidate(facet);
                run_fast_rl();
            }
        }

        udpif_revalidate(backer->udpif);
    }

    if (!backer->recv_set_enable) {
        /* Wake up after at most 1000 ms. */
        timer_set_duration(&backer->next_expiration, 1000);
    } else if (timer_expired(&backer->next_expiration)) {
        int delay = expire(backer);
        timer_set_duration(&backer->next_expiration, delay);
    }

    process_dpif_port_changes(backer);

    if (backer->governor) {
        size_t n_subfacets;

        governor_run(backer->governor);

        /* If the governor has shrunk to its minimum size and the number of
         * subfacets has dwindled, then drop the governor entirely.
         *
         * For hysteresis, the number of subfacets to drop the governor is
         * smaller than the number needed to trigger its creation. */
        n_subfacets = hmap_count(&backer->subfacets);
        if (n_subfacets * 4 < flow_eviction_threshold
            && governor_is_idle(backer->governor)) {
            governor_destroy(backer->governor);
            backer->governor = NULL;
        }
    }

    return 0;
}

/* Check for and handle port changes in 'backer''s dpif. */
static void
process_dpif_port_changes(struct dpif_backer *backer)
{
    for (;;) {
        char *devname;
        int error;

        error = dpif_port_poll(backer->dpif, &devname);
        switch (error) {
        case EAGAIN:
            return;

        case ENOBUFS:
            process_dpif_all_ports_changed(backer);
            break;

        case 0:
            process_dpif_port_change(backer, devname);
            free(devname);
            break;

        default:
            process_dpif_port_error(backer, error);
            break;
        }
    }
}

static void
process_dpif_all_ports_changed(struct dpif_backer *backer)
{
    struct ofproto_dpif *ofproto;
    struct dpif_port dpif_port;
    struct dpif_port_dump dump;
    struct sset devnames;
    const char *devname;

    sset_init(&devnames);
    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            struct ofport *ofport;

            HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
                sset_add(&devnames, netdev_get_name(ofport->netdev));
            }
        }
    }
    DPIF_PORT_FOR_EACH (&dpif_port, &dump, backer->dpif) {
        sset_add(&devnames, dpif_port.name);
    }

    SSET_FOR_EACH (devname, &devnames) {
        process_dpif_port_change(backer, devname);
    }
    sset_destroy(&devnames);
}

static void
process_dpif_port_change(struct dpif_backer *backer, const char *devname)
{
    struct ofproto_dpif *ofproto;
    struct dpif_port port;

    /* Don't report on the datapath's device. */
    if (!strcmp(devname, dpif_base_name(backer->dpif))) {
        return;
    }

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node,
                   &all_ofproto_dpifs) {
        if (simap_contains(&ofproto->backer->tnl_backers, devname)) {
            return;
        }
    }

    ofproto = lookup_ofproto_dpif_by_port_name(devname);
    if (dpif_port_query_by_name(backer->dpif, devname, &port)) {
        /* The port was removed.  If we know the datapath,
         * report it through poll_set().  If we don't, it may be
         * notifying us of a removal we initiated, so ignore it.
         * If there's a pending ENOBUFS, let it stand, since
         * everything will be reevaluated. */
        if (ofproto && ofproto->port_poll_errno != ENOBUFS) {
            sset_add(&ofproto->port_poll_set, devname);
            ofproto->port_poll_errno = 0;
        }
    } else if (!ofproto) {
        /* The port was added, but we don't know with which
         * ofproto we should associate it.  Delete it. */
        dpif_port_del(backer->dpif, port.port_no);
    } else {
        struct ofport_dpif *ofport;

        ofport = ofport_dpif_cast(shash_find_data(
                                      &ofproto->up.port_by_name, devname));
        if (ofport
            && ofport->odp_port != port.port_no
            && !odp_port_to_ofport(backer, port.port_no))
        {
            /* 'ofport''s datapath port number has changed from
             * 'ofport->odp_port' to 'port.port_no'.  Update our internal data
             * structures to match. */
            ovs_rwlock_wrlock(&backer->odp_to_ofport_lock);
            hmap_remove(&backer->odp_to_ofport_map, &ofport->odp_port_node);
            ofport->odp_port = port.port_no;
            hmap_insert(&backer->odp_to_ofport_map, &ofport->odp_port_node,
                        hash_odp_port(port.port_no));
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            backer->need_revalidate = REV_RECONFIGURE;
        }
    }
    dpif_port_destroy(&port);
}

/* Propagate 'error' to all ofprotos based on 'backer'. */
static void
process_dpif_port_error(struct dpif_backer *backer, int error)
{
    struct ofproto_dpif *ofproto;

    HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
        if (ofproto->backer == backer) {
            sset_clear(&ofproto->port_poll_set);
            ofproto->port_poll_errno = error;
        }
    }
}

static int
dpif_backer_run_fast(struct dpif_backer *backer)
{
    handle_upcalls(backer);

    return 0;
}

static int
type_run_fast(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return 0;
    }

    return dpif_backer_run_fast(backer);
}

static void
run_fast_rl(void)
{
    static long long int port_rl = LLONG_MIN;

    if (time_msec() >= port_rl) {
        struct ofproto_dpif *ofproto;

        HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
            run_fast(&ofproto->up);
        }
        port_rl = time_msec() + 200;
    }
}

static void
type_wait(const char *type)
{
    struct dpif_backer *backer;

    backer = shash_find_data(&all_dpif_backers, type);
    if (!backer) {
        /* This is not necessarily a problem, since backers are only
         * created on demand. */
        return;
    }

    if (backer->governor) {
        governor_wait(backer->governor);
    }

    timer_wait(&backer->next_expiration);
    dpif_wait(backer->dpif);
    udpif_wait(backer->udpif);
}
\f
/* Basic life-cycle. */

static int add_internal_flows(struct ofproto_dpif *);

static struct ofproto *
alloc(void)
{
    struct ofproto_dpif *ofproto = xmalloc(sizeof *ofproto);
    return &ofproto->up;
}

static void
dealloc(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    free(ofproto);
}

static void
close_dpif_backer(struct dpif_backer *backer)
{
    ovs_assert(backer->refcount > 0);

    if (--backer->refcount) {
        return;
    }

    drop_key_clear(backer);
    hmap_destroy(&backer->drop_keys);

    udpif_destroy(backer->udpif);

    simap_destroy(&backer->tnl_backers);
    ovs_rwlock_destroy(&backer->odp_to_ofport_lock);
    hmap_destroy(&backer->odp_to_ofport_map);
    shash_find_and_delete(&all_dpif_backers, backer->type);
    free(backer->type);
    dpif_close(backer->dpif);

    ovs_assert(hmap_is_empty(&backer->subfacets));
    hmap_destroy(&backer->subfacets);
    governor_destroy(backer->governor);

    free(backer);
}

/* A datapath port slated for removal from its datapath. */
struct odp_garbage {
    struct list list_node;
    odp_port_t odp_port;
};

static int
open_dpif_backer(const char *type, struct dpif_backer **backerp)
{
    struct dpif_backer *backer;
    struct dpif_port_dump port_dump;
    struct dpif_port port;
    struct shash_node *node;
    struct list garbage_list;
    struct odp_garbage *garbage, *next;
    struct sset names;
    char *backer_name;
    const char *name;
    int error;

    backer = shash_find_data(&all_dpif_backers, type);
    if (backer) {
        backer->refcount++;
        *backerp = backer;
        return 0;
    }

    backer_name = xasprintf("ovs-%s", type);

    /* Remove any existing datapaths, since we assume we're the only
     * userspace controlling the datapath. */
    sset_init(&names);
    dp_enumerate_names(type, &names);
    SSET_FOR_EACH(name, &names) {
        struct dpif *old_dpif;

        /* Don't remove our backer if it exists. */
        if (!strcmp(name, backer_name)) {
            continue;
        }

        if (dpif_open(name, type, &old_dpif)) {
            VLOG_WARN("couldn't open old datapath %s to remove it", name);
        } else {
            dpif_delete(old_dpif);
            dpif_close(old_dpif);
        }
    }
    sset_destroy(&names);

    backer = xmalloc(sizeof *backer);

    error = dpif_create_and_open(backer_name, type, &backer->dpif);
    free(backer_name);
    if (error) {
        VLOG_ERR("failed to open datapath of type %s: %s", type,
                 ovs_strerror(error));
        free(backer);
        return error;
    }
    backer->udpif = udpif_create(backer, backer->dpif);

    backer->type = xstrdup(type);
    backer->governor = NULL;
    backer->refcount = 1;
    hmap_init(&backer->odp_to_ofport_map);
    ovs_rwlock_init(&backer->odp_to_ofport_lock);
    hmap_init(&backer->drop_keys);
    hmap_init(&backer->subfacets);
    timer_set_duration(&backer->next_expiration, 1000);
    backer->need_revalidate = 0;
    simap_init(&backer->tnl_backers);
    backer->recv_set_enable = !ofproto_get_flow_restore_wait();
    *backerp = backer;

    if (backer->recv_set_enable) {
        dpif_flow_flush(backer->dpif);
    }

    /* Loop through the ports already on the datapath and remove any
     * that we don't need anymore. */
    list_init(&garbage_list);
    dpif_port_dump_start(&port_dump, backer->dpif);
    while (dpif_port_dump_next(&port_dump, &port)) {
        node = shash_find(&init_ofp_ports, port.name);
        if (!node && strcmp(port.name, dpif_base_name(backer->dpif))) {
            garbage = xmalloc(sizeof *garbage);
            garbage->odp_port = port.port_no;
            list_push_front(&garbage_list, &garbage->list_node);
        }
    }
    dpif_port_dump_done(&port_dump);

    LIST_FOR_EACH_SAFE (garbage, next, list_node, &garbage_list) {
        dpif_port_del(backer->dpif, garbage->odp_port);
        list_remove(&garbage->list_node);
        free(garbage);
    }

    shash_add(&all_dpif_backers, type, backer);

    error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
    if (error) {
        VLOG_ERR("failed to listen on datapath of type %s: %s",
                 type, ovs_strerror(error));
        close_dpif_backer(backer);
        return error;
    }
    udpif_recv_set(backer->udpif, n_handler_threads,
                   backer->recv_set_enable);
    backer->n_handler_threads = n_handler_threads;

    backer->max_n_subfacet = 0;
    backer->created = time_msec();
    backer->avg_n_subfacet = 0;
    backer->avg_subfacet_life = 0;

    return error;
}

static int
construct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct shash_node *node, *next;
    int error;

    error = open_dpif_backer(ofproto->up.type, &ofproto->backer);
    if (error) {
        return error;
    }

    ofproto->netflow = NULL;
    ofproto->sflow = NULL;
    ofproto->ipfix = NULL;
    ofproto->stp = NULL;
    hmap_init(&ofproto->bundles);
    ofproto->ml = mac_learning_create(MAC_ENTRY_DEFAULT_IDLE_TIME);
    ofproto->mbridge = mbridge_create();
    ofproto->has_bonded_bundles = false;
    ovs_mutex_init(&ofproto->stats_mutex);
    ovs_mutex_init(&ofproto->vsp_mutex);

    classifier_init(&ofproto->facets, NULL);
    ofproto->consistency_rl = LLONG_MIN;

    guarded_list_init(&ofproto->pins);

    ofproto_dpif_unixctl_init();

    hmap_init(&ofproto->vlandev_map);
    hmap_init(&ofproto->realdev_vid_map);

    sset_init(&ofproto->ports);
    sset_init(&ofproto->ghost_ports);
    sset_init(&ofproto->port_poll_set);
    ofproto->port_poll_errno = 0;

    SHASH_FOR_EACH_SAFE (node, next, &init_ofp_ports) {
        struct iface_hint *iface_hint = node->data;

        if (!strcmp(iface_hint->br_name, ofproto->up.name)) {
            /* Check if the datapath already has this port. */
            if (dpif_port_exists(ofproto->backer->dpif, node->name)) {
                sset_add(&ofproto->ports, node->name);
            }

            free(iface_hint->br_name);
            free(iface_hint->br_type);
            free(iface_hint);
            shash_delete(&init_ofp_ports, node);
        }
    }

    hmap_insert(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node,
                hash_string(ofproto->up.name, 0));
    memset(&ofproto->stats, 0, sizeof ofproto->stats);

    ofproto_init_tables(ofproto_, N_TABLES);
    error = add_internal_flows(ofproto);
    ofproto->up.tables[TBL_INTERNAL].flags = OFTABLE_HIDDEN | OFTABLE_READONLY;

    ofproto->n_hit = 0;
    ofproto->n_missed = 0;

    return error;
}

static int
add_internal_flow(struct ofproto_dpif *ofproto, int id,
                  const struct ofpbuf *ofpacts, struct rule_dpif **rulep)
{
    struct ofputil_flow_mod fm;
    int error;

    match_init_catchall(&fm.match);
    fm.priority = 0;
    match_set_reg(&fm.match, 0, id);
    fm.new_cookie = htonll(0);
    fm.cookie = htonll(0);
    fm.cookie_mask = htonll(0);
    fm.modify_cookie = false;
    fm.table_id = TBL_INTERNAL;
    fm.command = OFPFC_ADD;
    fm.idle_timeout = 0;
    fm.hard_timeout = 0;
    fm.buffer_id = 0;
    fm.out_port = 0;
    fm.flags = 0;
    fm.ofpacts = ofpacts->data;
    fm.ofpacts_len = ofpacts->size;

    error = ofproto_flow_mod(&ofproto->up, &fm);
    if (error) {
        VLOG_ERR_RL(&rl, "failed to add internal flow %d (%s)",
                    id, ofperr_to_string(error));
        return error;
    }

    if (rule_dpif_lookup_in_table(ofproto, &fm.match.flow, NULL, TBL_INTERNAL,
                                  rulep)) {
        rule_dpif_unref(*rulep);
    } else {
        NOT_REACHED();
    }

    return 0;
}

static int
add_internal_flows(struct ofproto_dpif *ofproto)
{
    struct ofpact_controller *controller;
    uint64_t ofpacts_stub[128 / 8];
    struct ofpbuf ofpacts;
    int error;
    int id;

    ofpbuf_use_stack(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
    id = 1;

    controller = ofpact_put_CONTROLLER(&ofpacts);
    controller->max_len = UINT16_MAX;
    controller->controller_id = 0;
    controller->reason = OFPR_NO_MATCH;
    ofpact_pad(&ofpacts);

    error = add_internal_flow(ofproto, id++, &ofpacts, &ofproto->miss_rule);
    if (error) {
        return error;
    }

    ofpbuf_clear(&ofpacts);
    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->no_packet_in_rule);
    if (error) {
        return error;
    }

    error = add_internal_flow(ofproto, id++, &ofpacts,
                              &ofproto->drop_frags_rule);
    return error;
}

static void
destruct(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct rule_dpif *rule, *next_rule;
    struct ofproto_packet_in *pin, *next_pin;
    struct facet *facet, *next_facet;
    struct cls_cursor cursor;
    struct oftable *table;
    struct list pins;

    ovs_rwlock_rdlock(&ofproto->facets.rwlock);
    cls_cursor_init(&cursor, &ofproto->facets, NULL);
    ovs_rwlock_unlock(&ofproto->facets.rwlock);
    CLS_CURSOR_FOR_EACH_SAFE (facet, next_facet, cr, &cursor) {
        facet_remove(facet);
    }

    ofproto->backer->need_revalidate = REV_RECONFIGURE;
    ovs_rwlock_wrlock(&xlate_rwlock);
    xlate_remove_ofproto(ofproto);
    ovs_rwlock_unlock(&xlate_rwlock);

    /* Discard any flow_miss_batches queued up for 'ofproto', avoiding a
     * use-after-free error. */
    udpif_revalidate(ofproto->backer->udpif);

    hmap_remove(&all_ofproto_dpifs, &ofproto->all_ofproto_dpifs_node);

    OFPROTO_FOR_EACH_TABLE (table, &ofproto->up) {
        struct cls_cursor cursor;

        ovs_rwlock_rdlock(&table->cls.rwlock);
        cls_cursor_init(&cursor, &table->cls, NULL);
        ovs_rwlock_unlock(&table->cls.rwlock);
        CLS_CURSOR_FOR_EACH_SAFE (rule, next_rule, up.cr, &cursor) {
            ofproto_rule_delete(&ofproto->up, &rule->up);
        }
    }

    guarded_list_pop_all(&ofproto->pins, &pins);
    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
        list_remove(&pin->list_node);
        free(CONST_CAST(void *, pin->up.packet));
        free(pin);
    }
    guarded_list_destroy(&ofproto->pins);

    mbridge_unref(ofproto->mbridge);

    netflow_destroy(ofproto->netflow);
    dpif_sflow_unref(ofproto->sflow);
    hmap_destroy(&ofproto->bundles);
    mac_learning_unref(ofproto->ml);

    classifier_destroy(&ofproto->facets);

    hmap_destroy(&ofproto->vlandev_map);
    hmap_destroy(&ofproto->realdev_vid_map);

    sset_destroy(&ofproto->ports);
    sset_destroy(&ofproto->ghost_ports);
    sset_destroy(&ofproto->port_poll_set);

    ovs_mutex_destroy(&ofproto->stats_mutex);
    ovs_mutex_destroy(&ofproto->vsp_mutex);

    close_dpif_backer(ofproto->backer);
}

static int
run_fast(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofproto_packet_in *pin, *next_pin;
    struct list pins;

    /* Do not perform any periodic activity required by 'ofproto' while
     * waiting for flow restore to complete. */
    if (ofproto_get_flow_restore_wait()) {
        return 0;
    }

    guarded_list_pop_all(&ofproto->pins, &pins);
    LIST_FOR_EACH_SAFE (pin, next_pin, list_node, &pins) {
        connmgr_send_packet_in(ofproto->up.connmgr, pin);
        list_remove(&pin->list_node);
        free(CONST_CAST(void *, pin->up.packet));
        free(pin);
    }

    return 0;
}

static int
run(struct ofproto *ofproto_)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
    struct ofport_dpif *ofport;
    struct ofbundle *bundle;
    int error;

    if (mbridge_need_revalidate(ofproto->mbridge)) {
        ofproto->backer->need_revalidate = REV_RECONFIGURE;
        ovs_rwlock_wrlock(&ofproto->ml->rwlock);
        mac_learning_flush(ofproto->ml);
        ovs_rwlock_unlock(&ofproto->ml->rwlock);
    }

    /* Do not perform any of the periodic activity below that is required by
     * 'ofproto' while waiting for flow restore to complete. */
    if (ofproto_get_flow_restore_wait()) {
        return 0;
    }

    error = run_fast(ofproto_);
    if (error) {
        return error;
    }

    if (ofproto->netflow) {
        if (netflow_run(ofproto->netflow)) {
            send_netflow_active_timeouts(ofproto);
        }
    }
    if (ofproto->sflow) {
        dpif_sflow_run(ofproto->sflow);
    }
    if (ofproto->ipfix) {
        dpif_ipfix_run(ofproto->ipfix);
    }

    HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
        port_run(ofport);
    }
    HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
        bundle_run(bundle);
    }

    stp_run(ofproto);
    ovs_rwlock_wrlock(&ofproto->ml->rwlock);
    if (mac_learning_run(ofproto->ml)) {
        ofproto->backer->need_revalidate = REV_MAC_LEARNING;
    }
    ovs_rwlock_unlock(&ofproto->ml->rwlock);

    /* Check the consistency of a random facet to aid debugging. */
    ovs_rwlock_rdlock(&ofproto->facets.rwlock);
    if (time_msec() >= ofproto->consistency_rl
        && !classifier_is_empty(&ofproto->facets)
        && !ofproto->backer->need_revalidate) {
        struct cls_subtable *table;
        struct cls_rule *cr;
        struct facet *facet;

        ofproto->consistency_rl = time_msec() + 250;

        table = CONTAINER_OF(hmap_random_node(&ofproto->facets.subtables),
                             struct cls_subtable, hmap_node);
        cr = CONTAINER_OF(hmap_random_node(&table->rules), struct cls_rule,
                          hmap_node);
        facet = CONTAINER_OF(cr, struct facet, cr);

        if (!facet_check_consistency(facet)) {
            ofproto->backer->need_revalidate = REV_INCONSISTENCY;
        }
    }
    ovs_rwlock_unlock(&ofproto->facets.rwlock);

    return 0;
}
1550
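/* Registers poll-loop wakeups for the subsystems that run() services, so
 * that the main loop sleeps only until one of them next needs attention. */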
1551 static void
1552 wait(struct ofproto *ofproto_)
1553 {
1554     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1555     struct ofbundle *bundle;
1556
1557     if (ofproto_get_flow_restore_wait()) {
1558         return;
1559     }
1560
1561     if (ofproto->sflow) {
1562         dpif_sflow_wait(ofproto->sflow);
1563     }
1564     if (ofproto->ipfix) {
1565         dpif_ipfix_wait(ofproto->ipfix);
1566     }
1567     HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
1568         bundle_wait(bundle);
1569     }
1570     if (ofproto->netflow) {
1571         netflow_wait(ofproto->netflow);
1572     }
1573     ovs_rwlock_rdlock(&ofproto->ml->rwlock);
1574     mac_learning_wait(ofproto->ml);
1575     ovs_rwlock_unlock(&ofproto->ml->rwlock);
1576     stp_wait(ofproto);
1577     if (ofproto->backer->need_revalidate) {
1578         /* Shouldn't happen, but if it does just go around again. */
1579         VLOG_DBG_RL(&rl, "need revalidate in wait()");
1580         poll_immediate_wake();
1581     }
1582 }
1583
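/* Reports the number of facets and subfacets owned by 'ofproto_' into
 * 'usage', for the daemon's memory accounting. */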
1584 static void
1585 get_memory_usage(const struct ofproto *ofproto_, struct simap *usage)
1586 {
1587     const struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1588     struct cls_cursor cursor;
1589     size_t n_subfacets = 0;
1590     struct facet *facet;
1591
1592     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
1593     simap_increase(usage, "facets", classifier_count(&ofproto->facets));
1594     ovs_rwlock_unlock(&ofproto->facets.rwlock);
1595
1596     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
1597     cls_cursor_init(&cursor, &ofproto->facets, NULL);
1598     CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
1599         n_subfacets += list_size(&facet->subfacets);
1600     }
1601     ovs_rwlock_unlock(&ofproto->facets.rwlock);
1602     simap_increase(usage, "subfacets", n_subfacets);
1603 }
1604
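/* Deletes every datapath flow belonging to 'ofproto_'.  Installed subfacets
 * are destroyed in batches of up to SUBFACET_DESTROY_MAX_BATCH, presumably
 * to limit the number of round trips into the datapath; subfacets that were
 * never installed are destroyed one at a time. */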
1605 static void
1606 flush(struct ofproto *ofproto_)
1607 {
1608     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1609     struct subfacet *subfacet, *next_subfacet;
1610     struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
1611     int n_batch;
1612
1613     n_batch = 0;
1614     HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
1615                         &ofproto->backer->subfacets) {
1616         if (subfacet->facet->ofproto != ofproto) {
1617             continue;
1618         }
1619
1620         if (subfacet->path != SF_NOT_INSTALLED) {
1621             batch[n_batch++] = subfacet;
1622             if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
1623                 subfacet_destroy_batch(ofproto->backer, batch, n_batch);
1624                 n_batch = 0;
1625             }
1626         } else {
1627             subfacet_destroy(subfacet);
1628         }
1629     }
1630
1631     if (n_batch > 0) {
1632         subfacet_destroy_batch(ofproto->backer, batch, n_batch);
1633     }
1634 }
1635
1636 static void
1637 get_features(struct ofproto *ofproto_ OVS_UNUSED,
1638              bool *arp_match_ip, enum ofputil_action_bitmap *actions)
1639 {
1640     *arp_match_ip = true;
1641     *actions = (OFPUTIL_A_OUTPUT |
1642                 OFPUTIL_A_SET_VLAN_VID |
1643                 OFPUTIL_A_SET_VLAN_PCP |
1644                 OFPUTIL_A_STRIP_VLAN |
1645                 OFPUTIL_A_SET_DL_SRC |
1646                 OFPUTIL_A_SET_DL_DST |
1647                 OFPUTIL_A_SET_NW_SRC |
1648                 OFPUTIL_A_SET_NW_DST |
1649                 OFPUTIL_A_SET_NW_TOS |
1650                 OFPUTIL_A_SET_TP_SRC |
1651                 OFPUTIL_A_SET_TP_DST |
1652                 OFPUTIL_A_ENQUEUE);
1653 }
1654
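/* Fills in OpenFlow statistics for the single "classifier" table, deriving
 * them from datapath statistics and the internal miss, no-packet-in, and
 * drop-frags rules.  Roughly, as computed below:
 *
 *     lookups = dp hits + dp misses - drop_frags_rule packets
 *     matched = lookups - miss_rule packets - no_packet_in_rule packets
 */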
1655 static void
1656 get_tables(struct ofproto *ofproto_, struct ofp12_table_stats *ots)
1657 {
1658     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1659     struct dpif_dp_stats s;
1660     uint64_t n_miss, n_no_pkt_in, n_bytes, n_dropped_frags;
1661     uint64_t n_lookup;
1662
1663     strcpy(ots->name, "classifier");
1664
1665     dpif_get_dp_stats(ofproto->backer->dpif, &s);
1666     rule_get_stats(&ofproto->miss_rule->up, &n_miss, &n_bytes);
1667     rule_get_stats(&ofproto->no_packet_in_rule->up, &n_no_pkt_in, &n_bytes);
1668     rule_get_stats(&ofproto->drop_frags_rule->up, &n_dropped_frags, &n_bytes);
1669
1670     n_lookup = s.n_hit + s.n_missed - n_dropped_frags;
1671     ots->lookup_count = htonll(n_lookup);
1672     ots->matched_count = htonll(n_lookup - n_miss - n_no_pkt_in);
1673 }
1674
1675 static struct ofport *
1676 port_alloc(void)
1677 {
1678     struct ofport_dpif *port = xmalloc(sizeof *port);
1679     return &port->up;
1680 }
1681
1682 static void
1683 port_dealloc(struct ofport *port_)
1684 {
1685     struct ofport_dpif *port = ofport_dpif_cast(port_);
1686     free(port);
1687 }
1688
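/* Initializes the dpif-specific state of newly added port 'port_'.  Patch
 * ports get no datapath port number (and are deliberately kept away from the
 * sFlow module); tunnel ports are registered with the tunneling module; any
 * other port is entered into the backer's odp-to-ofp port map. */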
1689 static int
1690 port_construct(struct ofport *port_)
1691 {
1692     struct ofport_dpif *port = ofport_dpif_cast(port_);
1693     struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1694     const struct netdev *netdev = port->up.netdev;
1695     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1696     struct dpif_port dpif_port;
1697     int error;
1698
1699     ofproto->backer->need_revalidate = REV_RECONFIGURE;
1700     port->bundle = NULL;
1701     port->cfm = NULL;
1702     port->bfd = NULL;
1703     port->may_enable = true;
1704     port->stp_port = NULL;
1705     port->stp_state = STP_DISABLED;
1706     port->is_tunnel = false;
1707     port->peer = NULL;
1708     port->qdscp = NULL;
1709     port->n_qdscp = 0;
1710     port->realdev_ofp_port = 0;
1711     port->vlandev_vid = 0;
1712     port->carrier_seq = netdev_get_carrier_resets(netdev);
1713     port->is_layer3 = netdev_vport_is_layer3(netdev);
1714
1715     if (netdev_vport_is_patch(netdev)) {
1716         /* By bailing out here, we don't submit the port to the sFlow module
1717          * to be considered for counter polling export.  This is correct
1718          * because the patch port represents an interface that sFlow considers
1719          * to be "internal" to the switch as a whole, and therefore not an
1720          * candidate for counter polling. */
1721         port->odp_port = ODPP_NONE;
1722         ofport_update_peer(port);
1723         return 0;
1724     }
1725
1726     error = dpif_port_query_by_name(ofproto->backer->dpif,
1727                                     netdev_vport_get_dpif_port(netdev, namebuf,
1728                                                                sizeof namebuf),
1729                                     &dpif_port);
1730     if (error) {
1731         return error;
1732     }
1733
1734     port->odp_port = dpif_port.port_no;
1735
1736     if (netdev_get_tunnel_config(netdev)) {
1737         tnl_port_add(port, port->up.netdev, port->odp_port);
1738         port->is_tunnel = true;
1739     } else {
1740         /* Sanity-check that a mapping doesn't already exist.  This
1741          * shouldn't happen for non-tunnel ports. */
1742         if (odp_port_to_ofp_port(ofproto, port->odp_port) != OFPP_NONE) {
1743             VLOG_ERR("port %s already has an OpenFlow port number",
1744                      dpif_port.name);
1745             dpif_port_destroy(&dpif_port);
1746             return EBUSY;
1747         }
1748
1749         ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
1750         hmap_insert(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node,
1751                     hash_odp_port(port->odp_port));
1752         ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
1753     }
1754     dpif_port_destroy(&dpif_port);
1755
1756     if (ofproto->sflow) {
1757         dpif_sflow_add_port(ofproto->sflow, port_, port->odp_port);
1758     }
1759
1760     return 0;
1761 }
1762
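/* Tears down 'port_': removes it from flow translation, deletes the
 * underlying datapath port if it still exists (as it does when the whole
 * ofproto is being destroyed), breaks any patch-port peering, and detaches
 * the port from its bundle, CFM, BFD, and sFlow. */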
1763 static void
1764 port_destruct(struct ofport *port_)
1765 {
1766     struct ofport_dpif *port = ofport_dpif_cast(port_);
1767     struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1768     const char *devname = netdev_get_name(port->up.netdev);
1769     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
1770     const char *dp_port_name;
1771
1772     ofproto->backer->need_revalidate = REV_RECONFIGURE;
1773     ovs_rwlock_wrlock(&xlate_rwlock);
1774     xlate_ofport_remove(port);
1775     ovs_rwlock_unlock(&xlate_rwlock);
1776
1777     dp_port_name = netdev_vport_get_dpif_port(port->up.netdev, namebuf,
1778                                               sizeof namebuf);
1779     if (dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
1780         /* The underlying device is still there, so delete it.  This
1781          * happens when the ofproto is being destroyed, since the caller
1782          * assumes that removal of attached ports will happen as part of
1783          * destruction. */
1784         if (!port->is_tunnel) {
1785             dpif_port_del(ofproto->backer->dpif, port->odp_port);
1786         }
1787     }
1788
1789     if (port->peer) {
1790         port->peer->peer = NULL;
1791         port->peer = NULL;
1792     }
1793
1794     if (port->odp_port != ODPP_NONE && !port->is_tunnel) {
1795         ovs_rwlock_wrlock(&ofproto->backer->odp_to_ofport_lock);
1796         hmap_remove(&ofproto->backer->odp_to_ofport_map, &port->odp_port_node);
1797         ovs_rwlock_unlock(&ofproto->backer->odp_to_ofport_lock);
1798     }
1799
1800     tnl_port_del(port);
1801     sset_find_and_delete(&ofproto->ports, devname);
1802     sset_find_and_delete(&ofproto->ghost_ports, devname);
1803     bundle_remove(port_);
1804     set_cfm(port_, NULL);
1805     set_bfd(port_, NULL);
1806     if (ofproto->sflow) {
1807         dpif_sflow_del_port(ofproto->sflow, port->odp_port);
1808     }
1809
1810     free(port->qdscp);
1811 }
1812
1813 static void
1814 port_modified(struct ofport *port_)
1815 {
1816     struct ofport_dpif *port = ofport_dpif_cast(port_);
1817
1818     if (port->bundle && port->bundle->bond) {
1819         bond_slave_set_netdev(port->bundle->bond, port, port->up.netdev);
1820     }
1821
1822     if (port->cfm) {
1823         cfm_set_netdev(port->cfm, port->up.netdev);
1824     }
1825
1826     if (port->bfd) {
1827         bfd_set_netdev(port->bfd, port->up.netdev);
1828     }
1829
1830     ofproto_dpif_monitor_port_update(port, port->bfd, port->cfm,
1831                                      port->up.pp.hw_addr);
1832
1833     if (port->is_tunnel && tnl_port_reconfigure(port, port->up.netdev,
1834                                                 port->odp_port)) {
1835         ofproto_dpif_cast(port->up.ofproto)->backer->need_revalidate =
1836             REV_RECONFIGURE;
1837     }
1838
1839     ofport_update_peer(port);
1840 }
1841
1842 static void
1843 port_reconfigured(struct ofport *port_, enum ofputil_port_config old_config)
1844 {
1845     struct ofport_dpif *port = ofport_dpif_cast(port_);
1846     struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
1847     enum ofputil_port_config changed = old_config ^ port->up.pp.config;
1848
1849     if (changed & (OFPUTIL_PC_NO_RECV | OFPUTIL_PC_NO_RECV_STP |
1850                    OFPUTIL_PC_NO_FWD | OFPUTIL_PC_NO_FLOOD |
1851                    OFPUTIL_PC_NO_PACKET_IN)) {
1852         ofproto->backer->need_revalidate = REV_RECONFIGURE;
1853
1854         if (changed & OFPUTIL_PC_NO_FLOOD && port->bundle) {
1855             bundle_update(port->bundle);
1856         }
1857     }
1858 }
1859
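/* Enables, reconfigures, or disables sFlow on 'ofproto_'.  Creating the
 * sFlow module on first use registers every existing port with it; dropping
 * the configuration releases the module entirely.  Either transition forces
 * revalidation. */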
1860 static int
1861 set_sflow(struct ofproto *ofproto_,
1862           const struct ofproto_sflow_options *sflow_options)
1863 {
1864     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1865     struct dpif_sflow *ds = ofproto->sflow;
1866
1867     if (sflow_options) {
1868         if (!ds) {
1869             struct ofport_dpif *ofport;
1870
1871             ds = ofproto->sflow = dpif_sflow_create();
1872             HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
1873                 dpif_sflow_add_port(ds, &ofport->up, ofport->odp_port);
1874             }
1875             ofproto->backer->need_revalidate = REV_RECONFIGURE;
1876         }
1877         dpif_sflow_set_options(ds, sflow_options);
1878     } else {
1879         if (ds) {
1880             dpif_sflow_unref(ds);
1881             ofproto->backer->need_revalidate = REV_RECONFIGURE;
1882             ofproto->sflow = NULL;
1883         }
1884     }
1885     return 0;
1886 }
1887
1888 static int
1889 set_ipfix(
1890     struct ofproto *ofproto_,
1891     const struct ofproto_ipfix_bridge_exporter_options *bridge_exporter_options,
1892     const struct ofproto_ipfix_flow_exporter_options *flow_exporters_options,
1893     size_t n_flow_exporters_options)
1894 {
1895     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
1896     struct dpif_ipfix *di = ofproto->ipfix;
1897     bool has_options = bridge_exporter_options || flow_exporters_options;
1898
1899     if (has_options && !di) {
1900         di = ofproto->ipfix = dpif_ipfix_create();
1901     }
1902
1903     if (di) {
1904         /* Call set_options in any case, so that any exporters about to
1905          * be destroyed cleanly flush their flow caches first. */
1906         dpif_ipfix_set_options(
1907             di, bridge_exporter_options, flow_exporters_options,
1908             n_flow_exporters_options);
1909
1910         if (!has_options) {
1911             dpif_ipfix_unref(di);
1912             ofproto->ipfix = NULL;
1913         }
1914     }
1915
1916     return 0;
1917 }
1918
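/* Configures CFM on 'ofport_' from 's', creating the CFM module on first
 * use.  A NULL 's' tears CFM down; a configuration that cfm_configure()
 * rejects also tears it down and returns EINVAL. */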
1919 static int
1920 set_cfm(struct ofport *ofport_, const struct cfm_settings *s)
1921 {
1922     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1923     int error = 0;
1924
1925     if (s) {
1926         if (!ofport->cfm) {
1927             struct ofproto_dpif *ofproto;
1928
1929             ofproto = ofproto_dpif_cast(ofport->up.ofproto);
1930             ofproto->backer->need_revalidate = REV_RECONFIGURE;
1931             ofport->cfm = cfm_create(ofport->up.netdev);
1932         }
1933
1934         if (cfm_configure(ofport->cfm, s)) {
1935             error = 0;
1936             goto out;
1937         }
1938
1939         error = EINVAL;
1940     }
1941     cfm_unref(ofport->cfm);
1942     ofport->cfm = NULL;
1943 out:
1944     ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
1945                                      ofport->up.pp.hw_addr);
1946     return error;
1947 }
1948
1949 static bool
1950 get_cfm_status(const struct ofport *ofport_,
1951                struct ofproto_cfm_status *status)
1952 {
1953     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1954
1955     if (ofport->cfm) {
1956         status->faults = cfm_get_fault(ofport->cfm);
1957         status->flap_count = cfm_get_flap_count(ofport->cfm);
1958         status->remote_opstate = cfm_get_opup(ofport->cfm);
1959         status->health = cfm_get_health(ofport->cfm);
1960         cfm_get_remote_mpids(ofport->cfm, &status->rmps, &status->n_rmps);
1961         return true;
1962     } else {
1963         return false;
1964     }
1965 }
1966
1967 static int
1968 set_bfd(struct ofport *ofport_, const struct smap *cfg)
1969 {
1970     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
1971     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1972     struct bfd *old;
1973
1974     old = ofport->bfd;
1975     ofport->bfd = bfd_configure(old, netdev_get_name(ofport->up.netdev),
1976                                 cfg, ofport->up.netdev);
1977     if (ofport->bfd != old) {
1978         ofproto->backer->need_revalidate = REV_RECONFIGURE;
1979     }
1980     ofproto_dpif_monitor_port_update(ofport, ofport->bfd, ofport->cfm,
1981                                      ofport->up.pp.hw_addr);
1982     return 0;
1983 }
1984
1985 static int
1986 get_bfd_status(struct ofport *ofport_, struct smap *smap)
1987 {
1988     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
1989
1990     if (ofport->bfd) {
1991         bfd_get_status(ofport->bfd, smap);
1992         return 0;
1993     } else {
1994         return ENOENT;
1995     }
1996 }
1997 \f
1998 /* Spanning Tree. */
1999
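/* stp_create() callback: transmits BPDU 'pkt' on the ofport that corresponds
 * to STP port 'port_num', using that port's own Ethernet address as the
 * source.  Always takes ownership of (and eventually frees) 'pkt'. */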
2000 static void
2001 send_bpdu_cb(struct ofpbuf *pkt, int port_num, void *ofproto_)
2002 {
2003     struct ofproto_dpif *ofproto = ofproto_;
2004     struct stp_port *sp = stp_get_port(ofproto->stp, port_num);
2005     struct ofport_dpif *ofport;
2006
2007     ofport = stp_port_get_aux(sp);
2008     if (!ofport) {
2009         VLOG_WARN_RL(&rl, "%s: cannot send BPDU on unknown port %d",
2010                      ofproto->up.name, port_num);
2011     } else {
2012         struct eth_header *eth = pkt->l2;
2013
2014         netdev_get_etheraddr(ofport->up.netdev, eth->eth_src);
2015         if (eth_addr_is_zero(eth->eth_src)) {
2016             VLOG_WARN_RL(&rl, "%s: cannot send BPDU on port %d "
2017                          "with unknown MAC", ofproto->up.name, port_num);
2018         } else {
2019             ofproto_dpif_send_packet(ofport, pkt);
2020         }
2021     }
2022     ofpbuf_delete(pkt);
2023 }
2024
2025 /* Configures STP on 'ofproto_' using the settings defined in 's'. */
2026 static int
2027 set_stp(struct ofproto *ofproto_, const struct ofproto_stp_settings *s)
2028 {
2029     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2030
2031     /* Only revalidate flows if the configuration changed. */
2032     if (!s != !ofproto->stp) {
2033         ofproto->backer->need_revalidate = REV_RECONFIGURE;
2034     }
2035
2036     if (s) {
2037         if (!ofproto->stp) {
2038             ofproto->stp = stp_create(ofproto_->name, s->system_id,
2039                                       send_bpdu_cb, ofproto);
2040             ofproto->stp_last_tick = time_msec();
2041         }
2042
2043         stp_set_bridge_id(ofproto->stp, s->system_id);
2044         stp_set_bridge_priority(ofproto->stp, s->priority);
2045         stp_set_hello_time(ofproto->stp, s->hello_time);
2046         stp_set_max_age(ofproto->stp, s->max_age);
2047         stp_set_forward_delay(ofproto->stp, s->fwd_delay);
2048     } else {
2049         struct ofport *ofport;
2050
2051         HMAP_FOR_EACH (ofport, hmap_node, &ofproto->up.ports) {
2052             set_stp_port(ofport, NULL);
2053         }
2054
2055         stp_unref(ofproto->stp);
2056         ofproto->stp = NULL;
2057     }
2058
2059     return 0;
2060 }
2061
2062 static int
2063 get_stp_status(struct ofproto *ofproto_, struct ofproto_stp_status *s)
2064 {
2065     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2066
2067     if (ofproto->stp) {
2068         s->enabled = true;
2069         s->bridge_id = stp_get_bridge_id(ofproto->stp);
2070         s->designated_root = stp_get_designated_root(ofproto->stp);
2071         s->root_path_cost = stp_get_root_path_cost(ofproto->stp);
2072     } else {
2073         s->enabled = false;
2074     }
2075
2076     return 0;
2077 }
2078
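/* Synchronizes 'ofport' with the state of its underlying STP port.  On a
 * change this flushes MAC learning if the ability to learn changed, forces
 * revalidation, refreshes bundle floodability if forwarding changed, and
 * rewrites the STP bits of the OpenFlow port state. */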
2079 static void
2080 update_stp_port_state(struct ofport_dpif *ofport)
2081 {
2082     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2083     enum stp_state state;
2084
2085     /* Figure out new state. */
2086     state = ofport->stp_port ? stp_port_get_state(ofport->stp_port)
2087                              : STP_DISABLED;
2088
2089     /* Update state. */
2090     if (ofport->stp_state != state) {
2091         enum ofputil_port_state of_state;
2092         bool fwd_change;
2093
2094         VLOG_DBG_RL(&rl, "port %s: STP state changed from %s to %s",
2095                     netdev_get_name(ofport->up.netdev),
2096                     stp_state_name(ofport->stp_state),
2097                     stp_state_name(state));
2098         if (stp_learn_in_state(ofport->stp_state)
2099                 != stp_learn_in_state(state)) {
2100             /* XXX Learning action flows should also be flushed. */
2101             ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2102             mac_learning_flush(ofproto->ml);
2103             ovs_rwlock_unlock(&ofproto->ml->rwlock);
2104         }
2105         fwd_change = stp_forward_in_state(ofport->stp_state)
2106                         != stp_forward_in_state(state);
2107
2108         ofproto->backer->need_revalidate = REV_STP;
2109         ofport->stp_state = state;
2110         ofport->stp_state_entered = time_msec();
2111
2112         if (fwd_change && ofport->bundle) {
2113             bundle_update(ofport->bundle);
2114         }
2115
2116         /* Update the STP state bits in the OpenFlow port description. */
2117         of_state = ofport->up.pp.state & ~OFPUTIL_PS_STP_MASK;
2118         of_state |= (state == STP_LISTENING ? OFPUTIL_PS_STP_LISTEN
2119                      : state == STP_LEARNING ? OFPUTIL_PS_STP_LEARN
2120                      : state == STP_FORWARDING ? OFPUTIL_PS_STP_FORWARD
2121                      : state == STP_BLOCKING ?  OFPUTIL_PS_STP_BLOCK
2122                      : 0);
2123         ofproto_port_set_state(&ofport->up, of_state);
2124     }
2125 }
2126
2127 /* Configures STP on 'ofport_' using the settings defined in 's'.  The
2128  * caller is responsible for assigning STP port numbers and ensuring
2129  * there are no duplicates. */
2130 static int
2131 set_stp_port(struct ofport *ofport_,
2132              const struct ofproto_port_stp_settings *s)
2133 {
2134     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2135     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2136     struct stp_port *sp = ofport->stp_port;
2137
2138     if (!s || !s->enable) {
2139         if (sp) {
2140             ofport->stp_port = NULL;
2141             stp_port_disable(sp);
2142             update_stp_port_state(ofport);
2143         }
2144         return 0;
2145     } else if (sp && stp_port_no(sp) != s->port_num
2146             && ofport == stp_port_get_aux(sp)) {
2147         /* The port-id changed, so disable the old one if it's not
2148          * already in use by another port. */
2149         stp_port_disable(sp);
2150     }
2151
2152     sp = ofport->stp_port = stp_get_port(ofproto->stp, s->port_num);
2153     stp_port_enable(sp);
2154
2155     stp_port_set_aux(sp, ofport);
2156     stp_port_set_priority(sp, s->priority);
2157     stp_port_set_path_cost(sp, s->path_cost);
2158
2159     update_stp_port_state(ofport);
2160
2161     return 0;
2162 }
2163
2164 static int
2165 get_stp_port_status(struct ofport *ofport_,
2166                     struct ofproto_port_stp_status *s)
2167 {
2168     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2169     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2170     struct stp_port *sp = ofport->stp_port;
2171
2172     if (!ofproto->stp || !sp) {
2173         s->enabled = false;
2174         return 0;
2175     }
2176
2177     s->enabled = true;
2178     s->port_id = stp_port_get_id(sp);
2179     s->state = stp_port_get_state(sp);
2180     s->sec_in_state = (time_msec() - ofport->stp_state_entered) / 1000;
2181     s->role = stp_port_get_role(sp);
2182
2183     return 0;
2184 }
2185
2186 static int
2187 get_stp_port_stats(struct ofport *ofport_,
2188                    struct ofproto_port_stp_stats *s)
2189 {
2190     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2191     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2192     struct stp_port *sp = ofport->stp_port;
2193
2194     if (!ofproto->stp || !sp) {
2195         s->enabled = false;
2196         return 0;
2197     }
2198
2199     s->enabled = true;
2200     stp_port_get_counts(sp, &s->tx_count, &s->rx_count, &s->error_count);
2201
2202     return 0;
2203 }
2204
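/* Advances 'ofproto''s STP state machine by the wall-clock time that has
 * elapsed since the last tick, propagates any resulting per-port state
 * changes, and flushes MAC learning if STP requested an FDB flush. */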
2205 static void
2206 stp_run(struct ofproto_dpif *ofproto)
2207 {
2208     if (ofproto->stp) {
2209         long long int now = time_msec();
2210         long long int elapsed = now - ofproto->stp_last_tick;
2211         struct stp_port *sp;
2212
2213         if (elapsed > 0) {
2214             stp_tick(ofproto->stp, MIN(INT_MAX, elapsed));
2215             ofproto->stp_last_tick = now;
2216         }
2217         while (stp_get_changed_port(ofproto->stp, &sp)) {
2218             struct ofport_dpif *ofport = stp_port_get_aux(sp);
2219
2220             if (ofport) {
2221                 update_stp_port_state(ofport);
2222             }
2223         }
2224
2225         if (stp_check_and_reset_fdb_flush(ofproto->stp)) {
2226             ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2227             mac_learning_flush(ofproto->ml);
2228             ovs_rwlock_unlock(&ofproto->ml->rwlock);
2229         }
2230     }
2231 }
2232
2233 static void
2234 stp_wait(struct ofproto_dpif *ofproto)
2235 {
2236     if (ofproto->stp) {
2237         poll_timer_wait(1000);
2238     }
2239 }
2240 \f
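/* Records the OpenFlow-queue-to-DSCP mapping 'qdscp' for 'ofport_', forcing
 * revalidation only if the mapping actually changed. */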
2241 static int
2242 set_queues(struct ofport *ofport_, const struct ofproto_port_queue *qdscp,
2243            size_t n_qdscp)
2244 {
2245     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
2246     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2247
2248     if (ofport->n_qdscp != n_qdscp
2249         || (n_qdscp && memcmp(ofport->qdscp, qdscp,
2250                               n_qdscp * sizeof *qdscp))) {
2251         ofproto->backer->need_revalidate = REV_RECONFIGURE;
2252         free(ofport->qdscp);
2253         ofport->qdscp = n_qdscp
2254             ? xmemdup(qdscp, n_qdscp * sizeof *qdscp)
2255             : NULL;
2256         ofport->n_qdscp = n_qdscp;
2257     }
2258
2259     return 0;
2260 }
2261 \f
2262 /* Bundles. */
2263
2264 /* Expires all MAC learning entries associated with 'bundle' and forces its
2265  * ofproto to revalidate every flow.
2266  *
2267  * Normally MAC learning entries are removed only from the ofproto associated
2268  * with 'bundle', but if 'all_ofprotos' is true, then the MAC learning entries
2269  * are removed from every ofproto.  When patch ports and SLB bonds are in
2270  * use and a VM migrates but its gratuitous ARPs are somehow lost, this
2271  * avoids a MAC_ENTRY_IDLE_TIME delay before the migrated VM can communicate
2272  * with the host from which it migrated. */
2273 static void
2274 bundle_flush_macs(struct ofbundle *bundle, bool all_ofprotos)
2275 {
2276     struct ofproto_dpif *ofproto = bundle->ofproto;
2277     struct mac_learning *ml = ofproto->ml;
2278     struct mac_entry *mac, *next_mac;
2279
2280     ofproto->backer->need_revalidate = REV_RECONFIGURE;
2281     ovs_rwlock_wrlock(&ml->rwlock);
2282     LIST_FOR_EACH_SAFE (mac, next_mac, lru_node, &ml->lrus) {
2283         if (mac->port.p == bundle) {
2284             if (all_ofprotos) {
2285                 struct ofproto_dpif *o;
2286
2287                 HMAP_FOR_EACH (o, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2288                     if (o != ofproto) {
2289                         struct mac_entry *e;
2290
2291                         ovs_rwlock_wrlock(&o->ml->rwlock);
2292                         e = mac_learning_lookup(o->ml, mac->mac, mac->vlan);
2293                         if (e) {
2294                             mac_learning_expire(o->ml, e);
2295                         }
2296                         ovs_rwlock_unlock(&o->ml->rwlock);
2297                     }
2298                 }
2299             }
2300
2301             mac_learning_expire(ml, mac);
2302         }
2303     }
2304     ovs_rwlock_unlock(&ml->rwlock);
2305 }
2306
2307 static struct ofbundle *
2308 bundle_lookup(const struct ofproto_dpif *ofproto, void *aux)
2309 {
2310     struct ofbundle *bundle;
2311
2312     HMAP_FOR_EACH_IN_BUCKET (bundle, hmap_node, hash_pointer(aux, 0),
2313                              &ofproto->bundles) {
2314         if (bundle->aux == aux) {
2315             return bundle;
2316         }
2317     }
2318     return NULL;
2319 }
2320
2321 static void
2322 bundle_update(struct ofbundle *bundle)
2323 {
2324     struct ofport_dpif *port;
2325
2326     bundle->floodable = true;
2327     LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2328         if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2329             || port->is_layer3
2330             || !stp_forward_in_state(port->stp_state)) {
2331             bundle->floodable = false;
2332             break;
2333         }
2334     }
2335 }
2336
2337 static void
2338 bundle_del_port(struct ofport_dpif *port)
2339 {
2340     struct ofbundle *bundle = port->bundle;
2341
2342     bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2343
2344     list_remove(&port->bundle_node);
2345     port->bundle = NULL;
2346
2347     if (bundle->lacp) {
2348         lacp_slave_unregister(bundle->lacp, port);
2349     }
2350     if (bundle->bond) {
2351         bond_slave_unregister(bundle->bond, port);
2352     }
2353
2354     bundle_update(bundle);
2355 }
2356
2357 static bool
2358 bundle_add_port(struct ofbundle *bundle, ofp_port_t ofp_port,
2359                 struct lacp_slave_settings *lacp)
2360 {
2361     struct ofport_dpif *port;
2362
2363     port = get_ofp_port(bundle->ofproto, ofp_port);
2364     if (!port) {
2365         return false;
2366     }
2367
2368     if (port->bundle != bundle) {
2369         bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2370         if (port->bundle) {
2371             bundle_remove(&port->up);
2372         }
2373
2374         port->bundle = bundle;
2375         list_push_back(&bundle->ports, &port->bundle_node);
2376         if (port->up.pp.config & OFPUTIL_PC_NO_FLOOD
2377             || port->is_layer3
2378             || !stp_forward_in_state(port->stp_state)) {
2379             bundle->floodable = false;
2380         }
2381     }
2382     if (lacp) {
2383         bundle->ofproto->backer->need_revalidate = REV_RECONFIGURE;
2384         lacp_slave_register(bundle->lacp, port, lacp);
2385     }
2386
2387     return true;
2388 }
2389
2390 static void
2391 bundle_destroy(struct ofbundle *bundle)
2392 {
2393     struct ofproto_dpif *ofproto;
2394     struct ofport_dpif *port, *next_port;
2395
2396     if (!bundle) {
2397         return;
2398     }
2399
2400     ofproto = bundle->ofproto;
2401     mbridge_unregister_bundle(ofproto->mbridge, bundle->aux);
2402
2403     ovs_rwlock_wrlock(&xlate_rwlock);
2404     xlate_bundle_remove(bundle);
2405     ovs_rwlock_unlock(&xlate_rwlock);
2406
2407     LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2408         bundle_del_port(port);
2409     }
2410
2411     bundle_flush_macs(bundle, true);
2412     hmap_remove(&ofproto->bundles, &bundle->hmap_node);
2413     free(bundle->name);
2414     free(bundle->trunks);
2415     lacp_unref(bundle->lacp);
2416     bond_unref(bundle->bond);
2417     free(bundle);
2418 }
2419
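/* Creates or reconfigures the bundle identified by 'aux' (or destroys it,
 * when 's' is NULL): updates the LACP and bond modules, the set of member
 * ports, and the VLAN mode, tag, and trunks, flushing this bundle's MAC
 * learning entries if anything that affects learning changed. */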
2420 static int
2421 bundle_set(struct ofproto *ofproto_, void *aux,
2422            const struct ofproto_bundle_settings *s)
2423 {
2424     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2425     bool need_flush = false;
2426     struct ofport_dpif *port;
2427     struct ofbundle *bundle;
2428     unsigned long *trunks;
2429     int vlan;
2430     size_t i;
2431     bool ok;
2432
2433     if (!s) {
2434         bundle_destroy(bundle_lookup(ofproto, aux));
2435         return 0;
2436     }
2437
2438     ovs_assert(s->n_slaves == 1 || s->bond != NULL);
2439     ovs_assert((s->lacp != NULL) == (s->lacp_slaves != NULL));
2440
2441     bundle = bundle_lookup(ofproto, aux);
2442     if (!bundle) {
2443         bundle = xmalloc(sizeof *bundle);
2444
2445         bundle->ofproto = ofproto;
2446         hmap_insert(&ofproto->bundles, &bundle->hmap_node,
2447                     hash_pointer(aux, 0));
2448         bundle->aux = aux;
2449         bundle->name = NULL;
2450
2451         list_init(&bundle->ports);
2452         bundle->vlan_mode = PORT_VLAN_TRUNK;
2453         bundle->vlan = -1;
2454         bundle->trunks = NULL;
2455         bundle->use_priority_tags = s->use_priority_tags;
2456         bundle->lacp = NULL;
2457         bundle->bond = NULL;
2458
2459         bundle->floodable = true;
2460         mbridge_register_bundle(ofproto->mbridge, bundle);
2461     }
2462
2463     if (!bundle->name || strcmp(s->name, bundle->name)) {
2464         free(bundle->name);
2465         bundle->name = xstrdup(s->name);
2466     }
2467
2468     /* LACP. */
2469     if (s->lacp) {
2470         if (!bundle->lacp) {
2471             ofproto->backer->need_revalidate = REV_RECONFIGURE;
2472             bundle->lacp = lacp_create();
2473         }
2474         lacp_configure(bundle->lacp, s->lacp);
2475     } else {
2476         lacp_unref(bundle->lacp);
2477         bundle->lacp = NULL;
2478     }
2479
2480     /* Update set of ports. */
2481     ok = true;
2482     for (i = 0; i < s->n_slaves; i++) {
2483         if (!bundle_add_port(bundle, s->slaves[i],
2484                              s->lacp ? &s->lacp_slaves[i] : NULL)) {
2485             ok = false;
2486         }
2487     }
2488     if (!ok || list_size(&bundle->ports) != s->n_slaves) {
2489         struct ofport_dpif *next_port;
2490
2491         LIST_FOR_EACH_SAFE (port, next_port, bundle_node, &bundle->ports) {
2492             for (i = 0; i < s->n_slaves; i++) {
2493                 if (s->slaves[i] == port->up.ofp_port) {
2494                     goto found;
2495                 }
2496             }
2497
2498             bundle_del_port(port);
2499         found: ;
2500         }
2501     }
2502     ovs_assert(list_size(&bundle->ports) <= s->n_slaves);
2503
2504     if (list_is_empty(&bundle->ports)) {
2505         bundle_destroy(bundle);
2506         return EINVAL;
2507     }
2508
2509     /* Set VLAN tagging mode. */
2510     if (s->vlan_mode != bundle->vlan_mode
2511         || s->use_priority_tags != bundle->use_priority_tags) {
2512         bundle->vlan_mode = s->vlan_mode;
2513         bundle->use_priority_tags = s->use_priority_tags;
2514         need_flush = true;
2515     }
2516
2517     /* Set VLAN tag. */
2518     vlan = (s->vlan_mode == PORT_VLAN_TRUNK ? -1
2519             : s->vlan >= 0 && s->vlan <= 4095 ? s->vlan
2520             : 0);
2521     if (vlan != bundle->vlan) {
2522         bundle->vlan = vlan;
2523         need_flush = true;
2524     }
2525
2526     /* Get trunked VLANs. */
2527     switch (s->vlan_mode) {
2528     case PORT_VLAN_ACCESS:
2529         trunks = NULL;
2530         break;
2531
2532     case PORT_VLAN_TRUNK:
2533         trunks = CONST_CAST(unsigned long *, s->trunks);
2534         break;
2535
2536     case PORT_VLAN_NATIVE_UNTAGGED:
2537     case PORT_VLAN_NATIVE_TAGGED:
2538         if (vlan != 0 && (!s->trunks
2539                           || !bitmap_is_set(s->trunks, vlan)
2540                           || bitmap_is_set(s->trunks, 0))) {
2541             /* Force trunking the native VLAN and prohibit trunking VLAN 0. */
2542             if (s->trunks) {
2543                 trunks = bitmap_clone(s->trunks, 4096);
2544             } else {
2545                 trunks = bitmap_allocate1(4096);
2546             }
2547             bitmap_set1(trunks, vlan);
2548             bitmap_set0(trunks, 0);
2549         } else {
2550             trunks = CONST_CAST(unsigned long *, s->trunks);
2551         }
2552         break;
2553
2554     default:
2555         NOT_REACHED();
2556     }
2557     if (!vlan_bitmap_equal(trunks, bundle->trunks)) {
2558         free(bundle->trunks);
2559         if (trunks == s->trunks) {
2560             bundle->trunks = vlan_bitmap_clone(trunks);
2561         } else {
2562             bundle->trunks = trunks;
2563             trunks = NULL;
2564         }
2565         need_flush = true;
2566     }
2567     if (trunks != s->trunks) {
2568         free(trunks);
2569     }
2570
2571     /* Bonding. */
2572     if (!list_is_short(&bundle->ports)) {
2573         bundle->ofproto->has_bonded_bundles = true;
2574         if (bundle->bond) {
2575             if (bond_reconfigure(bundle->bond, s->bond)) {
2576                 ofproto->backer->need_revalidate = REV_RECONFIGURE;
2577             }
2578         } else {
2579             bundle->bond = bond_create(s->bond);
2580             ofproto->backer->need_revalidate = REV_RECONFIGURE;
2581         }
2582
2583         LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2584             bond_slave_register(bundle->bond, port, port->up.netdev);
2585         }
2586     } else {
2587         bond_unref(bundle->bond);
2588         bundle->bond = NULL;
2589     }
2590
2591     /* If we changed something that would affect MAC learning, un-learn
2592      * everything on this port and force flow revalidation. */
2593     if (need_flush) {
2594         bundle_flush_macs(bundle, false);
2595     }
2596
2597     return 0;
2598 }
2599
2600 static void
2601 bundle_remove(struct ofport *port_)
2602 {
2603     struct ofport_dpif *port = ofport_dpif_cast(port_);
2604     struct ofbundle *bundle = port->bundle;
2605
2606     if (bundle) {
2607         bundle_del_port(port);
2608         if (list_is_empty(&bundle->ports)) {
2609             bundle_destroy(bundle);
2610         } else if (list_is_short(&bundle->ports)) {
2611             bond_unref(bundle->bond);
2612             bundle->bond = NULL;
2613         }
2614     }
2615 }
2616
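/* lacp_run() callback: wraps the 'pdu_size'-byte LACP PDU in 'pdu' in an
 * Ethernet frame from 'port_''s own MAC to the LACP multicast address and
 * transmits it on that port. */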
2617 static void
2618 send_pdu_cb(void *port_, const void *pdu, size_t pdu_size)
2619 {
2620     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 10);
2621     struct ofport_dpif *port = port_;
2622     uint8_t ea[ETH_ADDR_LEN];
2623     int error;
2624
2625     error = netdev_get_etheraddr(port->up.netdev, ea);
2626     if (!error) {
2627         struct ofpbuf packet;
2628         void *packet_pdu;
2629
2630         ofpbuf_init(&packet, 0);
2631         packet_pdu = eth_compose(&packet, eth_addr_lacp, ea, ETH_TYPE_LACP,
2632                                  pdu_size);
2633         memcpy(packet_pdu, pdu, pdu_size);
2634
2635         ofproto_dpif_send_packet(port, &packet);
2636         ofpbuf_uninit(&packet);
2637     } else {
2638         VLOG_ERR_RL(&rl, "port %s: cannot obtain Ethernet address of iface "
2639                     "%s (%s)", port->bundle->name,
2640                     netdev_get_name(port->up.netdev), ovs_strerror(error));
2641     }
2642 }
2643
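/* Composes a gratuitous learning packet for each MAC learning entry that
 * points at a port other than 'bundle' and sends it out over the bond,
 * logging a rate-limited warning if any of the transmissions fail. */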
2644 static void
2645 bundle_send_learning_packets(struct ofbundle *bundle)
2646 {
2647     struct ofproto_dpif *ofproto = bundle->ofproto;
2648     struct ofpbuf *learning_packet;
2649     int error, n_packets, n_errors;
2650     struct mac_entry *e;
2651     struct list packets;
2652
2653     list_init(&packets);
2654     ovs_rwlock_rdlock(&ofproto->ml->rwlock);
2655     LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
2656         if (e->port.p != bundle) {
2657             void *port_void;
2658
2659             learning_packet = bond_compose_learning_packet(bundle->bond,
2660                                                            e->mac, e->vlan,
2661                                                            &port_void);
2662             learning_packet->private_p = port_void;
2663             list_push_back(&packets, &learning_packet->list_node);
2664         }
2665     }
2666     ovs_rwlock_unlock(&ofproto->ml->rwlock);
2667
2668     error = n_packets = n_errors = 0;
2669     LIST_FOR_EACH (learning_packet, list_node, &packets) {
2670         int ret;
2671
2672         ret = ofproto_dpif_send_packet(learning_packet->private_p,
                                            learning_packet);
2673         if (ret) {
2674             error = ret;
2675             n_errors++;
2676         }
2677         n_packets++;
2678     }
2679     ofpbuf_list_delete(&packets);
2680
2681     if (n_errors) {
2682         static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
2683         VLOG_WARN_RL(&rl, "bond %s: %d errors sending %d gratuitous learning "
2684                      "packets, last error was: %s",
2685                      bundle->name, n_errors, n_packets, ovs_strerror(error));
2686     } else {
2687         VLOG_DBG("bond %s: sent %d gratuitous learning packets",
2688                  bundle->name, n_packets);
2689     }
2690 }
2691
2692 static void
2693 bundle_run(struct ofbundle *bundle)
2694 {
2695     if (bundle->lacp) {
2696         lacp_run(bundle->lacp, send_pdu_cb);
2697     }
2698     if (bundle->bond) {
2699         struct ofport_dpif *port;
2700
2701         LIST_FOR_EACH (port, bundle_node, &bundle->ports) {
2702             bond_slave_set_may_enable(bundle->bond, port, port->may_enable);
2703         }
2704
2705         if (bond_run(bundle->bond, lacp_status(bundle->lacp))) {
2706             bundle->ofproto->backer->need_revalidate = REV_BOND;
2707         }
2708
2709         if (bond_should_send_learning_packets(bundle->bond)) {
2710             bundle_send_learning_packets(bundle);
2711         }
2712     }
2713 }
2714
2715 static void
2716 bundle_wait(struct ofbundle *bundle)
2717 {
2718     if (bundle->lacp) {
2719         lacp_wait(bundle->lacp);
2720     }
2721     if (bundle->bond) {
2722         bond_wait(bundle->bond);
2723     }
2724 }
2725 \f
2726 /* Mirrors. */
2727
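/* Applies mirror configuration 's' (or destroys the mirror, when 's' is
 * NULL) by resolving the opaque bundle handles in 's' to ofbundles and
 * handing the result to the mbridge layer. */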
2728 static int
2729 mirror_set__(struct ofproto *ofproto_, void *aux,
2730              const struct ofproto_mirror_settings *s)
2731 {
2732     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2733     struct ofbundle **srcs, **dsts;
2734     int error;
2735     size_t i;
2736
2737     if (!s) {
2738         mirror_destroy(ofproto->mbridge, aux);
2739         return 0;
2740     }
2741
2742     srcs = xmalloc(s->n_srcs * sizeof *srcs);
2743     dsts = xmalloc(s->n_dsts * sizeof *dsts);
2744
2745     for (i = 0; i < s->n_srcs; i++) {
2746         srcs[i] = bundle_lookup(ofproto, s->srcs[i]);
2747     }
2748
2749     for (i = 0; i < s->n_dsts; i++) {
2750         dsts[i] = bundle_lookup(ofproto, s->dsts[i]);
2751     }
2752
2753     error = mirror_set(ofproto->mbridge, aux, s->name, srcs, s->n_srcs, dsts,
2754                        s->n_dsts, s->src_vlans,
2755                        bundle_lookup(ofproto, s->out_bundle), s->out_vlan);
2756     free(srcs);
2757     free(dsts);
2758     return error;
2759 }
2760
2761 static int
2762 mirror_get_stats__(struct ofproto *ofproto, void *aux,
2763                    uint64_t *packets, uint64_t *bytes)
2764 {
2765     push_all_stats();
2766     return mirror_get_stats(ofproto_dpif_cast(ofproto)->mbridge, aux, packets,
2767                             bytes);
2768 }
2769
2770 static int
2771 set_flood_vlans(struct ofproto *ofproto_, unsigned long *flood_vlans)
2772 {
2773     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2774     ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2775     if (mac_learning_set_flood_vlans(ofproto->ml, flood_vlans)) {
2776         mac_learning_flush(ofproto->ml);
2777     }
2778     ovs_rwlock_unlock(&ofproto->ml->rwlock);
2779     return 0;
2780 }
2781
2782 static bool
2783 is_mirror_output_bundle(const struct ofproto *ofproto_, void *aux)
2784 {
2785     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2786     struct ofbundle *bundle = bundle_lookup(ofproto, aux);
2787     return bundle && mirror_bundle_out(ofproto->mbridge, bundle) != 0;
2788 }
2789
2790 static void
2791 forward_bpdu_changed(struct ofproto *ofproto_)
2792 {
2793     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2794     ofproto->backer->need_revalidate = REV_RECONFIGURE;
2795 }
2796
2797 static void
2798 set_mac_table_config(struct ofproto *ofproto_, unsigned int idle_time,
2799                      size_t max_entries)
2800 {
2801     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2802     ovs_rwlock_wrlock(&ofproto->ml->rwlock);
2803     mac_learning_set_idle_time(ofproto->ml, idle_time);
2804     mac_learning_set_max_entries(ofproto->ml, max_entries);
2805     ovs_rwlock_unlock(&ofproto->ml->rwlock);
2806 }
2807 \f
2808 /* Ports. */
2809
2810 static struct ofport_dpif *
2811 get_ofp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
2812 {
2813     struct ofport *ofport = ofproto_get_port(&ofproto->up, ofp_port);
2814     return ofport ? ofport_dpif_cast(ofport) : NULL;
2815 }
2816
2817 static struct ofport_dpif *
2818 get_odp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
2819 {
2820     struct ofport_dpif *port = odp_port_to_ofport(ofproto->backer, odp_port);
2821     return port && &ofproto->up == port->up.ofproto ? port : NULL;
2822 }
2823
2824 static void
2825 ofproto_port_from_dpif_port(struct ofproto_dpif *ofproto,
2826                             struct ofproto_port *ofproto_port,
2827                             struct dpif_port *dpif_port)
2828 {
2829     ofproto_port->name = dpif_port->name;
2830     ofproto_port->type = dpif_port->type;
2831     ofproto_port->ofp_port = odp_port_to_ofp_port(ofproto, dpif_port->port_no);
2832 }
2833
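/* For a patch port, re-resolves 'ofport''s peer by searching every bridge on
 * the same backer for a port whose configured peer name refers back to
 * 'ofport', linking the two (or leaving 'ofport' peerless) accordingly.
 * No-op for other kinds of ports. */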
2834 static void
2835 ofport_update_peer(struct ofport_dpif *ofport)
2836 {
2837     const struct ofproto_dpif *ofproto;
2838     struct dpif_backer *backer;
2839     char *peer_name;
2840
2841     if (!netdev_vport_is_patch(ofport->up.netdev)) {
2842         return;
2843     }
2844
2845     backer = ofproto_dpif_cast(ofport->up.ofproto)->backer;
2846     backer->need_revalidate = REV_RECONFIGURE;
2847
2848     if (ofport->peer) {
2849         ofport->peer->peer = NULL;
2850         ofport->peer = NULL;
2851     }
2852
2853     peer_name = netdev_vport_patch_peer(ofport->up.netdev);
2854     if (!peer_name) {
2855         return;
2856     }
2857
2858     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
2859         struct ofport *peer_ofport;
2860         struct ofport_dpif *peer;
2861         char *peer_peer;
2862
2863         if (ofproto->backer != backer) {
2864             continue;
2865         }
2866
2867         peer_ofport = shash_find_data(&ofproto->up.port_by_name, peer_name);
2868         if (!peer_ofport) {
2869             continue;
2870         }
2871
2872         peer = ofport_dpif_cast(peer_ofport);
2873         peer_peer = netdev_vport_patch_peer(peer->up.netdev);
2874         if (peer_peer && !strcmp(netdev_get_name(ofport->up.netdev),
2875                                  peer_peer)) {
2876             ofport->peer = peer;
2877             ofport->peer->peer = ofport;
2878         }
2879         free(peer_peer);
2880
2881         break;
2882     }
2883     free(peer_name);
2884 }
2885
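/* Periodic per-port housekeeping: recomputes whether 'ofport' may be
 * enabled, combining carrier state with CFM, BFD, and LACP health, notifies
 * LACP of carrier flaps, and forces revalidation when the answer changes. */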
2886 static void
2887 port_run(struct ofport_dpif *ofport)
2888 {
2889     long long int carrier_seq = netdev_get_carrier_resets(ofport->up.netdev);
2890     bool carrier_changed = carrier_seq != ofport->carrier_seq;
2891     bool enable = netdev_get_carrier(ofport->up.netdev);
2892     bool cfm_enable = false;
2893     bool bfd_enable = false;
2894
2895     ofport->carrier_seq = carrier_seq;
2896
2897     if (ofport->cfm) {
2898         int cfm_opup = cfm_get_opup(ofport->cfm);
2899
2900         cfm_enable = !cfm_get_fault(ofport->cfm);
2901
2902         if (cfm_opup >= 0) {
2903             cfm_enable = cfm_enable && cfm_opup;
2904         }
2905     }
2906
2907     if (ofport->bfd) {
2908         bfd_enable = bfd_forwarding(ofport->bfd);
2909     }
2910
2911     if (ofport->bfd || ofport->cfm) {
2912         enable = enable && (cfm_enable || bfd_enable);
2913     }
2914
2915     if (ofport->bundle) {
2916         enable = enable && lacp_slave_may_enable(ofport->bundle->lacp, ofport);
2917         if (carrier_changed) {
2918             lacp_slave_carrier_changed(ofport->bundle->lacp, ofport);
2919         }
2920     }
2921
2922     if (ofport->may_enable != enable) {
2923         struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
2924         ofproto->backer->need_revalidate = REV_PORT_TOGGLED;
2925     }
2926
2927     ofport->may_enable = enable;
2928 }
2929
2930 static int
2931 port_query_by_name(const struct ofproto *ofproto_, const char *devname,
2932                    struct ofproto_port *ofproto_port)
2933 {
2934     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2935     struct dpif_port dpif_port;
2936     int error;
2937
2938     if (sset_contains(&ofproto->ghost_ports, devname)) {
2939         const char *type = netdev_get_type_from_name(devname);
2940
2941         /* We may be called before ofproto->up.port_by_name is populated with
2942          * the appropriate ofport.  For this reason, we must get the name and
2943          * type from the netdev layer directly. */
2944         if (type) {
2945             const struct ofport *ofport;
2946
2947             ofport = shash_find_data(&ofproto->up.port_by_name, devname);
2948             ofproto_port->ofp_port = ofport ? ofport->ofp_port : OFPP_NONE;
2949             ofproto_port->name = xstrdup(devname);
2950             ofproto_port->type = xstrdup(type);
2951             return 0;
2952         }
2953         return ENODEV;
2954     }
2955
2956     if (!sset_contains(&ofproto->ports, devname)) {
2957         return ENODEV;
2958     }
2959     error = dpif_port_query_by_name(ofproto->backer->dpif,
2960                                     devname, &dpif_port);
2961     if (!error) {
2962         ofproto_port_from_dpif_port(ofproto, ofproto_port, &dpif_port);
2963     }
2964     return error;
2965 }
2966
2967 static int
2968 port_add(struct ofproto *ofproto_, struct netdev *netdev)
2969 {
2970     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
2971     const char *devname = netdev_get_name(netdev);
2972     char namebuf[NETDEV_VPORT_NAME_BUFSIZE];
2973     const char *dp_port_name;
2974
2975     if (netdev_vport_is_patch(netdev)) {
2976         sset_add(&ofproto->ghost_ports, netdev_get_name(netdev));
2977         return 0;
2978     }
2979
2980     dp_port_name = netdev_vport_get_dpif_port(netdev, namebuf, sizeof namebuf);
2981     if (!dpif_port_exists(ofproto->backer->dpif, dp_port_name)) {
2982         odp_port_t port_no = ODPP_NONE;
2983         int error;
2984
2985         error = dpif_port_add(ofproto->backer->dpif, netdev, &port_no);
2986         if (error) {
2987             return error;
2988         }
2989         if (netdev_get_tunnel_config(netdev)) {
2990             simap_put(&ofproto->backer->tnl_backers,
2991                       dp_port_name, odp_to_u32(port_no));
2992         }
2993     }
2994
2995     if (netdev_get_tunnel_config(netdev)) {
2996         sset_add(&ofproto->ghost_ports, devname);
2997     } else {
2998         sset_add(&ofproto->ports, devname);
2999     }
3000     return 0;
3001 }
3002
3003 static int
3004 port_del(struct ofproto *ofproto_, ofp_port_t ofp_port)
3005 {
3006     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3007     struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
3008     int error = 0;
3009
3010     if (!ofport) {
3011         return 0;
3012     }
3013
3014     sset_find_and_delete(&ofproto->ghost_ports,
3015                          netdev_get_name(ofport->up.netdev));
3016     ofproto->backer->need_revalidate = REV_RECONFIGURE;
3017     if (!ofport->is_tunnel && !netdev_vport_is_patch(ofport->up.netdev)) {
3018         error = dpif_port_del(ofproto->backer->dpif, ofport->odp_port);
3019         if (!error) {
3020             /* The caller is going to close ofport->up.netdev.  If this is a
3021              * bonded port, then the bond is using that netdev, so remove it
3022              * from the bond.  The client will need to reconfigure everything
3023              * after deleting ports, so then the slave will get re-added. */
3024             bundle_remove(&ofport->up);
3025         }
3026     }
3027     return error;
3028 }
3029
3030 static int
3031 port_get_stats(const struct ofport *ofport_, struct netdev_stats *stats)
3032 {
3033     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3034     int error;
3035
3036     push_all_stats();
3037
3038     error = netdev_get_stats(ofport->up.netdev, stats);
3039
3040     if (!error && ofport_->ofp_port == OFPP_LOCAL) {
3041         struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
3042
3043         ovs_mutex_lock(&ofproto->stats_mutex);
3044         /* ofproto->stats.tx_packets represents packets that we created
3045          * internally and sent to some port (e.g. packets sent with
3046          * ofproto_dpif_send_packet()).  Account for them as if they had
3047          * come from OFPP_LOCAL and got forwarded. */
3048
3049         if (stats->rx_packets != UINT64_MAX) {
3050             stats->rx_packets += ofproto->stats.tx_packets;
3051         }
3052
3053         if (stats->rx_bytes != UINT64_MAX) {
3054             stats->rx_bytes += ofproto->stats.tx_bytes;
3055         }
3056
3057         /* ofproto->stats.rx_packets represents packets that were received on
3058          * some port and that we processed internally and dropped (e.g. STP).
3059          * Account for them as if they had been forwarded to OFPP_LOCAL. */
3060
3061         if (stats->tx_packets != UINT64_MAX) {
3062             stats->tx_packets += ofproto->stats.rx_packets;
3063         }
3064
3065         if (stats->tx_bytes != UINT64_MAX) {
3066             stats->tx_bytes += ofproto->stats.rx_bytes;
3067         }
3068         ovs_mutex_unlock(&ofproto->stats_mutex);
3069     }
3070
3071     return error;
3072 }
3073
3074 struct port_dump_state {
3075     uint32_t bucket;
3076     uint32_t offset;
3077     bool ghost;
3078
3079     struct ofproto_port port;
3080     bool has_port;
3081 };
3082
3083 static int
3084 port_dump_start(const struct ofproto *ofproto_ OVS_UNUSED, void **statep)
3085 {
3086     *statep = xzalloc(sizeof(struct port_dump_state));
3087     return 0;
3088 }
3089
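/* Returns the next port in a dump, iterating first over the names in
 * 'ofproto->ports' and then, in a second pass, over the "ghost" ports
 * (patch and tunnel ports, which are tracked separately). */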
3090 static int
3091 port_dump_next(const struct ofproto *ofproto_, void *state_,
3092                struct ofproto_port *port)
3093 {
3094     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3095     struct port_dump_state *state = state_;
3096     const struct sset *sset;
3097     struct sset_node *node;
3098
3099     if (state->has_port) {
3100         ofproto_port_destroy(&state->port);
3101         state->has_port = false;
3102     }
3103     sset = state->ghost ? &ofproto->ghost_ports : &ofproto->ports;
3104     while ((node = sset_at_position(sset, &state->bucket, &state->offset))) {
3105         int error;
3106
3107         error = port_query_by_name(ofproto_, node->name, &state->port);
3108         if (!error) {
3109             *port = state->port;
3110             state->has_port = true;
3111             return 0;
3112         } else if (error != ENODEV) {
3113             return error;
3114         }
3115     }
3116
3117     if (!state->ghost) {
3118         state->ghost = true;
3119         state->bucket = 0;
3120         state->offset = 0;
3121         return port_dump_next(ofproto_, state_, port);
3122     }
3123
3124     return EOF;
3125 }
3126
3127 static int
3128 port_dump_done(const struct ofproto *ofproto_ OVS_UNUSED, void *state_)
3129 {
3130     struct port_dump_state *state = state_;
3131
3132     if (state->has_port) {
3133         ofproto_port_destroy(&state->port);
3134     }
3135     free(state);
3136     return 0;
3137 }
3138
3139 static int
3140 port_poll(const struct ofproto *ofproto_, char **devnamep)
3141 {
3142     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3143
3144     if (ofproto->port_poll_errno) {
3145         int error = ofproto->port_poll_errno;
3146         ofproto->port_poll_errno = 0;
3147         return error;
3148     }
3149
3150     if (sset_is_empty(&ofproto->port_poll_set)) {
3151         return EAGAIN;
3152     }
3153
3154     *devnamep = sset_pop(&ofproto->port_poll_set);
3155     return 0;
3156 }
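     /* Illustrative sketch, assuming the usual convention that the caller
      * owns and frees the returned name ('handle_devname' is hypothetical):
      *
      *     char *devname;
      *
      *     while (!port_poll(ofproto, &devname)) {
      *         handle_devname(devname);
      *         free(devname);
      *     }
      *
      * The loop ends when port_poll() returns EAGAIN (nothing queued) or a
      * previously recorded port_poll_errno. */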
3157
3158 static void
3159 port_poll_wait(const struct ofproto *ofproto_)
3160 {
3161     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
3162     dpif_port_poll_wait(ofproto->backer->dpif);
3163 }
3164
3165 static int
3166 port_is_lacp_current(const struct ofport *ofport_)
3167 {
3168     const struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
3169     return (ofport->bundle && ofport->bundle->lacp
3170             ? lacp_slave_is_current(ofport->bundle->lacp, ofport)
3171             : -1);
3172 }
3173 \f
3174 /* Upcall handling. */
3175
3176 struct flow_miss_op {
3177     struct dpif_op dpif_op;
3178
3179     uint64_t slow_stub[128 / 8]; /* Buffer for compose_slow_path() */
3180     struct xlate_out xout;
3181     bool xout_garbage;           /* Does 'xout' need xlate_out_uninit()? */
3182
3183     struct ofpbuf mask;          /* Flow mask for "put" ops. */
3184     struct odputil_keybuf maskbuf;
3185
3186     /* If this is a "put" op, then a pointer to the subfacet that should
3187      * be marked as uninstalled if the operation fails. */
3188     struct subfacet *subfacet;
3189 };
3190
3191 /* Figures out whether a flow that missed in 'miss->ofproto', whose details
3192  * are in 'miss', is likely to be worth tracking in detail in userspace
3193  * and (usually) installing a datapath flow.  The answer is usually "yes" (a
3194  * return value of true).  However, for short flows the cost of bookkeeping is
3195  * much higher than the benefits, so when the datapath holds a large number of
3196  * flows we impose some heuristics to decide which flows are likely to be worth
3197  * tracking. */
3198 static bool
3199 flow_miss_should_make_facet(struct flow_miss *miss)
3200 {
3201     struct dpif_backer *backer = miss->ofproto->backer;
3202     uint32_t hash;
3203
3204     switch (flow_miss_model) {
3205     case OFPROTO_HANDLE_MISS_AUTO:
3206         break;
3207     case OFPROTO_HANDLE_MISS_WITH_FACETS:
3208         return true;
3209     case OFPROTO_HANDLE_MISS_WITHOUT_FACETS:
3210         return false;
3211     }
3212
3213     if (!backer->governor) {
3214         size_t n_subfacets;
3215
3216         n_subfacets = hmap_count(&backer->subfacets);
3217         if (n_subfacets * 2 <= flow_eviction_threshold) {
3218             return true;
3219         }
3220
3221         backer->governor = governor_create();
3222     }
3223
3224     hash = flow_hash_in_wildcards(&miss->flow, &miss->xout.wc, 0);
3225     return governor_should_install_flow(backer->governor, hash,
3226                                         miss->stats.n_packets);
3227 }
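     /* Worked example of the heuristic above, with made-up numbers: suppose
      * flow_eviction_threshold is 2500 and no governor exists yet.  While
      * the datapath holds at most 1250 subfacets (1250 * 2 <= 2500), every
      * miss gets a facet.  The first miss that sees more subfacets than that
      * creates the governor, and from then on
      * governor_should_install_flow() decides per flow, favoring flows that
      * have already shown multiple packets. */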
3228
3229 /* Handles 'miss', which matches 'facet'.  May add any required datapath
3230  * operations to 'ops', incrementing '*n_ops' for each new op.
3231  *
3232  * All of the packets in 'miss' are considered to have arrived at time
3233  * 'miss->stats.used'.  This is really important only for new facets: if we
3234  * just called time_msec() here, then the new subfacet or its packets could
3235  * occasionally look as though they were used some time after the facet was
3236  * used.  That can make a one-packet flow look like it has a nonzero duration,
3237  * which looks odd in e.g. NetFlow statistics. */
3238 static void
3239 handle_flow_miss_with_facet(struct flow_miss *miss, struct facet *facet,
3240                             struct flow_miss_op *ops, size_t *n_ops)
3241 {
3242     enum subfacet_path want_path;
3243     struct subfacet *subfacet;
3244     uint32_t key_hash;
3245
3246     /* Update facet stats. */
3247     facet->packet_count += miss->stats.n_packets;
3248     facet->prev_packet_count += miss->stats.n_packets;
3249     facet->byte_count += miss->stats.n_bytes;
3250     facet->prev_byte_count += miss->stats.n_bytes;
3251
3252     /* Look for an existing subfacet.  If we find one, update its used time. */
3253     key_hash = odp_flow_key_hash(miss->key, miss->key_len);
3254     if (!list_is_empty(&facet->subfacets)) {
3255         subfacet = subfacet_find(miss->ofproto->backer,
3256                                  miss->key, miss->key_len, key_hash);
3257         if (subfacet) {
3258             if (subfacet->facet == facet) {
3259                 subfacet->used = MAX(subfacet->used, miss->stats.used);
3260             } else {
3261                 /* This shouldn't happen. */
3262                 VLOG_ERR_RL(&rl, "subfacet with wrong facet");
3263                 subfacet_destroy(subfacet);
3264                 subfacet = NULL;
3265             }
3266         }
3267     } else {
3268         subfacet = NULL;
3269     }
3270
3271     /* Don't install the flow if it's the result of the "userspace"
3272      * action for an already installed facet.  This can occur when a
3273      * datapath flow with wildcards has a "userspace" action and flows
3274      * sent to userspace result in a different subfacet, which will then
3275      * be rejected as overlapping by the datapath. */
3276     if (miss->upcall_type == DPIF_UC_ACTION
3277         && !list_is_empty(&facet->subfacets)) {
3278         return;
3279     }
3280
3281     /* Create a subfacet, if we don't already have one. */
3282     if (!subfacet) {
3283         subfacet = subfacet_create(facet, miss, key_hash);
3284     }
3285
3286     /* Install the subfacet, if it's not already installed. */
3287     want_path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
3288     if (subfacet->path != want_path) {
3289         struct flow_miss_op *op = &ops[(*n_ops)++];
3290         struct dpif_flow_put *put = &op->dpif_op.u.flow_put;
3291
3292         subfacet->path = want_path;
3293
3294         ofpbuf_use_stack(&op->mask, &op->maskbuf, sizeof op->maskbuf);
3295         if (enable_megaflows) {
3296             odp_flow_key_from_mask(&op->mask, &facet->xout.wc.masks,
3297                                    &miss->flow, UINT32_MAX);
3298         }
3299
3300         op->xout_garbage = false;
3301         op->dpif_op.type = DPIF_OP_FLOW_PUT;
3302         op->subfacet = subfacet;
3303         put->flags = DPIF_FP_CREATE;
3304         put->key = miss->key;
3305         put->key_len = miss->key_len;
3306         put->mask = op->mask.data;
3307         put->mask_len = op->mask.size;
3308
3309         if (want_path == SF_FAST_PATH) {
3310             put->actions = facet->xout.odp_actions.data;
3311             put->actions_len = facet->xout.odp_actions.size;
3312         } else {
3313             compose_slow_path(facet->ofproto, &miss->flow, facet->xout.slow,
3314                               op->slow_stub, sizeof op->slow_stub,
3315                               &put->actions, &put->actions_len);
3316         }
3317         put->stats = NULL;
3318     }
3319 }
3320
3321 /* Handles flow miss 'miss'.  May add any required datapath operations
3322  * to 'ops', incrementing '*n_ops' for each new op. */
3323 static void
3324 handle_flow_miss(struct flow_miss *miss, struct flow_miss_op *ops,
3325                  size_t *n_ops)
3326 {
3327     struct facet *facet;
3328
3329     miss->ofproto->n_missed += miss->stats.n_packets;
3330
3331     facet = facet_lookup_valid(miss->ofproto, &miss->flow);
3332     if (!facet) {
3333         /* There does not exist a bijection between 'struct flow' and datapath
3334  * flow keys with fitness ODP_FIT_TOO_LITTLE.  This breaks a fundamental
3335          * assumption used throughout the facet and subfacet handling code.
3336          * Since we have to handle these misses in userspace anyway, we simply
3337          * skip facet creation, avoiding the problem altogether. */
3338         if (miss->key_fitness == ODP_FIT_TOO_LITTLE
3339             || !flow_miss_should_make_facet(miss)) {
3340             return;
3341         }
3342
3343         facet = facet_create(miss);
3344     }
3345     handle_flow_miss_with_facet(miss, facet, ops, n_ops);
3346 }
3347
3348 static struct drop_key *
3349 drop_key_lookup(const struct dpif_backer *backer, const struct nlattr *key,
3350                 size_t key_len)
3351 {
3352     struct drop_key *drop_key;
3353
3354     HMAP_FOR_EACH_WITH_HASH (drop_key, hmap_node, hash_bytes(key, key_len, 0),
3355                              &backer->drop_keys) {
3356         if (drop_key->key_len == key_len
3357             && !memcmp(drop_key->key, key, key_len)) {
3358             return drop_key;
3359         }
3360     }
3361     return NULL;
3362 }
3363
3364 static void
3365 drop_key_clear(struct dpif_backer *backer)
3366 {
3367     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
3368     struct drop_key *drop_key, *next;
3369
3370     HMAP_FOR_EACH_SAFE (drop_key, next, hmap_node, &backer->drop_keys) {
3371         int error;
3372
3373         error = dpif_flow_del(backer->dpif, drop_key->key, drop_key->key_len,
3374                               NULL);
3375         if (error && !VLOG_DROP_WARN(&rl)) {
3376             struct ds ds = DS_EMPTY_INITIALIZER;
3377             odp_flow_key_format(drop_key->key, drop_key->key_len, &ds);
3378             VLOG_WARN("Failed to delete drop key (%s) (%s)",
3379                       ovs_strerror(error), ds_cstr(&ds));
3380             ds_destroy(&ds);
3381         }
3382
3383         hmap_remove(&backer->drop_keys, &drop_key->hmap_node);
3384         drop_key_destroy(drop_key);
3385     }
3386
3387     udpif_drop_key_clear(backer->udpif);
3388 }
3389
3390 static void
3391 handle_flow_misses(struct dpif_backer *backer, struct flow_miss_batch *fmb)
3392 {
3393     struct flow_miss_op flow_miss_ops[FLOW_MISS_MAX_BATCH];
3394     struct dpif_op *dpif_ops[FLOW_MISS_MAX_BATCH];
3395     struct flow_miss *miss;
3396     size_t n_ops, i;
3397
3398     /* Process each element in the to-do list, constructing the set of
3399      * operations to batch. */
3400     n_ops = 0;
3401     HMAP_FOR_EACH (miss, hmap_node, &fmb->misses) {
3402         handle_flow_miss(miss, flow_miss_ops, &n_ops);
3403     }
3404     ovs_assert(n_ops <= ARRAY_SIZE(flow_miss_ops));
3405
3406     /* Execute batch. */
3407     for (i = 0; i < n_ops; i++) {
3408         dpif_ops[i] = &flow_miss_ops[i].dpif_op;
3409     }
3410     dpif_operate(backer->dpif, dpif_ops, n_ops);
3411
3412     for (i = 0; i < n_ops; i++) {
3413         if (dpif_ops[i]->error != 0
3414             && flow_miss_ops[i].dpif_op.type == DPIF_OP_FLOW_PUT
3415             && flow_miss_ops[i].subfacet) {
3416             struct subfacet *subfacet = flow_miss_ops[i].subfacet;
3417
3418             COVERAGE_INC(subfacet_install_fail);
3419
3420             /* Zero out subfacet counters when installation failed but the
3421              * datapath reported hits.  This should not happen: if the
3422              * datapath flow already exists, we should not be attempting
3423              * to create a new subfacet.  A buggy datapath could still
3424              * trigger it, though, so just zero out the counters and log
3425              * an error. */
3426             if (subfacet->dp_packet_count || subfacet->dp_byte_count) {
3427                 VLOG_ERR_RL(&rl, "failed to install subfacet for which "
3428                             "datapath reported hits");
3429                 subfacet->dp_packet_count = subfacet->dp_byte_count = 0;
3430             }
3431
3432             subfacet->path = SF_NOT_INSTALLED;
3433         }
3434     }
3435 }
3436
3437 static void
3438 handle_upcalls(struct dpif_backer *backer)
3439 {
3440     struct flow_miss_batch *fmb;
3441     int n_processed;
3442
3443     for (n_processed = 0; n_processed < FLOW_MISS_MAX_BATCH; n_processed++) {
3444         struct drop_key *drop_key = drop_key_next(backer->udpif);
3445         if (!drop_key) {
3446             break;
3447         }
3448
3449         if (!drop_key_lookup(backer, drop_key->key, drop_key->key_len)) {
3450             hmap_insert(&backer->drop_keys, &drop_key->hmap_node,
3451                         hash_bytes(drop_key->key, drop_key->key_len, 0));
3452             dpif_flow_put(backer->dpif, DPIF_FP_CREATE | DPIF_FP_MODIFY,
3453                           drop_key->key, drop_key->key_len,
3454                           NULL, 0, NULL, 0, NULL);
3455         } else {
3456             drop_key_destroy(drop_key);
3457         }
3458     }
3459
3460     fmb = flow_miss_batch_next(backer->udpif);
3461     if (fmb) {
3462         handle_flow_misses(backer, fmb);
3463         flow_miss_batch_destroy(fmb);
3464     }
3465 }
3466 \f
3467 /* Flow expiration. */
3468
3469 static int subfacet_max_idle(const struct dpif_backer *);
3470 static void update_stats(struct dpif_backer *);
3471 static void rule_expire(struct rule_dpif *) OVS_REQUIRES(ofproto_mutex);
3472 static void expire_subfacets(struct dpif_backer *, int dp_max_idle);
3473
3474 /* This function is called periodically by run().  Its job is to collect
3475  * updates for the flows that have been installed into the datapath, most
3476  * importantly when they last were used, and then use that information to
3477  * expire flows that have not been used recently.
3478  *
3479  * Returns the number of milliseconds after which it should be called again. */
3480 static int
3481 expire(struct dpif_backer *backer)
3482 {
3483     struct ofproto_dpif *ofproto;
3484     size_t n_subfacets;
3485     int max_idle;
3486
3487     /* Periodically clear out the drop keys in an effort to keep them
3488      * relatively few. */
3489     drop_key_clear(backer);
3490
3491     /* Update stats for each flow in the backer. */
3492     update_stats(backer);
3493
3494     n_subfacets = hmap_count(&backer->subfacets);
3495     if (n_subfacets) {
3496         struct subfacet *subfacet;
3497         long long int total, now;
3498
3499         total = 0;
3500         now = time_msec();
3501         HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
3502             total += now - subfacet->created;
3503         }
3504         backer->avg_subfacet_life += total / n_subfacets;
3505     }
3506     backer->avg_subfacet_life /= 2;
3507
3508     backer->avg_n_subfacet += n_subfacets;
3509     backer->avg_n_subfacet /= 2;
3510
3511     backer->max_n_subfacet = MAX(backer->max_n_subfacet, n_subfacets);
3512
3513     max_idle = subfacet_max_idle(backer);
3514     expire_subfacets(backer, max_idle);
3515
3516     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
3517         struct rule *rule, *next_rule;
3518
3519         if (ofproto->backer != backer) {
3520             continue;
3521         }
3522
3523         /* Expire OpenFlow flows whose idle_timeout or hard_timeout
3524          * has passed. */
3525         ovs_mutex_lock(&ofproto_mutex);
3526         LIST_FOR_EACH_SAFE (rule, next_rule, expirable,
3527                             &ofproto->up.expirable) {
3528             rule_expire(rule_dpif_cast(rule));
3529         }
3530         ovs_mutex_unlock(&ofproto_mutex);
3531
3532         /* All outstanding data in existing flows has been accounted, so it's a
3533          * good time to do bond rebalancing. */
3534         if (ofproto->has_bonded_bundles) {
3535             struct ofbundle *bundle;
3536
3537             HMAP_FOR_EACH (bundle, hmap_node, &ofproto->bundles) {
3538                 if (bundle->bond) {
3539                     bond_rebalance(bundle->bond);
3540                 }
3541             }
3542         }
3543     }
3544
3545     return MIN(max_idle, 1000);
3546 }
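     /* The avg_subfacet_life and avg_n_subfacet updates above amount to a
      * crude exponentially weighted moving average: each run adds the new
      * sample, then halves the sum.  Worked example with made-up numbers: a
      * stored average life of 100 ms combined with a new mean age of 300 ms
      * becomes (100 + 300) / 2 = 200 ms, so each earlier run's contribution
      * decays by half per call to expire(). */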
3547
3548 /* Updates flow table statistics given that the datapath just reported 'stats'
3549  * as 'subfacet''s statistics. */
3550 static void
3551 update_subfacet_stats(struct subfacet *subfacet,
3552                       const struct dpif_flow_stats *stats)
3553 {
3554     struct facet *facet = subfacet->facet;
3555     struct dpif_flow_stats diff;
3556
3557     diff.tcp_flags = stats->tcp_flags;
3558     diff.used = stats->used;
3559
3560     if (stats->n_packets >= subfacet->dp_packet_count) {
3561         diff.n_packets = stats->n_packets - subfacet->dp_packet_count;
3562     } else {
3563         VLOG_WARN_RL(&rl, "unexpected packet count from the datapath");
3564         diff.n_packets = 0;
3565     }
3566
3567     if (stats->n_bytes >= subfacet->dp_byte_count) {
3568         diff.n_bytes = stats->n_bytes - subfacet->dp_byte_count;
3569     } else {
3570         VLOG_WARN_RL(&rl, "unexpected byte count from datapath");
3571         diff.n_bytes = 0;
3572     }
3573
3574     facet->ofproto->n_hit += diff.n_packets;
3575     subfacet->dp_packet_count = stats->n_packets;
3576     subfacet->dp_byte_count = stats->n_bytes;
3577     subfacet_update_stats(subfacet, &diff);
3578
3579     if (facet->accounted_bytes < facet->byte_count) {
3580         facet_learn(facet);
3581         facet_account(facet);
3582         facet->accounted_bytes = facet->byte_count;
3583     }
3584 }
3585
3586 /* 'key' and 'key_len' identify a flow in 'backer''s dpif that we know nothing
3587  * about, or a flow that shouldn't be installed but was anyway.  Delete it. */
3588 static void
3589 delete_unexpected_flow(struct dpif_backer *backer,
3590                        const struct nlattr *key, size_t key_len)
3591 {
3592     if (!VLOG_DROP_WARN(&rl)) {
3593         struct ds s;
3594
3595         ds_init(&s);
3596         odp_flow_key_format(key, key_len, &s);
3597         VLOG_WARN("unexpected flow: %s", ds_cstr(&s));
3598         ds_destroy(&s);
3599     }
3600
3601     COVERAGE_INC(facet_unexpected);
3602     dpif_flow_del(backer->dpif, key, key_len, NULL);
3603 }
3604
3605 /* Update 'packet_count', 'byte_count', and 'used' members of installed facets.
3606  *
3607  * This function also pushes statistics updates to rules which each facet
3608  * resubmits into.  Generally these statistics will be accurate.  However, if a
3609  * facet changes the rule it resubmits into at some time in between
3610  * update_stats() runs, it is possible that statistics accrued to the
3611  * old rule will be incorrectly attributed to the new rule.  This could be
3612  * avoided by calling update_stats() whenever rules are created or
3613  * deleted.  However, the performance impact of making so many calls to the
3614  * datapath does not justify the benefit of having perfectly accurate statistics.
3615  *
3616  * In addition, this function maintains per-ofproto flow hit counts.  Patch
3617  * ports are not treated specially: e.g. a packet that ingresses on br0 and is
3618  * patched into br1 increases br0's hit count by 1 but does not affect br1's
3619  * hit or miss counts.
3620  */
3621 static void
3622 update_stats(struct dpif_backer *backer)
3623 {
3624     const struct dpif_flow_stats *stats;
3625     struct dpif_flow_dump dump;
3626     const struct nlattr *key, *mask;
3627     size_t key_len, mask_len;
3628
3629     dpif_flow_dump_start(&dump, backer->dpif);
3630     while (dpif_flow_dump_next(&dump, &key, &key_len,
3631                                &mask, &mask_len, NULL, NULL, &stats)) {
3632         struct subfacet *subfacet;
3633         uint32_t key_hash;
3634
3635         key_hash = odp_flow_key_hash(key, key_len);
3636         subfacet = subfacet_find(backer, key, key_len, key_hash);
3637         switch (subfacet ? subfacet->path : SF_NOT_INSTALLED) {
3638         case SF_FAST_PATH:
3639             update_subfacet_stats(subfacet, stats);
3640             break;
3641
3642         case SF_SLOW_PATH:
3643             /* Stats are updated per-packet. */
3644             break;
3645
3646         case SF_NOT_INSTALLED:
3647         default:
3648             delete_unexpected_flow(backer, key, key_len);
3649             break;
3650         }
3651         run_fast_rl();
3652     }
3653     dpif_flow_dump_done(&dump);
3654 }
3655
3656 /* Calculates and returns the number of milliseconds of idle time after which
3657  * subfacets should expire from the datapath.  When a subfacet expires, we fold
3658  * its statistics into its facet, and when a facet's last subfacet expires, we
3659  * fold its statistics into its rule. */
3660 static int
3661 subfacet_max_idle(const struct dpif_backer *backer)
3662 {
3663     /*
3664      * Idle time histogram.
3665      *
3666      * Most of the time a switch has a relatively small number of subfacets.
3667      * When this is the case we might as well keep statistics for all of them
3668  * in userspace and cache them in the kernel datapath for performance as
3669  * well.
3670      *
3671      * As the number of subfacets increases, the memory required to maintain
3672      * statistics about them in userspace and in the kernel becomes
3673      * significant.  However, with a large number of subfacets it is likely
3674      * that only a few of them are "heavy hitters" that consume a large amount
3675      * of bandwidth.  At this point, only heavy hitters are worth caching in
3676  * the kernel and maintaining in userspace; other subfacets we can
3677      * discard.
3678      *
3679      * The technique used to compute the idle time is to build a histogram with
3680      * N_BUCKETS buckets whose width is BUCKET_WIDTH msecs each.  Each subfacet
3681      * that is installed in the kernel gets dropped in the appropriate bucket.
3682      * After the histogram has been built, we compute the cutoff so that only
3683      * the most-recently-used 1% of subfacets (but at least
3684      * flow_eviction_threshold flows) are kept cached.  At least
3685      * the most-recently-used bucket of subfacets is kept, so actually an
3686      * arbitrary number of subfacets can be kept in any given expiration run
3687      * (though the next run will delete most of those unless they receive
3688      * additional data).
3689      *
3690      * This requires a second pass through the subfacets, in addition to the
3691  * pass made by update_stats(), because update_stats() never looks at
3692      * uninstallable subfacets.
3693      */
3694     enum { BUCKET_WIDTH = 100 };
3695     enum { N_BUCKETS = 5000 / BUCKET_WIDTH };
3696     int buckets[N_BUCKETS] = { 0 };
3697     int total, subtotal, bucket;
3698     struct subfacet *subfacet;
3699     long long int now;
3700     int i;
3701
3702     total = hmap_count(&backer->subfacets);
3703     if (total <= flow_eviction_threshold) {
3704         return N_BUCKETS * BUCKET_WIDTH;
3705     }
3706
3707     /* Build histogram. */
3708     now = time_msec();
3709     HMAP_FOR_EACH (subfacet, hmap_node, &backer->subfacets) {
3710         long long int idle = now - subfacet->used;
3711         int bucket = (idle <= 0 ? 0
3712                       : idle >= BUCKET_WIDTH * N_BUCKETS ? N_BUCKETS - 1
3713                       : (unsigned int) idle / BUCKET_WIDTH);
3714         buckets[bucket]++;
3715     }
3716
3717     /* Find the first bucket whose flows should be expired. */
3718     subtotal = bucket = 0;
3719     do {
3720         subtotal += buckets[bucket++];
3721     } while (bucket < N_BUCKETS &&
3722              subtotal < MAX(flow_eviction_threshold, total / 100));
3723
3724     if (VLOG_IS_DBG_ENABLED()) {
3725         struct ds s;
3726
3727         ds_init(&s);
3728         ds_put_cstr(&s, "keep");
3729         for (i = 0; i < N_BUCKETS; i++) {
3730             if (i == bucket) {
3731                 ds_put_cstr(&s, ", drop");
3732             }
3733             if (buckets[i]) {
3734                 ds_put_format(&s, " %d:%d", i * BUCKET_WIDTH, buckets[i]);
3735             }
3736         }
3737         VLOG_INFO("%s (msec:count)", ds_cstr(&s));
3738         ds_destroy(&s);
3739     }
3740
3741     return bucket * BUCKET_WIDTH;
3742 }
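     /* Worked example with made-up numbers: suppose total = 10000 subfacets
      * and flow_eviction_threshold = 1000, so the goal is to keep
      * MAX(1000, 10000 / 100) = 1000 of them.  If bucket 0 holds 600
      * subfacets and bucket 1 holds 700, the loop stops after bucket 1
      * (subtotal 1300 >= 1000) and returns 2 * BUCKET_WIDTH = 200 ms: only
      * subfacets idle for less than 200 ms survive the next expiration pass.
      * The debug log would then read something like
      * "keep 0:600 100:700, drop 200:42 ... (msec:count)". */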
3743
3744 static void
3745 expire_subfacets(struct dpif_backer *backer, int dp_max_idle)
3746 {
3747     /* Cutoff time for most flows. */
3748     long long int normal_cutoff = time_msec() - dp_max_idle;
3749
3750     /* We really want to keep flows for special protocols around, so use a more
3751      * conservative cutoff. */
3752     long long int special_cutoff = time_msec() - 10000;
3753
3754     struct subfacet *subfacet, *next_subfacet;
3755     struct subfacet *batch[SUBFACET_DESTROY_MAX_BATCH];
3756     int n_batch;
3757
3758     n_batch = 0;
3759     HMAP_FOR_EACH_SAFE (subfacet, next_subfacet, hmap_node,
3760                         &backer->subfacets) {
3761         long long int cutoff;
3762
3763         cutoff = (subfacet->facet->xout.slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP
3764                                                 | SLOW_STP)
3765                   ? special_cutoff
3766                   : normal_cutoff);
3767         if (subfacet->used < cutoff) {
3768             if (subfacet->path != SF_NOT_INSTALLED) {
3769                 batch[n_batch++] = subfacet;
3770                 if (n_batch >= SUBFACET_DESTROY_MAX_BATCH) {
3771                     subfacet_destroy_batch(backer, batch, n_batch);
3772                     n_batch = 0;
3773                 }
3774             } else {
3775                 subfacet_destroy(subfacet);
3776             }
3777         }
3778     }
3779
3780     if (n_batch > 0) {
3781         subfacet_destroy_batch(backer, batch, n_batch);
3782     }
3783 }
3784
3785 /* If 'rule' is an OpenFlow rule that has expired according to OpenFlow rules,
3786  * then deletes it entirely. */
3787 static void
3788 rule_expire(struct rule_dpif *rule)
3789     OVS_REQUIRES(ofproto_mutex)
3790 {
3791     uint16_t idle_timeout, hard_timeout;
3792     long long int now = time_msec();
3793     int reason;
3794
3795     ovs_assert(!rule->up.pending);
3796
3797     /* Has 'rule' expired? */
3798     ovs_mutex_lock(&rule->up.mutex);
3799     hard_timeout = rule->up.hard_timeout;
3800     idle_timeout = rule->up.idle_timeout;
3801     if (hard_timeout && now > rule->up.modified + hard_timeout * 1000) {
3802         reason = OFPRR_HARD_TIMEOUT;
3803     } else if (idle_timeout && now > rule->up.used + idle_timeout * 1000) {
3804         reason = OFPRR_IDLE_TIMEOUT;
3805     } else {
3806         reason = -1;
3807     }
3808     ovs_mutex_unlock(&rule->up.mutex);
3809
3810     if (reason >= 0) {
3811         COVERAGE_INC(ofproto_dpif_expired);
3812         ofproto_rule_expire(&rule->up, reason);
3813     }
3814 }
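     /* Worked example of the timeout arithmetic above: with an idle_timeout
      * of 10 seconds and 'used' == 1000 (msec), the rule expires once
      * now > 1000 + 10 * 1000 = 11000, i.e. 10 seconds after it was last
      * used, unless traffic refreshes 'used' first.  hard_timeout works the
      * same way against 'modified', which traffic does not refresh. */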
3815 \f
3816 /* Facets. */
3817
3818 /* Creates and returns a new facet based on 'miss'.
3819  *
3820  * The caller must already have determined that no facet with an identical
3821  * 'miss->flow' exists in 'miss->ofproto'.
3822  *
3823  * 'miss->xout' must have been created based on 'miss'.
3824  *
3825  * The new facet's statistics are initialized based on 'miss->stats'.
3826  *
3827  * The facet will initially have no subfacets.  The caller should create (at
3828  * least) one subfacet with subfacet_create(). */
3829 static struct facet *
3830 facet_create(const struct flow_miss *miss)
3831 {
3832     struct ofproto_dpif *ofproto = miss->ofproto;
3833     struct facet *facet;
3834     struct match match;
3835
3836     COVERAGE_INC(facet_create);
3837     facet = xzalloc(sizeof *facet);
3838     facet->ofproto = miss->ofproto;
3839     facet->used = miss->stats.used;
3840     facet->flow = miss->flow;
3841     facet->learn_rl = time_msec() + 500;
3842
3843     list_init(&facet->subfacets);
3844     netflow_flow_init(&facet->nf_flow);
3845     netflow_flow_update_time(ofproto->netflow, &facet->nf_flow, facet->used);
3846
3847     xlate_out_copy(&facet->xout, &miss->xout);
3848
3849     match_init(&match, &facet->flow, &facet->xout.wc);
3850     cls_rule_init(&facet->cr, &match, OFP_DEFAULT_PRIORITY);
3851     ovs_rwlock_wrlock(&ofproto->facets.rwlock);
3852     classifier_insert(&ofproto->facets, &facet->cr);
3853     ovs_rwlock_unlock(&ofproto->facets.rwlock);
3854
3855     facet->nf_flow.output_iface = facet->xout.nf_output_iface;
3856     return facet;
3857 }
3858
3859 static void
3860 facet_free(struct facet *facet)
3861 {
3862     if (facet) {
3863         xlate_out_uninit(&facet->xout);
3864         free(facet);
3865     }
3866 }
3867
3868 /* Executes, within 'ofproto', the actions in 'rule' or 'ofpacts' on 'packet'.
3869  * 'flow' must reflect the data in 'packet'. */
3870 int
3871 ofproto_dpif_execute_actions(struct ofproto_dpif *ofproto,
3872                              const struct flow *flow,
3873                              struct rule_dpif *rule,
3874                              const struct ofpact *ofpacts, size_t ofpacts_len,
3875                              struct ofpbuf *packet)
3876 {
3877     struct odputil_keybuf keybuf;
3878     struct dpif_flow_stats stats;
3879     struct xlate_out xout;
3880     struct xlate_in xin;
3881     ofp_port_t in_port;
3882     struct ofpbuf key;
3883     int error;
3884
3885     ovs_assert((rule != NULL) != (ofpacts != NULL));
3886
3887     dpif_flow_stats_extract(flow, packet, time_msec(), &stats);
3888     if (rule) {
3889         rule_dpif_credit_stats(rule, &stats);
3890     }
3891
3892     xlate_in_init(&xin, ofproto, flow, rule, stats.tcp_flags, packet);
3893     xin.ofpacts = ofpacts;
3894     xin.ofpacts_len = ofpacts_len;
3895     xin.resubmit_stats = &stats;
3896     xlate_actions(&xin, &xout);
3897
3898     ofpbuf_use_stack(&key, &keybuf, sizeof keybuf);
3899     in_port = flow->in_port.ofp_port;
3900     if (in_port == OFPP_NONE) {
3901         in_port = OFPP_LOCAL;
3902     }
3903     odp_flow_key_from_flow(&key, flow, ofp_port_to_odp_port(ofproto, in_port));
3904
3905     error = dpif_execute(ofproto->backer->dpif, key.data, key.size,
3906                          xout.odp_actions.data, xout.odp_actions.size, packet,
3907                          (xout.slow & SLOW_ACTION) != 0);
3908     xlate_out_uninit(&xout);
3909
3910     return error;
3911 }
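     /* Illustrative sketch of the two calling conventions (exactly one of
      * 'rule' and 'ofpacts' may be nonnull, per the assertion above;
      * 'my_rule', 'my_acts', and 'my_len' are hypothetical):
      *
      *     // Execute a rule's actions on 'packet':
      *     ofproto_dpif_execute_actions(ofproto, &flow, my_rule,
      *                                  NULL, 0, packet);
      *
      *     // Or execute an explicit ofpact list instead:
      *     ofproto_dpif_execute_actions(ofproto, &flow, NULL,
      *                                  my_acts, my_len, packet);
      */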
3912
3913 /* Remove 'facet' from its ofproto and free up the associated memory:
3914  *
3915  *   - If 'facet' was installed in the datapath, uninstalls it and updates its
3916  *     rule's statistics, via subfacet_uninstall().
3917  *
3918  *   - Removes 'facet' from its rule and from ofproto->facets.
3919  */
3920 static void
3921 facet_remove(struct facet *facet)
3922 {
3923     struct subfacet *subfacet, *next_subfacet;
3924
3925     COVERAGE_INC(facet_remove);
3926     ovs_assert(!list_is_empty(&facet->subfacets));
3927
3928     /* First uninstall all of the subfacets to get final statistics. */
3929     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
3930         subfacet_uninstall(subfacet);
3931     }
3932
3933     /* Flush the final stats to the rule.
3934      *
3935      * This might require us to have at least one subfacet around so that we
3936      * can use its actions for accounting in facet_account(), which is why we
3937      * have uninstalled but not yet destroyed the subfacets. */
3938     facet_flush_stats(facet);
3939
3940     /* Now we're really all done so destroy everything. */
3941     LIST_FOR_EACH_SAFE (subfacet, next_subfacet, list_node,
3942                         &facet->subfacets) {
3943         subfacet_destroy__(subfacet);
3944     }
3945     ovs_rwlock_wrlock(&facet->ofproto->facets.rwlock);
3946     classifier_remove(&facet->ofproto->facets, &facet->cr);
3947     ovs_rwlock_unlock(&facet->ofproto->facets.rwlock);
3948     cls_rule_destroy(&facet->cr);
3949     facet_free(facet);
3950 }
3951
3952 /* Feed information from 'facet' back into the learning table to keep it in
3953  * sync with what is actually flowing through the datapath. */
3954 static void
3955 facet_learn(struct facet *facet)
3956 {
3957     long long int now = time_msec();
3958
3959     if (!facet->xout.has_fin_timeout && now < facet->learn_rl) {
3960         return;
3961     }
3962
3963     facet->learn_rl = now + 500;
3964
3965     if (!facet->xout.has_learn
3966         && !facet->xout.has_normal
3967         && (!facet->xout.has_fin_timeout
3968             || !(facet->tcp_flags & (TCP_FIN | TCP_RST)))) {
3969         return;
3970     }
3971
3972     facet_push_stats(facet, true);
3973 }
3974
3975 static void
3976 facet_account(struct facet *facet)
3977 {
3978     const struct nlattr *a;
3979     unsigned int left;
3980     ovs_be16 vlan_tci;
3981     uint64_t n_bytes;
3982
3983     if (!facet->xout.has_normal || !facet->ofproto->has_bonded_bundles) {
3984         return;
3985     }
3986     n_bytes = facet->byte_count - facet->accounted_bytes;
3987
3988     /* This loop feeds byte counters to bond_account() for rebalancing to use
3989      * as a basis.  We also need to track the actual VLAN on which the packet
3990      * is going to be sent to ensure that it matches the one passed to
3991      * bond_choose_output_slave().  (Otherwise, we will account to the wrong
3992      * hash bucket.)
3993      *
3994      * We use the actions from an arbitrary subfacet because they should all
3995      * be equally valid for our purpose. */
3996     vlan_tci = facet->flow.vlan_tci;
3997     NL_ATTR_FOR_EACH_UNSAFE (a, left, facet->xout.odp_actions.data,
3998                              facet->xout.odp_actions.size) {
3999         const struct ovs_action_push_vlan *vlan;
4000         struct ofport_dpif *port;
4001
4002         switch (nl_attr_type(a)) {
4003         case OVS_ACTION_ATTR_OUTPUT:
4004             port = get_odp_port(facet->ofproto, nl_attr_get_odp_port(a));
4005             if (port && port->bundle && port->bundle->bond) {
4006                 bond_account(port->bundle->bond, &facet->flow,
4007                              vlan_tci_to_vid(vlan_tci), n_bytes);
4008             }
4009             break;
4010
4011         case OVS_ACTION_ATTR_POP_VLAN:
4012             vlan_tci = htons(0);
4013             break;
4014
4015         case OVS_ACTION_ATTR_PUSH_VLAN:
4016             vlan = nl_attr_get(a);
4017             vlan_tci = vlan->vlan_tci;
4018             break;
4019         }
4020     }
4021 }
4022
4023 /* Returns true if the only action for 'facet' is to send to the controller.
4024  * (We don't report NetFlow expiration messages for such facets because they
4025  * are just part of the control logic for the network, not real traffic). */
4026 static bool
4027 facet_is_controller_flow(struct facet *facet)
4028 {
4029     if (facet) {
4030         struct ofproto_dpif *ofproto = facet->ofproto;
4031         const struct ofpact *ofpacts;
4032         struct rule_actions *actions;
4033         struct rule_dpif *rule;
4034         size_t ofpacts_len;
4035         bool is_controller;
4036
4037         rule_dpif_lookup(ofproto, &facet->flow, NULL, &rule);
4038         actions = rule_dpif_get_actions(rule);
4039         rule_dpif_unref(rule);
4040
4041         ofpacts_len = actions->ofpacts_len;
4042         ofpacts = actions->ofpacts;
4043         is_controller = ofpacts_len > 0
4044             && ofpacts->type == OFPACT_CONTROLLER
4045             && ofpact_next(ofpacts) >= ofpact_end(ofpacts, ofpacts_len);
4046         rule_actions_unref(actions);
4047
4048         return is_controller;
4049     }
4050     return false;
4051 }
4052
4053 /* Folds all of 'facet''s statistics into its rule.  Also performs bond
4054  * accounting and emits a NetFlow expiration if appropriate.  All of
4055  * 'facet''s statistics in the datapath should have been zeroed and folded into
4056  * its packet and byte counts before this function is called. */
4057 static void
4058 facet_flush_stats(struct facet *facet)
4059 {
4060     struct ofproto_dpif *ofproto = facet->ofproto;
4061     struct subfacet *subfacet;
4062
4063     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4064         ovs_assert(!subfacet->dp_byte_count);
4065         ovs_assert(!subfacet->dp_packet_count);
4066     }
4067
4068     facet_push_stats(facet, false);
4069     if (facet->accounted_bytes < facet->byte_count) {
4070         facet_account(facet);
4071         facet->accounted_bytes = facet->byte_count;
4072     }
4073
4074     if (ofproto->netflow && !facet_is_controller_flow(facet)) {
4075         struct ofexpired expired;
4076         expired.flow = facet->flow;
4077         expired.packet_count = facet->packet_count;
4078         expired.byte_count = facet->byte_count;
4079         expired.used = facet->used;
4080         netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
4081     }
4082
4083     /* Reset counters to prevent double counting if 'facet' ever gets
4084      * reinstalled. */
4085     facet_reset_counters(facet);
4086
4087     netflow_flow_clear(&facet->nf_flow);
4088     facet->tcp_flags = 0;
4089 }
4090
4091 /* Searches 'ofproto''s table of facets for one which would be responsible for
4092  * 'flow'.  Returns it if found, otherwise a null pointer.
4093  *
4094  * The returned facet might need revalidation; use facet_lookup_valid()
4095  * instead if that is important. */
4096 static struct facet *
4097 facet_find(struct ofproto_dpif *ofproto, const struct flow *flow)
4098 {
4099     struct cls_rule *cr;
4100
4101     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
4102     cr = classifier_lookup(&ofproto->facets, flow, NULL);
4103     ovs_rwlock_unlock(&ofproto->facets.rwlock);
4104     return cr ? CONTAINER_OF(cr, struct facet, cr) : NULL;
4105 }
4106
4107 /* Searches 'ofproto''s table of facets for one that covers
4108  * 'flow'.  Returns it if found, otherwise a null pointer.
4109  *
4110  * The returned facet is guaranteed to be valid. */
4111 static struct facet *
4112 facet_lookup_valid(struct ofproto_dpif *ofproto, const struct flow *flow)
4113 {
4114     struct facet *facet;
4115
4116     facet = facet_find(ofproto, flow);
4117     if (facet
4118         && ofproto->backer->need_revalidate
4119         && !facet_revalidate(facet)) {
4120         return NULL;
4121     }
4122
4123     return facet;
4124 }
4125
4126 static bool
4127 facet_check_consistency(struct facet *facet)
4128 {
4129     static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 15);
4130
4131     struct xlate_out xout;
4132     struct xlate_in xin;
4133     bool ok;
4134
4135     /* Check the datapath actions for consistency. */
4136     xlate_in_init(&xin, facet->ofproto, &facet->flow, NULL, 0, NULL);
4137     xlate_actions(&xin, &xout);
4138
4139     ok = ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)
4140         && facet->xout.slow == xout.slow;
4141     if (!ok && !VLOG_DROP_WARN(&rl)) {
4142         struct ds s = DS_EMPTY_INITIALIZER;
4143
4144         flow_format(&s, &facet->flow);
4145         ds_put_cstr(&s, ": inconsistency in facet");
4146
4147         if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
4148             ds_put_cstr(&s, " (actions were: ");
4149             format_odp_actions(&s, facet->xout.odp_actions.data,
4150                                facet->xout.odp_actions.size);
4151             ds_put_cstr(&s, ") (correct actions: ");
4152             format_odp_actions(&s, xout.odp_actions.data,
4153                                xout.odp_actions.size);
4154             ds_put_char(&s, ')');
4155         }
4156
4157         if (facet->xout.slow != xout.slow) {
4158             ds_put_format(&s, " slow path incorrect. should be %d", xout.slow);
4159         }
4160
4161         ds_destroy(&s);
4162     }
4163     xlate_out_uninit(&xout);
4164
4165     return ok;
4166 }
4167
4168 /* Re-searches the classifier for 'facet':
4169  *
4170  *   - If the rule found is different from 'facet''s current rule, moves
4171  *     'facet' to the new rule and recompiles its actions.
4172  *
4173  *   - If the rule found is the same as 'facet''s current rule, leaves 'facet'
4174  *     where it is and recompiles its actions anyway.
4175  *
4176  *   - If any of 'facet''s subfacets correspond to a new flow according to
4177  *     xlate_receive(), 'facet' is removed.
4178  *
4179  *   Returns true if 'facet' is still valid.  False if 'facet' was removed. */
4180 static bool
4181 facet_revalidate(struct facet *facet)
4182 {
4183     struct ofproto_dpif *ofproto = facet->ofproto;
4184     struct rule_dpif *new_rule;
4185     struct subfacet *subfacet;
4186     struct flow_wildcards wc;
4187     struct xlate_out xout;
4188     struct xlate_in xin;
4189
4190     COVERAGE_INC(facet_revalidate);
4191
4192     /* Check that child subfacets still correspond to this facet.  Tunnel
4193      * configuration changes could cause a subfacet's OpenFlow in_port to
4194      * change. */
4195     LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4196         struct ofproto_dpif *recv_ofproto;
4197         struct flow recv_flow;
4198         int error;
4199
4200         error = xlate_receive(ofproto->backer, NULL, subfacet->key,
4201                               subfacet->key_len, &recv_flow, NULL,
4202                               &recv_ofproto, NULL);
4203         if (error
4204             || recv_ofproto != ofproto
4205             || facet != facet_find(ofproto, &recv_flow)) {
4206             facet_remove(facet);
4207             return false;
4208         }
4209     }
4210
4211     flow_wildcards_init_catchall(&wc);
4212     rule_dpif_lookup(ofproto, &facet->flow, &wc, &new_rule);
4213
4214     /* Calculate new datapath actions.
4215      *
4216      * We do not modify any 'facet' state yet, because we might need to, e.g.,
4217      * emit a NetFlow expiration and, if so, we need to have the old state
4218      * around to properly compose it. */
4219     xlate_in_init(&xin, ofproto, &facet->flow, new_rule, 0, NULL);
4220     xlate_actions(&xin, &xout);
4221     flow_wildcards_or(&xout.wc, &xout.wc, &wc);
4222
4223     /* A facet's slow path reason should only change under dramatic
4224      * circumstances.  Rather than try to update everything, it's simpler to
4225      * remove the facet and start over.
4226      *
4227      * More importantly, if a facet's wildcards change, it will be relatively
4228      * difficult to figure out if its subfacets still belong to it, and if not
4229      * which facet they may belong to.  Again, to avoid the complexity, we
4230      * simply give up instead. */
4231     if (facet->xout.slow != xout.slow
4232         || memcmp(&facet->xout.wc, &xout.wc, sizeof xout.wc)) {
4233         facet_remove(facet);
4234         xlate_out_uninit(&xout);
4235         rule_dpif_unref(new_rule);
4236         return false;
4237     }
4238
4239     if (!ofpbuf_equal(&facet->xout.odp_actions, &xout.odp_actions)) {
4240         LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
4241             if (subfacet->path == SF_FAST_PATH) {
4242                 struct dpif_flow_stats stats;
4243
4244                 subfacet_install(subfacet, &xout.odp_actions, &stats);
4245                 subfacet_update_stats(subfacet, &stats);
4246             }
4247         }
4248
4249         facet_flush_stats(facet);
4250
4251         ofpbuf_clear(&facet->xout.odp_actions);
4252         ofpbuf_put(&facet->xout.odp_actions, xout.odp_actions.data,
4253                    xout.odp_actions.size);
4254     }
4255
4256     /* Update 'facet' now that we've taken care of all the old state. */
4257     facet->xout.slow = xout.slow;
4258     facet->xout.has_learn = xout.has_learn;
4259     facet->xout.has_normal = xout.has_normal;
4260     facet->xout.has_fin_timeout = xout.has_fin_timeout;
4261     facet->xout.nf_output_iface = xout.nf_output_iface;
4262     facet->xout.mirrors = xout.mirrors;
4263     facet->nf_flow.output_iface = facet->xout.nf_output_iface;
4264
4265     ovs_mutex_lock(&new_rule->up.mutex);
4266     facet->used = MAX(facet->used, new_rule->up.created);
4267     ovs_mutex_unlock(&new_rule->up.mutex);
4268
4269     xlate_out_uninit(&xout);
4270     rule_dpif_unref(new_rule);
4271     return true;
4272 }
4273
4274 static void
4275 facet_reset_counters(struct facet *facet)
4276 {
4277     facet->packet_count = 0;
4278     facet->byte_count = 0;
4279     facet->prev_packet_count = 0;
4280     facet->prev_byte_count = 0;
4281     facet->accounted_bytes = 0;
4282 }
4283
4284 static void
4285 flow_push_stats(struct ofproto_dpif *ofproto, struct flow *flow,
4286                 struct dpif_flow_stats *stats, bool may_learn)
4287 {
4288     struct ofport_dpif *in_port;
4289     struct xlate_in xin;
4290
4291     in_port = get_ofp_port(ofproto, flow->in_port.ofp_port);
4292     if (in_port && in_port->is_tunnel) {
4293         netdev_vport_inc_rx(in_port->up.netdev, stats);
4294         if (in_port->bfd) {
4295             bfd_account_rx(in_port->bfd, stats);
4296         }
4297     }
4298
4299     xlate_in_init(&xin, ofproto, flow, NULL, stats->tcp_flags, NULL);
4300     xin.resubmit_stats = stats;
4301     xin.may_learn = may_learn;
4302     xlate_actions_for_side_effects(&xin);
4303 }
4304
4305 static void
4306 facet_push_stats(struct facet *facet, bool may_learn)
4307 {
4308     struct dpif_flow_stats stats;
4309
4310     ovs_assert(facet->packet_count >= facet->prev_packet_count);
4311     ovs_assert(facet->byte_count >= facet->prev_byte_count);
4312     ovs_assert(facet->used >= facet->prev_used);
4313
4314     stats.n_packets = facet->packet_count - facet->prev_packet_count;
4315     stats.n_bytes = facet->byte_count - facet->prev_byte_count;
4316     stats.used = facet->used;
4317     stats.tcp_flags = facet->tcp_flags;
4318
4319     if (may_learn || stats.n_packets || facet->used > facet->prev_used) {
4320         facet->prev_packet_count = facet->packet_count;
4321         facet->prev_byte_count = facet->byte_count;
4322         facet->prev_used = facet->used;
4323
4324         netflow_flow_update_time(facet->ofproto->netflow, &facet->nf_flow,
4325                                  facet->used);
4326         netflow_flow_update_flags(&facet->nf_flow, facet->tcp_flags);
4327         mirror_update_stats(facet->ofproto->mbridge, facet->xout.mirrors,
4328                             stats.n_packets, stats.n_bytes);
4329         flow_push_stats(facet->ofproto, &facet->flow, &stats, may_learn);
4330     }
4331 }
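     /* Worked example of the delta computation above, with made-up numbers:
      * if facet->packet_count is 150 and facet->prev_packet_count is 100,
      * this pass pushes stats.n_packets = 50 to the rule, NetFlow, and
      * mirrors, then advances prev_packet_count to 150 so those 50 packets
      * are never credited twice. */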
4332
4333 static void
4334 push_all_stats__(bool run_fast)
4335 {
4336     static long long int rl = LLONG_MIN;
4337     struct ofproto_dpif *ofproto;
4338
4339     if (time_msec() < rl) {
4340         return;
4341     }
4342
4343     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
4344         struct cls_cursor cursor;
4345         struct facet *facet;
4346
4347         ovs_rwlock_rdlock(&ofproto->facets.rwlock);
4348         cls_cursor_init(&cursor, &ofproto->facets, NULL);
4349         CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
4350             facet_push_stats(facet, false);
4351             if (run_fast) {
4352                 run_fast_rl();
4353             }
4354         }
4355         ovs_rwlock_unlock(&ofproto->facets.rwlock);
4356     }
4357
4358     rl = time_msec() + 100;
4359 }
4360
4361 static void
4362 push_all_stats(void)
4363 {
4364     push_all_stats__(true);
4365 }
4366
4367 void
4368 rule_dpif_credit_stats(struct rule_dpif *rule,
4369                        const struct dpif_flow_stats *stats)
4370 {
4371     ovs_mutex_lock(&rule->stats_mutex);
4372     rule->packet_count += stats->n_packets;
4373     rule->byte_count += stats->n_bytes;
4374     rule->up.used = MAX(rule->up.used, stats->used);
4375     ovs_mutex_unlock(&rule->stats_mutex);
4376 }
4377
4378 bool
4379 rule_dpif_is_fail_open(const struct rule_dpif *rule)
4380 {
4381     return is_fail_open_rule(&rule->up);
4382 }
4383
4384 bool
4385 rule_dpif_is_table_miss(const struct rule_dpif *rule)
4386 {
4387     return rule_is_table_miss(&rule->up);
4388 }
4389
4390 ovs_be64
4391 rule_dpif_get_flow_cookie(const struct rule_dpif *rule)
4392     OVS_REQUIRES(rule->up.mutex)
4393 {
4394     return rule->up.flow_cookie;
4395 }
4396
4397 void
4398 rule_dpif_reduce_timeouts(struct rule_dpif *rule, uint16_t idle_timeout,
4399                           uint16_t hard_timeout)
4400 {
4401     ofproto_rule_reduce_timeouts(&rule->up, idle_timeout, hard_timeout);
4402 }
4403
4404 /* Returns 'rule''s actions.  The caller owns a reference on the returned
4405  * actions and must eventually release it (with rule_actions_unref()) to avoid
4406  * a memory leak. */
4407 struct rule_actions *
4408 rule_dpif_get_actions(const struct rule_dpif *rule)
4409 {
4410     return rule_get_actions(&rule->up);
4411 }
4412 \f
4413 /* Subfacets. */
4414
4415 static struct subfacet *
4416 subfacet_find(struct dpif_backer *backer, const struct nlattr *key,
4417               size_t key_len, uint32_t key_hash)
4418 {
4419     struct subfacet *subfacet;
4420
4421     HMAP_FOR_EACH_WITH_HASH (subfacet, hmap_node, key_hash,
4422                              &backer->subfacets) {
4423         if (subfacet->key_len == key_len
4424             && !memcmp(key, subfacet->key, key_len)) {
4425             return subfacet;
4426         }
4427     }
4428
4429     return NULL;
4430 }
4431
4432 /* Creates and returns a new subfacet within 'facet' for the flow in 'miss'.
4433  * 'key_hash' must be a hash over miss->key.  The caller must have already
4434  * ensured that no such subfacet already exists. */
4435 static struct subfacet *
4436 subfacet_create(struct facet *facet, struct flow_miss *miss, uint32_t key_hash)
4437 {
4438     struct dpif_backer *backer = miss->ofproto->backer;
4439     const struct nlattr *key = miss->key;
4440     size_t key_len = miss->key_len;
4441     struct subfacet *subfacet;
4442
4443     subfacet = (list_is_empty(&facet->subfacets)
4444                 ? &facet->one_subfacet
4445                 : xmalloc(sizeof *subfacet));
4446
4447     COVERAGE_INC(subfacet_create);
4448     hmap_insert(&backer->subfacets, &subfacet->hmap_node, key_hash);
4449     list_push_back(&facet->subfacets, &subfacet->list_node);
4450     subfacet->facet = facet;
4451     subfacet->key = xmemdup(key, key_len);
4452     subfacet->key_len = key_len;
4453     subfacet->used = miss->stats.used;
4454     subfacet->created = subfacet->used;
4455     subfacet->dp_packet_count = 0;
4456     subfacet->dp_byte_count = 0;
4457     subfacet->path = SF_NOT_INSTALLED;
4458     subfacet->backer = backer;
4459
4460     return subfacet;
4461 }
4462
4463 /* Uninstalls 'subfacet' from the datapath, if it is installed, removes it from
4464  * its facet within 'ofproto', and frees it. */
4465 static void
4466 subfacet_destroy__(struct subfacet *subfacet)
4467 {
4468     struct facet *facet = subfacet->facet;
4469
4470     COVERAGE_INC(subfacet_destroy);
4471     subfacet_uninstall(subfacet);
4472     hmap_remove(&subfacet->backer->subfacets, &subfacet->hmap_node);
4473     list_remove(&subfacet->list_node);
4474     free(subfacet->key);
4475     if (subfacet != &facet->one_subfacet) {
4476         free(subfacet);
4477     }
4478 }
4479
4480 /* Destroys 'subfacet', as with subfacet_destroy__(), and then if this was the
4481  * last remaining subfacet in its facet, destroys the facet too. */
4482 static void
4483 subfacet_destroy(struct subfacet *subfacet)
4484 {
4485     struct facet *facet = subfacet->facet;
4486
4487     if (list_is_singleton(&facet->subfacets)) {
4488         /* facet_remove() needs at least one subfacet (it will remove it). */
4489         facet_remove(facet);
4490     } else {
4491         subfacet_destroy__(subfacet);
4492     }
4493 }
4494
4495 static void
4496 subfacet_destroy_batch(struct dpif_backer *backer,
4497                        struct subfacet **subfacets, int n)
4498 {
4499     struct dpif_op ops[SUBFACET_DESTROY_MAX_BATCH];
4500     struct dpif_op *opsp[SUBFACET_DESTROY_MAX_BATCH];
4501     struct dpif_flow_stats stats[SUBFACET_DESTROY_MAX_BATCH];
4502     int i;
4503
4504     for (i = 0; i < n; i++) {
4505         ops[i].type = DPIF_OP_FLOW_DEL;
4506         ops[i].u.flow_del.key = subfacets[i]->key;
4507         ops[i].u.flow_del.key_len = subfacets[i]->key_len;
4508         ops[i].u.flow_del.stats = &stats[i];
4509         opsp[i] = &ops[i];
4510     }
4511
4512     dpif_operate(backer->dpif, opsp, n);
4513     for (i = 0; i < n; i++) {
4514         subfacet_reset_dp_stats(subfacets[i], &stats[i]);
4515         subfacets[i]->path = SF_NOT_INSTALLED;
4516         subfacet_destroy(subfacets[i]);
4517         run_fast_rl();
4518     }
4519 }
4520
4521 /* Updates 'subfacet''s datapath flow, setting its actions to the ones in
4522  * 'odp_actions'.  If 'stats' is non-null, statistics counters in the datapath
4523  * will be zeroed and 'stats' will be updated with the traffic received since
4524  * 'subfacet' was last updated.
4525  *
4526  * Returns 0 if successful, otherwise a positive errno value. */
4527 static int
4528 subfacet_install(struct subfacet *subfacet, const struct ofpbuf *odp_actions,
4529                  struct dpif_flow_stats *stats)
4530 {
4531     struct facet *facet = subfacet->facet;
4532     enum subfacet_path path = facet->xout.slow ? SF_SLOW_PATH : SF_FAST_PATH;
4533     const struct nlattr *actions = odp_actions->data;
4534     size_t actions_len = odp_actions->size;
4535     struct odputil_keybuf maskbuf;
4536     struct ofpbuf mask;
4537
4538     uint64_t slow_path_stub[128 / 8];
4539     enum dpif_flow_put_flags flags;
4540     int ret;
4541
4542     flags = subfacet->path == SF_NOT_INSTALLED ? DPIF_FP_CREATE
4543                                                : DPIF_FP_MODIFY;
4544     if (stats) {
4545         flags |= DPIF_FP_ZERO_STATS;
4546     }
4547
4548     if (path == SF_SLOW_PATH) {
4549         compose_slow_path(facet->ofproto, &facet->flow, facet->xout.slow,
4550                           slow_path_stub, sizeof slow_path_stub,
4551                           &actions, &actions_len);
4552     }
4553
4554     ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
4555     if (enable_megaflows) {
4556         odp_flow_key_from_mask(&mask, &facet->xout.wc.masks,
4557                                &facet->flow, UINT32_MAX);
4558     }
4559
4560     ret = dpif_flow_put(subfacet->backer->dpif, flags, subfacet->key,
4561                         subfacet->key_len,  mask.data, mask.size,
4562                         actions, actions_len, stats);
4563
4564     if (stats) {
4565         subfacet_reset_dp_stats(subfacet, stats);
4566     }
4567
4568     if (ret) {
4569         COVERAGE_INC(subfacet_install_fail);
4570     } else {
4571         subfacet->path = path;
4572     }
4573     return ret;
4574 }
4575
4576 /* If 'subfacet' is installed in the datapath, uninstalls it. */
4577 static void
4578 subfacet_uninstall(struct subfacet *subfacet)
4579 {
4580     if (subfacet->path != SF_NOT_INSTALLED) {
4581         struct ofproto_dpif *ofproto = subfacet->facet->ofproto;
4582         struct dpif_flow_stats stats;
4583         int error;
4584
4585         error = dpif_flow_del(ofproto->backer->dpif, subfacet->key,
4586                               subfacet->key_len, &stats);
4587         subfacet_reset_dp_stats(subfacet, &stats);
4588         if (!error) {
4589             subfacet_update_stats(subfacet, &stats);
4590         }
4591         subfacet->path = SF_NOT_INSTALLED;
4592     } else {
4593         ovs_assert(subfacet->dp_packet_count == 0);
4594         ovs_assert(subfacet->dp_byte_count == 0);
4595     }
4596 }
4597
4598 /* Resets 'subfacet''s datapath statistics counters.  This should be called
4599  * when 'subfacet''s statistics are cleared in the datapath.  If 'stats' is
4600  * non-null, it should contain the statistics returned by dpif when 'subfacet'
4601  * was reset in the datapath.  'stats' will be modified to include only
4602  * statistics new since 'subfacet' was last updated. */
4603 static void
4604 subfacet_reset_dp_stats(struct subfacet *subfacet,
4605                         struct dpif_flow_stats *stats)
4606 {
4607     if (stats
4608         && subfacet->dp_packet_count <= stats->n_packets
4609         && subfacet->dp_byte_count <= stats->n_bytes) {
4610         stats->n_packets -= subfacet->dp_packet_count;
4611         stats->n_bytes -= subfacet->dp_byte_count;
4612     }
4613
4614     subfacet->dp_packet_count = 0;
4615     subfacet->dp_byte_count = 0;
4616 }
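     /* Worked example with made-up numbers: if 'subfacet' had seen
      * dp_packet_count == 10 and the datapath reset returns
      * stats->n_packets == 15, the function above rewrites stats->n_packets
      * to 15 - 10 = 5, i.e. only the packets new since the last update, and
      * zeroes the dp_* counters to match the reset.  If the datapath
      * reported fewer packets than we had already seen, 'stats' is left
      * unmodified rather than going negative. */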
4617
4618 /* Folds the statistics from 'stats' into the counters in 'subfacet'.
4619  *
4620  * Because of the meaning of a subfacet's counters, it only makes sense to do
4621  * this if 'stats' are not tracked in the datapath, that is, if 'stats'
4622  * represents a packet that was sent by hand or if it represents statistics
4623  * that have been cleared out of the datapath. */
4624 static void
4625 subfacet_update_stats(struct subfacet *subfacet,
4626                       const struct dpif_flow_stats *stats)
4627 {
4628     if (stats->n_packets || stats->used > subfacet->used) {
4629         struct facet *facet = subfacet->facet;
4630
4631         subfacet->used = MAX(subfacet->used, stats->used);
4632         facet->used = MAX(facet->used, stats->used);
4633         facet->packet_count += stats->n_packets;
4634         facet->byte_count += stats->n_bytes;
4635         facet->tcp_flags |= stats->tcp_flags;
4636     }
4637 }
4638 \f
4639 /* Rules. */
4640
4641 /* Looks up 'flow' in 'ofproto''s classifier.  If 'wc' is non-null, marks
4642  * in 'wc' the fields that were relevant to the lookup. */
4643 void
4644 rule_dpif_lookup(struct ofproto_dpif *ofproto, const struct flow *flow,
4645                  struct flow_wildcards *wc, struct rule_dpif **rule)
4646 {
4647     struct ofport_dpif *port;
4648
4649     if (rule_dpif_lookup_in_table(ofproto, flow, wc, 0, rule)) {
4650         return;
4651     }
4652     port = get_ofp_port(ofproto, flow->in_port.ofp_port);
4653     if (!port) {
4654         VLOG_WARN_RL(&rl, "packet-in on unknown OpenFlow port %"PRIu16,
4655                      flow->in_port.ofp_port);
4656     }
4657
4658     choose_miss_rule(port ? port->up.pp.config : 0, ofproto->miss_rule,
4659                      ofproto->no_packet_in_rule, rule);
4660 }
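
/* A minimal caller sketch for rule_dpif_lookup() (illustrative only, kept
 * out of the build; 'ofproto' and 'flow' are assumed to be valid).  The
 * lookup always yields a rule, falling back to the miss rule, and takes a
 * reference that the caller must release with rule_dpif_unref(). */
#if 0
static void
example_rule_lookup(struct ofproto_dpif *ofproto, const struct flow *flow)
{
    struct flow_wildcards wc;
    struct rule_dpif *rule;

    flow_wildcards_init_catchall(&wc);
    rule_dpif_lookup(ofproto, flow, &wc, &rule);
    /* ... use 'rule' and the fields unwildcarded in 'wc' ... */
    rule_dpif_unref(rule);
}
#endif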
4661
4662 bool
4663 rule_dpif_lookup_in_table(struct ofproto_dpif *ofproto,
4664                           const struct flow *flow, struct flow_wildcards *wc,
4665                           uint8_t table_id, struct rule_dpif **rule)
4666 {
4667     const struct cls_rule *cls_rule;
4668     struct classifier *cls;
4669     bool frag;
4670
4671     *rule = NULL;
4672     if (table_id >= N_TABLES) {
4673         return false;
4674     }
4675
4676     if (wc) {
4677         memset(&wc->masks.dl_type, 0xff, sizeof wc->masks.dl_type);
4678         wc->masks.nw_frag |= FLOW_NW_FRAG_MASK;
4679     }
4680
4681     cls = &ofproto->up.tables[table_id].cls;
4682     ovs_rwlock_rdlock(&cls->rwlock);
4683     frag = (flow->nw_frag & FLOW_NW_FRAG_ANY) != 0;
4684     if (frag && ofproto->up.frag_handling == OFPC_FRAG_NORMAL) {
4685         /* We must pretend that transport ports are unavailable. */
4686         struct flow ofpc_normal_flow = *flow;
4687         ofpc_normal_flow.tp_src = htons(0);
4688         ofpc_normal_flow.tp_dst = htons(0);
4689         cls_rule = classifier_lookup(cls, &ofpc_normal_flow, wc);
4690     } else if (frag && ofproto->up.frag_handling == OFPC_FRAG_DROP) {
4691         cls_rule = &ofproto->drop_frags_rule->up.cr;
4692         /* Frag mask in wc already set above. */
4693     } else {
4694         cls_rule = classifier_lookup(cls, flow, wc);
4695     }
4696
4697     *rule = rule_dpif_cast(rule_from_cls_rule(cls_rule));
4698     rule_dpif_ref(*rule);
4699     ovs_rwlock_unlock(&cls->rwlock);
4700
4701     return *rule != NULL;
4702 }
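
/* To make the fragment handling above concrete: for a flow with
 * FLOW_NW_FRAG_ANY set, OFPC_FRAG_NORMAL performs the lookup with
 * tp_src/tp_dst zeroed, OFPC_FRAG_DROP skips the lookup and returns the
 * hidden drop_frags_rule, and any other mode looks the flow up unchanged.
 * Non-fragments always take the ordinary lookup path. */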
4703
4704 /* Given a port configuration (specified as zero if there's no port), chooses
4705  * which of 'miss_rule' and 'no_packet_in_rule' should be used in case of a
4706  * flow table miss. */
4707 void
4708 choose_miss_rule(enum ofputil_port_config config, struct rule_dpif *miss_rule,
4709                  struct rule_dpif *no_packet_in_rule, struct rule_dpif **rule)
4710 {
4711     *rule = config & OFPUTIL_PC_NO_PACKET_IN ? no_packet_in_rule : miss_rule;
4712     rule_dpif_ref(*rule);
4713 }
4714
4715 void
4716 rule_dpif_ref(struct rule_dpif *rule)
4717 {
4718     if (rule) {
4719         ofproto_rule_ref(&rule->up);
4720     }
4721 }
4722
4723 void
4724 rule_dpif_unref(struct rule_dpif *rule)
4725 {
4726     if (rule) {
4727         ofproto_rule_unref(&rule->up);
4728     }
4729 }
4730
4731 static void
4732 complete_operation(struct rule_dpif *rule)
4733     OVS_REQUIRES(ofproto_mutex)
4734 {
4735     struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4736
4737     ofproto->backer->need_revalidate = REV_FLOW_TABLE;
4738     ofoperation_complete(rule->up.pending, 0);
4739 }
4740
4741 static struct rule_dpif *rule_dpif_cast(const struct rule *rule)
4742 {
4743     return rule ? CONTAINER_OF(rule, struct rule_dpif, up) : NULL;
4744 }
4745
4746 static struct rule *
4747 rule_alloc(void)
4748 {
4749     struct rule_dpif *rule = xmalloc(sizeof *rule);
4750     return &rule->up;
4751 }
4752
4753 static void
4754 rule_dealloc(struct rule *rule_)
4755 {
4756     struct rule_dpif *rule = rule_dpif_cast(rule_);
4757     free(rule);
4758 }
4759
4760 static enum ofperr
4761 rule_construct(struct rule *rule_)
4762 {
4763     struct rule_dpif *rule = rule_dpif_cast(rule_);
4764     ovs_mutex_init(&rule->stats_mutex);
4765     ovs_mutex_lock(&rule->stats_mutex);
4766     rule->packet_count = 0;
4767     rule->byte_count = 0;
4768     ovs_mutex_unlock(&rule->stats_mutex);
4769     return 0;
4770 }
4771
4772 static void
4773 rule_insert(struct rule *rule_)
4774     OVS_REQUIRES(ofproto_mutex)
4775 {
4776     struct rule_dpif *rule = rule_dpif_cast(rule_);
4777     complete_operation(rule);
4778 }
4779
4780 static void
4781 rule_delete(struct rule *rule_)
4782     OVS_REQUIRES(ofproto_mutex)
4783 {
4784     struct rule_dpif *rule = rule_dpif_cast(rule_);
4785     complete_operation(rule);
4786 }
4787
4788 static void
4789 rule_destruct(struct rule *rule_)
4790 {
4791     struct rule_dpif *rule = rule_dpif_cast(rule_);
4792     ovs_mutex_destroy(&rule->stats_mutex);
4793 }
4794
4795 static void
4796 rule_get_stats(struct rule *rule_, uint64_t *packets, uint64_t *bytes)
4797 {
4798     struct rule_dpif *rule = rule_dpif_cast(rule_);
4799
4800     /* push_all_stats() can handle flow misses, which, when the learn action
4801      * is involved, can cause rules to be added and deleted.  This can corrupt
4802      * our caller's data structures, which assume that rule_get_stats() has no
4803      * effect on the flow table.  To be safe, we disable miss handling. */
4804     push_all_stats__(false);
4805
4806     /* Start from historical data for 'rule' itself that are no longer tracked
4807      * in facets.  This counts, for example, facets that have expired. */
4808     ovs_mutex_lock(&rule->stats_mutex);
4809     *packets = rule->packet_count;
4810     *bytes = rule->byte_count;
4811     ovs_mutex_unlock(&rule->stats_mutex);
4812 }
4813
4814 static void
4815 rule_dpif_execute(struct rule_dpif *rule, const struct flow *flow,
4816                   struct ofpbuf *packet)
4817 {
4818     struct ofproto_dpif *ofproto = ofproto_dpif_cast(rule->up.ofproto);
4819
4820     ofproto_dpif_execute_actions(ofproto, flow, rule, NULL, 0, packet);
4821 }
4822
4823 static enum ofperr
4824 rule_execute(struct rule *rule, const struct flow *flow,
4825              struct ofpbuf *packet)
4826 {
4827     rule_dpif_execute(rule_dpif_cast(rule), flow, packet);
4828     ofpbuf_delete(packet);
4829     return 0;
4830 }
4831
4832 static void
4833 rule_modify_actions(struct rule *rule_, bool reset_counters)
4834     OVS_REQUIRES(ofproto_mutex)
4835 {
4836     struct rule_dpif *rule = rule_dpif_cast(rule_);
4837
4838     if (reset_counters) {
4839         ovs_mutex_lock(&rule->stats_mutex);
4840         rule->packet_count = 0;
4841         rule->byte_count = 0;
4842         ovs_mutex_unlock(&rule->stats_mutex);
4843     }
4844
4845     complete_operation(rule);
4846 }
4847
4848 static struct group_dpif *group_dpif_cast(const struct ofgroup *group)
4849 {
4850     return group ? CONTAINER_OF(group, struct group_dpif, up) : NULL;
4851 }
4852
4853 static struct ofgroup *
4854 group_alloc(void)
4855 {
4856     struct group_dpif *group = xzalloc(sizeof *group);
4857     return &group->up;
4858 }
4859
4860 static void
4861 group_dealloc(struct ofgroup *group_)
4862 {
4863     struct group_dpif *group = group_dpif_cast(group_);
4864     free(group);
4865 }
4866
4867 static void
4868 group_construct_stats(struct group_dpif *group)
4869     OVS_REQUIRES(group->stats_mutex)
4870 {
4871     group->packet_count = 0;
4872     group->byte_count = 0;
4873     if (!group->bucket_stats) {
4874         group->bucket_stats = xcalloc(group->up.n_buckets,
4875                                       sizeof *group->bucket_stats);
4876     } else {
4877         memset(group->bucket_stats, 0, group->up.n_buckets *
4878                sizeof *group->bucket_stats);
4879     }
4880 }
4881
4882 static enum ofperr
4883 group_construct(struct ofgroup *group_)
4884 {
4885     struct group_dpif *group = group_dpif_cast(group_);
4886     ovs_mutex_init(&group->stats_mutex);
4887     ovs_mutex_lock(&group->stats_mutex);
4888     group_construct_stats(group);
4889     ovs_mutex_unlock(&group->stats_mutex);
4890     return 0;
4891 }
4892
4893 static void
4894 group_destruct__(struct group_dpif *group)
4895     OVS_REQUIRES(group->stats_mutex)
4896 {
4897     free(group->bucket_stats);
4898     group->bucket_stats = NULL;
4899 }
4900
4901 static void
4902 group_destruct(struct ofgroup *group_)
4903 {
4904     struct group_dpif *group = group_dpif_cast(group_);
4905     ovs_mutex_lock(&group->stats_mutex);
4906     group_destruct__(group);
4907     ovs_mutex_unlock(&group->stats_mutex);
4908     ovs_mutex_destroy(&group->stats_mutex);
4909 }
4910
4911 static enum ofperr
4912 group_modify(struct ofgroup *group_, struct ofgroup *victim_)
4913 {
4914     struct group_dpif *group = group_dpif_cast(group_);
4915     struct group_dpif *victim = group_dpif_cast(victim_);
4916
4917     ovs_mutex_lock(&group->stats_mutex);
4918     if (victim->up.n_buckets < group->up.n_buckets) {
4919         group_destruct__(group);
4920     }
4921     group_construct_stats(group);
4922     ovs_mutex_unlock(&group->stats_mutex);
4923
4924     return 0;
4925 }
4926
4927 static enum ofperr
4928 group_get_stats(const struct ofgroup *group_, struct ofputil_group_stats *ogs)
4929 {
4930     struct group_dpif *group = group_dpif_cast(group_);
4931
4932     /* Group statistics are maintained directly in 'group', so just copy
4933      * them out under 'stats_mutex'. */
4934     ovs_mutex_lock(&group->stats_mutex);
4935     ogs->packet_count = group->packet_count;
4936     ogs->byte_count = group->byte_count;
4937     memcpy(ogs->bucket_stats, group->bucket_stats,
4938            group->up.n_buckets * sizeof *group->bucket_stats);
4939     ovs_mutex_unlock(&group->stats_mutex);
4940
4941     return 0;
4942 }
4943
4944 bool
4945 group_dpif_lookup(struct ofproto_dpif *ofproto, uint32_t group_id,
4946                   struct group_dpif **group)
4947     OVS_TRY_RDLOCK(true, (*group)->up.rwlock)
4948 {
4949     struct ofgroup *ofgroup;
4950     bool found;
4951
4952     found = ofproto_group_lookup(&ofproto->up, group_id, &ofgroup);
4953     *group = found ? group_dpif_cast(ofgroup) : NULL;
4955
4956     return found;
4957 }
4958
4959 void
4960 group_dpif_release(struct group_dpif *group)
4961     OVS_RELEASES(group->up.rwlock)
4962 {
4963     ofproto_group_release(&group->up);
4964 }
4965
4966 void
4967 group_dpif_get_buckets(const struct group_dpif *group,
4968                        const struct list **buckets)
4969 {
4970     *buckets = &group->up.buckets;
4971 }
4972
4973 enum ofp11_group_type
4974 group_dpif_get_type(const struct group_dpif *group)
4975 {
4976     return group->up.type;
4977 }
4978 \f
4979 /* Sends 'packet' out 'ofport'.
4980  * May modify 'packet'.
4981  * Returns 0 if successful, otherwise a positive errno value. */
4982 int
4983 ofproto_dpif_send_packet(const struct ofport_dpif *ofport, struct ofpbuf *packet)
4984 {
4985     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport->up.ofproto);
4986     int error;
4987
4988     error = xlate_send_packet(ofport, packet);
4989
4990     ovs_mutex_lock(&ofproto->stats_mutex);
4991     ofproto->stats.tx_packets++;
4992     ofproto->stats.tx_bytes += packet->size;
4993     ovs_mutex_unlock(&ofproto->stats_mutex);
4994     return error;
4995 }
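
/* A sketch of a typical ofproto_dpif_send_packet() call (hypothetical
 * helper, kept out of the build; the frame contents are elided): */
#if 0
static void
example_send(const struct ofport_dpif *ofport)
{
    struct ofpbuf *packet = ofpbuf_new(ETH_HEADER_LEN);
    int error;

    /* ... compose an Ethernet frame in 'packet' ... */
    error = ofproto_dpif_send_packet(ofport, packet);
    if (error) {
        VLOG_WARN("failed to send packet (%s)", ovs_strerror(error));
    }
    ofpbuf_delete(packet);
}
#endif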
4996
4997 /* Composes an ODP action for a "slow path" action for 'flow' within 'ofproto'.
4998  * The action will state 'slow' as the reason that the action is in the slow
4999  * path.  (This is purely informational: it allows a human viewing "ovs-dpctl
5000  * dump-flows" output to see why a flow is in the slow path.)
5001  *
5002  * The 'stub_size' bytes in 'stub' will be used to store the action.
5003  * 'stub_size' must be large enough for the action.
5004  *
5005  * The action and its size will be stored in '*actionsp' and '*actions_lenp',
5006  * respectively. */
5007 static void
5008 compose_slow_path(const struct ofproto_dpif *ofproto, const struct flow *flow,
5009                   enum slow_path_reason slow,
5010                   uint64_t *stub, size_t stub_size,
5011                   const struct nlattr **actionsp, size_t *actions_lenp)
5012 {
5013     union user_action_cookie cookie;
5014     struct ofpbuf buf;
5015
5016     cookie.type = USER_ACTION_COOKIE_SLOW_PATH;
5017     cookie.slow_path.unused = 0;
5018     cookie.slow_path.reason = slow;
5019
5020     ofpbuf_use_stack(&buf, stub, stub_size);
5021     if (slow & (SLOW_CFM | SLOW_BFD | SLOW_LACP | SLOW_STP)) {
5022         uint32_t pid = dpif_port_get_pid(ofproto->backer->dpif,
5023                                          ODPP_NONE);
5024         odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf);
5025     } else {
5026         odp_port_t odp_port;
5027         uint32_t pid;
5028
5029         odp_port = ofp_port_to_odp_port(ofproto, flow->in_port.ofp_port);
5030         pid = dpif_port_get_pid(ofproto->backer->dpif, odp_port);
5031         odp_put_userspace_action(pid, &cookie, sizeof cookie.slow_path, &buf);
5032     }
5033     *actionsp = buf.data;
5034     *actions_lenp = buf.size;
5035 }
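
/* Typical use of compose_slow_path(), mirroring its callers elsewhere in
 * this file (a fragment, not compiled; 'facet' is assumed to have a nonzero
 * xout.slow): */
#if 0
    uint64_t slow_path_stub[128 / 8];
    const struct nlattr *actions;
    size_t actions_len;

    compose_slow_path(facet->ofproto, &facet->flow, facet->xout.slow,
                      slow_path_stub, sizeof slow_path_stub,
                      &actions, &actions_len);
    /* 'actions' and 'actions_len' may now be installed in the datapath or
     * formatted for display. */
#endif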
5036 \f
5037 static bool
5038 set_frag_handling(struct ofproto *ofproto_,
5039                   enum ofp_config_flags frag_handling)
5040 {
5041     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
5042     if (frag_handling != OFPC_FRAG_REASM) {
5043         ofproto->backer->need_revalidate = REV_RECONFIGURE;
5044         return true;
5045     } else {
5046         return false;
5047     }
5048 }
5049
5050 static enum ofperr
5051 packet_out(struct ofproto *ofproto_, struct ofpbuf *packet,
5052            const struct flow *flow,
5053            const struct ofpact *ofpacts, size_t ofpacts_len)
5054 {
5055     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
5056
5057     ofproto_dpif_execute_actions(ofproto, flow, NULL, ofpacts,
5058                                  ofpacts_len, packet);
5059     return 0;
5060 }
5061 \f
5062 /* NetFlow. */
5063
5064 static int
5065 set_netflow(struct ofproto *ofproto_,
5066             const struct netflow_options *netflow_options)
5067 {
5068     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
5069
5070     if (netflow_options) {
5071         if (!ofproto->netflow) {
5072             ofproto->netflow = netflow_create();
5073             ofproto->backer->need_revalidate = REV_RECONFIGURE;
5074         }
5075         return netflow_set_options(ofproto->netflow, netflow_options);
5076     } else if (ofproto->netflow) {
5077         ofproto->backer->need_revalidate = REV_RECONFIGURE;
5078         netflow_destroy(ofproto->netflow);
5079         ofproto->netflow = NULL;
5080     }
5081
5082     return 0;
5083 }
5084
5085 static void
5086 get_netflow_ids(const struct ofproto *ofproto_,
5087                 uint8_t *engine_type, uint8_t *engine_id)
5088 {
5089     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
5090
5091     dpif_get_netflow_ids(ofproto->backer->dpif, engine_type, engine_id);
5092 }
5093
5094 static void
5095 send_active_timeout(struct ofproto_dpif *ofproto, struct facet *facet)
5096 {
5097     if (!facet_is_controller_flow(facet) &&
5098         netflow_active_timeout_expired(ofproto->netflow, &facet->nf_flow)) {
5099         struct subfacet *subfacet;
5100         struct ofexpired expired;
5101
5102         LIST_FOR_EACH (subfacet, list_node, &facet->subfacets) {
5103             if (subfacet->path == SF_FAST_PATH) {
5104                 struct dpif_flow_stats stats;
5105
5106                 subfacet_install(subfacet, &facet->xout.odp_actions,
5107                                  &stats);
5108                 subfacet_update_stats(subfacet, &stats);
5109             }
5110         }
5111
5112         expired.flow = facet->flow;
5113         expired.packet_count = facet->packet_count;
5114         expired.byte_count = facet->byte_count;
5115         expired.used = facet->used;
5116         netflow_expire(ofproto->netflow, &facet->nf_flow, &expired);
5117     }
5118 }
5119
5120 static void
5121 send_netflow_active_timeouts(struct ofproto_dpif *ofproto)
5122 {
5123     struct cls_cursor cursor;
5124     struct facet *facet;
5125
5126     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
5127     cls_cursor_init(&cursor, &ofproto->facets, NULL);
5128     CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
5129         send_active_timeout(ofproto, facet);
5130     }
5131     ovs_rwlock_unlock(&ofproto->facets.rwlock);
5132 }
5133 \f
5134 static struct ofproto_dpif *
5135 ofproto_dpif_lookup(const char *name)
5136 {
5137     struct ofproto_dpif *ofproto;
5138
5139     HMAP_FOR_EACH_WITH_HASH (ofproto, all_ofproto_dpifs_node,
5140                              hash_string(name, 0), &all_ofproto_dpifs) {
5141         if (!strcmp(ofproto->up.name, name)) {
5142             return ofproto;
5143         }
5144     }
5145     return NULL;
5146 }
5147
5148 static void
5149 ofproto_unixctl_fdb_flush(struct unixctl_conn *conn, int argc,
5150                           const char *argv[], void *aux OVS_UNUSED)
5151 {
5152     struct ofproto_dpif *ofproto;
5153
5154     if (argc > 1) {
5155         ofproto = ofproto_dpif_lookup(argv[1]);
5156         if (!ofproto) {
5157             unixctl_command_reply_error(conn, "no such bridge");
5158             return;
5159         }
5160         ovs_rwlock_wrlock(&ofproto->ml->rwlock);
5161         mac_learning_flush(ofproto->ml);
5162         ovs_rwlock_unlock(&ofproto->ml->rwlock);
5163     } else {
5164         HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5165             ovs_rwlock_wrlock(&ofproto->ml->rwlock);
5166             mac_learning_flush(ofproto->ml);
5167             ovs_rwlock_unlock(&ofproto->ml->rwlock);
5168         }
5169     }
5170
5171     unixctl_command_reply(conn, "table successfully flushed");
5172 }
5173
5174 static struct ofport_dpif *
5175 ofbundle_get_a_port(const struct ofbundle *bundle)
5176 {
5177     return CONTAINER_OF(list_front(&bundle->ports), struct ofport_dpif,
5178                         bundle_node);
5179 }
5180
5181 static void
5182 ofproto_unixctl_fdb_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
5183                          const char *argv[], void *aux OVS_UNUSED)
5184 {
5185     struct ds ds = DS_EMPTY_INITIALIZER;
5186     const struct ofproto_dpif *ofproto;
5187     const struct mac_entry *e;
5188
5189     ofproto = ofproto_dpif_lookup(argv[1]);
5190     if (!ofproto) {
5191         unixctl_command_reply_error(conn, "no such bridge");
5192         return;
5193     }
5194
5195     ds_put_cstr(&ds, " port  VLAN  MAC                Age\n");
5196     ovs_rwlock_rdlock(&ofproto->ml->rwlock);
5197     LIST_FOR_EACH (e, lru_node, &ofproto->ml->lrus) {
5198         struct ofbundle *bundle = e->port.p;
5199         char name[OFP_MAX_PORT_NAME_LEN];
5200
5201         ofputil_port_to_string(ofbundle_get_a_port(bundle)->up.ofp_port,
5202                                name, sizeof name);
5203         ds_put_format(&ds, "%5s  %4d  "ETH_ADDR_FMT"  %3d\n",
5204                       name, e->vlan, ETH_ADDR_ARGS(e->mac),
5205                       mac_entry_age(ofproto->ml, e));
5206     }
5207     ovs_rwlock_unlock(&ofproto->ml->rwlock);
5208     unixctl_command_reply(conn, ds_cstr(&ds));
5209     ds_destroy(&ds);
5210 }
5211
5212 struct trace_ctx {
5213     struct xlate_out xout;
5214     struct xlate_in xin;
5215     struct flow flow;
5216     struct ds *result;
5217 };
5218
5219 static void
5220 trace_format_rule(struct ds *result, int level, const struct rule_dpif *rule)
5221 {
5222     struct rule_actions *actions;
5223     ovs_be64 cookie;
5224
5225     ds_put_char_multiple(result, '\t', level);
5226     if (!rule) {
5227         ds_put_cstr(result, "No match\n");
5228         return;
5229     }
5230
5231     ovs_mutex_lock(&rule->up.mutex);
5232     cookie = rule->up.flow_cookie;
5233     ovs_mutex_unlock(&rule->up.mutex);
5234
5235     ds_put_format(result, "Rule: table=%"PRIu8" cookie=%#"PRIx64" ",
5236                   rule->up.table_id, ntohll(cookie));
5237     cls_rule_format(&rule->up.cr, result);
5238     ds_put_char(result, '\n');
5239
5240     actions = rule_dpif_get_actions(rule);
5241
5242     ds_put_char_multiple(result, '\t', level);
5243     ds_put_cstr(result, "OpenFlow actions=");
5244     ofpacts_format(actions->ofpacts, actions->ofpacts_len, result);
5245     ds_put_char(result, '\n');
5246
5247     rule_actions_unref(actions);
5248 }
5249
5250 static void
5251 trace_format_flow(struct ds *result, int level, const char *title,
5252                   struct trace_ctx *trace)
5253 {
5254     ds_put_char_multiple(result, '\t', level);
5255     ds_put_format(result, "%s: ", title);
5256     if (flow_equal(&trace->xin.flow, &trace->flow)) {
5257         ds_put_cstr(result, "unchanged");
5258     } else {
5259         flow_format(result, &trace->xin.flow);
5260         trace->flow = trace->xin.flow;
5261     }
5262     ds_put_char(result, '\n');
5263 }
5264
5265 static void
5266 trace_format_regs(struct ds *result, int level, const char *title,
5267                   struct trace_ctx *trace)
5268 {
5269     size_t i;
5270
5271     ds_put_char_multiple(result, '\t', level);
5272     ds_put_format(result, "%s:", title);
5273     for (i = 0; i < FLOW_N_REGS; i++) {
5274         ds_put_format(result, " reg%"PRIuSIZE"=0x%"PRIx32, i, trace->flow.regs[i]);
5275     }
5276     ds_put_char(result, '\n');
5277 }
5278
5279 static void
5280 trace_format_odp(struct ds *result, int level, const char *title,
5281                  struct trace_ctx *trace)
5282 {
5283     struct ofpbuf *odp_actions = &trace->xout.odp_actions;
5284
5285     ds_put_char_multiple(result, '\t', level);
5286     ds_put_format(result, "%s: ", title);
5287     format_odp_actions(result, odp_actions->data, odp_actions->size);
5288     ds_put_char(result, '\n');
5289 }
5290
5291 static void
5292 trace_resubmit(struct xlate_in *xin, struct rule_dpif *rule, int recurse)
5293 {
5294     struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
5295     struct ds *result = trace->result;
5296
5297     ds_put_char(result, '\n');
5298     trace_format_flow(result, recurse + 1, "Resubmitted flow", trace);
5299     trace_format_regs(result, recurse + 1, "Resubmitted regs", trace);
5300     trace_format_odp(result,  recurse + 1, "Resubmitted  odp", trace);
5301     trace_format_rule(result, recurse + 1, rule);
5302 }
5303
5304 static void
5305 trace_report(struct xlate_in *xin, const char *s, int recurse)
5306 {
5307     struct trace_ctx *trace = CONTAINER_OF(xin, struct trace_ctx, xin);
5308     struct ds *result = trace->result;
5309
5310     ds_put_char_multiple(result, '\t', recurse);
5311     ds_put_cstr(result, s);
5312     ds_put_char(result, '\n');
5313 }
5314
5315 /* Parses the 'argc' elements of 'argv', ignoring argv[0].  The following
5316  * forms are supported:
5317  *
5318  *     - [dpname] odp_flow [-generate | packet]
5319  *     - bridge br_flow [-generate | packet]
5320  *
5321  * On success, initializes '*ofprotop' and 'flow' and returns NULL.  On failure
5322  * returns a nonnull error message. */
5323 static const char *
5324 parse_flow_and_packet(int argc, const char *argv[],
5325                       struct ofproto_dpif **ofprotop, struct flow *flow,
5326                       struct ofpbuf **packetp)
5327 {
5328     const struct dpif_backer *backer = NULL;
5329     const char *error = NULL;
5330     struct simap port_names = SIMAP_INITIALIZER(&port_names);
5331     struct ofpbuf *packet;
5332     struct ofpbuf odp_key;
5333     struct ofpbuf odp_mask;
5334
5335     ofpbuf_init(&odp_key, 0);
5336     ofpbuf_init(&odp_mask, 0);
5337
5338     /* Handle "-generate" or a hex string as the last argument. */
5339     if (!strcmp(argv[argc - 1], "-generate")) {
5340         packet = ofpbuf_new(0);
5341         argc--;
5342     } else {
5343         error = eth_from_hex(argv[argc - 1], &packet);
5344         if (!error) {
5345             argc--;
5346         } else if (argc == 4) {
5347             /* The 3-argument form must end in "-generate" or a hex string. */
5348             goto exit;
5349         }
5350     }
5351
5352     /* An odp_flow can specify its in_port as a name instead of a port
5353      * number.  We do not yet know whether a given flow is an odp_flow or a
5354      * br_flow, but to test for an odp_flow with odp_flow_from_string() we
5355      * need a simap from port name to port number. */
5356     if (argc == 3) {
5357         const char *dp_type;
5358         if (!strncmp(argv[1], "ovs-", 4)) {
5359             dp_type = argv[1] + 4;
5360         } else {
5361             dp_type = argv[1];
5362         }
5363         backer = shash_find_data(&all_dpif_backers, dp_type);
5364     } else if (argc == 2) {
5365         struct shash_node *node;
5366         if (shash_count(&all_dpif_backers) == 1) {
5367             node = shash_first(&all_dpif_backers);
5368             backer = node->data;
5369         }
5370     } else {
5371         error = "Syntax error";
5372         goto exit;
5373     }
5374     if (backer && backer->dpif) {
5375         struct dpif_port dpif_port;
5376         struct dpif_port_dump port_dump;
5377         DPIF_PORT_FOR_EACH (&dpif_port, &port_dump, backer->dpif) {
5378             simap_put(&port_names, dpif_port.name,
5379                       odp_to_u32(dpif_port.port_no));
5380         }
5381     }
5382
5383     /* Parse the flow and determine whether a datapath or a bridge is
5384      * specified.  If odp_flow_from_string() returns 0, the flow is an
5385      * odp_flow.  If parse_ofp_exact_flow() returns 0, the flow is a
5386      * br_flow. */
5387     if (!odp_flow_from_string(argv[argc - 1], &port_names,
5388                               &odp_key, &odp_mask)) {
5389         if (!backer) {
5390             error = "Cannot find the datapath";
5391             goto exit;
5392         }
5393
5394         if (xlate_receive(backer, NULL, odp_key.data, odp_key.size, flow,
5395                           NULL, ofprotop, NULL)) {
5396             error = "Invalid datapath flow";
5397             goto exit;
5398         }
5399     } else if (!parse_ofp_exact_flow(flow, NULL, argv[argc - 1], NULL)) {
5400         if (argc != 3) {
5401             error = "Must specify bridge name";
5402             goto exit;
5403         }
5404
5405         *ofprotop = ofproto_dpif_lookup(argv[1]);
5406         if (!*ofprotop) {
5407             error = "Unknown bridge name";
5408             goto exit;
5409         }
5410     } else {
5411         error = "Bad flow syntax";
5412         goto exit;
5413     }
5414
5415     /* Generate a packet, if requested. */
5416     if (packet) {
5417         if (!packet->size) {
5418             flow_compose(packet, flow);
5419         } else {
5420             union flow_in_port in_port = flow->in_port;
5421
5422             /* Use the metadata from the flow and the packet argument
5423              * to reconstruct the flow. */
5424             flow_extract(packet, flow->skb_priority, flow->pkt_mark, NULL,
5425                          &in_port, flow);
5426         }
5427     }
5428
5429     error = NULL;
5430
5431 exit:
5432     if (error) {
5433         ofpbuf_delete(packet);
5434         packet = NULL;
5435     }
5436     *packetp = packet;
5437     ofpbuf_uninit(&odp_key);
5438     ofpbuf_uninit(&odp_mask);
5439     simap_destroy(&port_names);
5440     return error;
5441 }
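
/* For example (bridge, datapath, and flow values are hypothetical), the
 * parser above accepts argument lists such as:
 *
 *     br0 'in_port=1,dl_type=0x0800' -generate        (br_flow form)
 *     ovs-system 'in_port(1),eth_type(0x0800)'        (odp_flow form)
 *
 * where the trailing "-generate" or hex packet string is optional. */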
5442
5443 static void
5444 ofproto_unixctl_trace(struct unixctl_conn *conn, int argc, const char *argv[],
5445                       void *aux OVS_UNUSED)
5446 {
5447     struct ofproto_dpif *ofproto;
5448     struct ofpbuf *packet;
5449     const char *error;
5450     struct flow flow;
5451
5452     error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
5453     if (!error) {
5454         struct ds result;
5455
5456         ds_init(&result);
5457         ofproto_trace(ofproto, &flow, packet, NULL, 0, &result);
5458         unixctl_command_reply(conn, ds_cstr(&result));
5459         ds_destroy(&result);
5460         ofpbuf_delete(packet);
5461     } else {
5462         unixctl_command_reply_error(conn, error);
5463     }
5464 }
5465
5466 static void
5467 ofproto_unixctl_trace_actions(struct unixctl_conn *conn, int argc,
5468                               const char *argv[], void *aux OVS_UNUSED)
5469 {
5470     enum ofputil_protocol usable_protocols;
5471     struct ofproto_dpif *ofproto;
5472     bool enforce_consistency;
5473     struct ofpbuf ofpacts;
5474     struct ofpbuf *packet;
5475     struct ds result;
5476     struct flow flow;
5477     uint16_t in_port;
5478
5479     /* Three kinds of error return values! */
5480     enum ofperr retval;
5481     const char *error;
5482     char *rw_error;
5483
5484     packet = NULL;
5485     ds_init(&result);
5486     ofpbuf_init(&ofpacts, 0);
5487
5488     /* Parse actions. */
5489     rw_error = parse_ofpacts(argv[--argc], &ofpacts, &usable_protocols);
5490     if (rw_error) {
5491         unixctl_command_reply_error(conn, rw_error);
5492         free(rw_error);
5493         goto exit;
5494     }
5495
5496     /* OpenFlow 1.1 and later suggest that the switch enforces certain forms of
5497      * consistency between the flow and the actions.  With -consistent, we
5498      * enforce consistency even for a flow supported in OpenFlow 1.0. */
5499     if (!strcmp(argv[1], "-consistent")) {
5500         enforce_consistency = true;
5501         argv++;
5502         argc--;
5503     } else {
5504         enforce_consistency = false;
5505     }
5506
5507     error = parse_flow_and_packet(argc, argv, &ofproto, &flow, &packet);
5508     if (error) {
5509         unixctl_command_reply_error(conn, error);
5510         goto exit;
5511     }
5512
5513     /* Do the same checks as handle_packet_out() in ofproto.c.
5514      *
5515      * We pass a 'table_id' of 0 to ofpacts_check(), which isn't strictly
5516      * correct because these actions aren't in any table.  That's OK because
5517      * 'table_id' is used only to check goto_table instructions, and
5518      * packet-outs take a list of actions that therefore cannot include
5519      * instructions.
5520      *
5521      * We skip the "meter" check here because meter is an instruction, not an
5522      * action, and thus cannot appear in ofpacts. */
5523     in_port = ofp_to_u16(flow.in_port.ofp_port);
5524     if (in_port >= ofproto->up.max_ports && in_port < ofp_to_u16(OFPP_MAX)) {
5525         unixctl_command_reply_error(conn, "invalid in_port");
5526         goto exit;
5527     }
5528     if (enforce_consistency) {
5529         retval = ofpacts_check_consistency(ofpacts.data, ofpacts.size, &flow,
5530                                            u16_to_ofp(ofproto->up.max_ports),
5531                                            0, 0, usable_protocols);
5532     } else {
5533         retval = ofpacts_check(ofpacts.data, ofpacts.size, &flow,
5534                                u16_to_ofp(ofproto->up.max_ports), 0, 0,
5535                                &usable_protocols);
5536     }
5537
5538     if (retval) {
5539         ds_clear(&result);
5540         ds_put_format(&result, "Bad actions: %s", ofperr_to_string(retval));
5541         unixctl_command_reply_error(conn, ds_cstr(&result));
5542         goto exit;
5543     }
5544
5545     ofproto_trace(ofproto, &flow, packet, ofpacts.data, ofpacts.size, &result);
5546     unixctl_command_reply(conn, ds_cstr(&result));
5547
5548 exit:
5549     ds_destroy(&result);
5550     ofpbuf_delete(packet);
5551     ofpbuf_uninit(&ofpacts);
5552 }
5553
5554 /* Implements a "trace" through 'ofproto''s flow table, appending a textual
5555  * description of the results to 'ds'.
5556  *
5557  * The trace follows a packet with the specified 'flow' through the flow
5558  * table.  'packet' may be nonnull to trace an actual packet, with consequent
5559  * side effects (if it is nonnull then its flow must be 'flow').
5560  *
5561  * If 'ofpacts' is nonnull then its 'ofpacts_len' bytes specify the actions to
5562  * trace, otherwise the actions are determined by a flow table lookup. */
5563 static void
5564 ofproto_trace(struct ofproto_dpif *ofproto, const struct flow *flow,
5565               const struct ofpbuf *packet,
5566               const struct ofpact ofpacts[], size_t ofpacts_len,
5567               struct ds *ds)
5568 {
5569     struct rule_dpif *rule;
5570     struct flow_wildcards wc;
5571
5572     ds_put_format(ds, "Bridge: %s\n", ofproto->up.name);
5573     ds_put_cstr(ds, "Flow: ");
5574     flow_format(ds, flow);
5575     ds_put_char(ds, '\n');
5576
5577     flow_wildcards_init_catchall(&wc);
5578     if (ofpacts) {
5579         rule = NULL;
5580     } else {
5581         rule_dpif_lookup(ofproto, flow, &wc, &rule);
5582
5583         trace_format_rule(ds, 0, rule);
5584         if (rule == ofproto->miss_rule) {
5585             ds_put_cstr(ds, "\nNo match, flow generates \"packet in\"s.\n");
5586         } else if (rule == ofproto->no_packet_in_rule) {
5587             ds_put_cstr(ds, "\nNo match, packets dropped because "
5588                         "OFPPC_NO_PACKET_IN is set on in_port.\n");
5589         } else if (rule == ofproto->drop_frags_rule) {
5590             ds_put_cstr(ds, "\nPackets dropped because they are IP fragments "
5591                         "and the fragment handling mode is \"drop\".\n");
5592         }
5593     }
5594
5595     if (rule || ofpacts) {
5596         uint64_t odp_actions_stub[1024 / 8];
5597         struct ofpbuf odp_actions;
5598         struct trace_ctx trace;
5599         struct match match;
5600         uint16_t tcp_flags;
5601
5602         tcp_flags = packet ? packet_get_tcp_flags(packet, flow) : 0;
5603         trace.result = ds;
5604         trace.flow = *flow;
5605         ofpbuf_use_stub(&odp_actions,
5606                         odp_actions_stub, sizeof odp_actions_stub);
5607         xlate_in_init(&trace.xin, ofproto, flow, rule, tcp_flags, packet);
5608         if (ofpacts) {
5609             trace.xin.ofpacts = ofpacts;
5610             trace.xin.ofpacts_len = ofpacts_len;
5611         }
5612         trace.xin.resubmit_hook = trace_resubmit;
5613         trace.xin.report_hook = trace_report;
5614
5615         xlate_actions(&trace.xin, &trace.xout);
5616         flow_wildcards_or(&trace.xout.wc, &trace.xout.wc, &wc);
5617
5618         ds_put_char(ds, '\n');
5619         trace_format_flow(ds, 0, "Final flow", &trace);
5620
5621         match_init(&match, flow, &trace.xout.wc);
5622         ds_put_cstr(ds, "Relevant fields: ");
5623         match_format(&match, ds, OFP_DEFAULT_PRIORITY);
5624         ds_put_char(ds, '\n');
5625
5626         ds_put_cstr(ds, "Datapath actions: ");
5627         format_odp_actions(ds, trace.xout.odp_actions.data,
5628                            trace.xout.odp_actions.size);
5629
5630         if (trace.xout.slow) {
5631             enum slow_path_reason slow;
5632
5633             ds_put_cstr(ds, "\nThis flow is handled by the userspace "
5634                         "slow path because it:");
5635
5636             slow = trace.xout.slow;
5637             while (slow) {
5638                 enum slow_path_reason bit = rightmost_1bit(slow);
5639
5640                 ds_put_format(ds, "\n\t- %s.",
5641                               slow_path_reason_to_explanation(bit));
5642
5643                 slow &= ~bit;
5644             }
5645         }
5646
5647         xlate_out_uninit(&trace.xout);
5648     }
5649
5650     rule_dpif_unref(rule);
5651 }
5652
5653 /* Runs a self-check of flow translations in 'ofproto'.  Appends a message to
5654  * 'reply' describing the results. */
5655 static void
5656 ofproto_dpif_self_check__(struct ofproto_dpif *ofproto, struct ds *reply)
5657 {
5658     struct cls_cursor cursor;
5659     struct facet *facet;
5660     int errors;
5661
5662     errors = 0;
5663     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
5664     cls_cursor_init(&cursor, &ofproto->facets, NULL);
5665     CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
5666         if (!facet_check_consistency(facet)) {
5667             errors++;
5668         }
5669     }
5670     ovs_rwlock_unlock(&ofproto->facets.rwlock);
5671     if (errors) {
5672         ofproto->backer->need_revalidate = REV_INCONSISTENCY;
5673     }
5674
5675     if (errors) {
5676         ds_put_format(reply, "%s: self-check failed (%d errors)\n",
5677                       ofproto->up.name, errors);
5678     } else {
5679         ds_put_format(reply, "%s: self-check passed\n", ofproto->up.name);
5680     }
5681 }
5682
5683 static void
5684 ofproto_dpif_self_check(struct unixctl_conn *conn,
5685                         int argc, const char *argv[], void *aux OVS_UNUSED)
5686 {
5687     struct ds reply = DS_EMPTY_INITIALIZER;
5688     struct ofproto_dpif *ofproto;
5689
5690     if (argc > 1) {
5691         ofproto = ofproto_dpif_lookup(argv[1]);
5692         if (!ofproto) {
5693             unixctl_command_reply_error(conn, "Unknown ofproto (use "
5694                                         "ofproto/list for help)");
5695             return;
5696         }
5697         ofproto_dpif_self_check__(ofproto, &reply);
5698     } else {
5699         HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5700             ofproto_dpif_self_check__(ofproto, &reply);
5701         }
5702     }
5703
5704     unixctl_command_reply(conn, ds_cstr(&reply));
5705     ds_destroy(&reply);
5706 }
5707
5708 /* Store the current ofprotos in 'ofproto_shash'.  Returns a sorted list
5709  * of the 'ofproto_shash' nodes.  It is the responsibility of the caller
5710  * to destroy 'ofproto_shash' and free the returned value. */
5711 static const struct shash_node **
5712 get_ofprotos(struct shash *ofproto_shash)
5713 {
5714     const struct ofproto_dpif *ofproto;
5715
5716     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5717         char *name = xasprintf("%s@%s", ofproto->up.type, ofproto->up.name);
5718         shash_add_nocopy(ofproto_shash, name, ofproto);
5719     }
5720
5721     return shash_sort(ofproto_shash);
5722 }
5723
5724 static void
5725 ofproto_unixctl_dpif_dump_dps(struct unixctl_conn *conn, int argc OVS_UNUSED,
5726                               const char *argv[] OVS_UNUSED,
5727                               void *aux OVS_UNUSED)
5728 {
5729     struct ds ds = DS_EMPTY_INITIALIZER;
5730     struct shash ofproto_shash;
5731     const struct shash_node **sorted_ofprotos;
5732     int i;
5733
5734     shash_init(&ofproto_shash);
5735     sorted_ofprotos = get_ofprotos(&ofproto_shash);
5736     for (i = 0; i < shash_count(&ofproto_shash); i++) {
5737         const struct shash_node *node = sorted_ofprotos[i];
5738         ds_put_format(&ds, "%s\n", node->name);
5739     }
5740
5741     shash_destroy(&ofproto_shash);
5742     free(sorted_ofprotos);
5743
5744     unixctl_command_reply(conn, ds_cstr(&ds));
5745     ds_destroy(&ds);
5746 }
5747
5748 static void
5749 dpif_show_backer(const struct dpif_backer *backer, struct ds *ds)
5750 {
5751     const struct shash_node **ofprotos;
5752     struct ofproto_dpif *ofproto;
5753     struct shash ofproto_shash;
5754     uint64_t n_hit, n_missed;
5755     size_t i;
5756
5757     n_hit = n_missed = 0;
5758     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5759         if (ofproto->backer == backer) {
5760             n_missed += ofproto->n_missed;
5761             n_hit += ofproto->n_hit;
5762         }
5763     }
5764
5765     ds_put_format(ds, "%s: hit:%"PRIu64" missed:%"PRIu64"\n",
5766                   dpif_name(backer->dpif), n_hit, n_missed);
5767     ds_put_format(ds, "\tflows: cur: %"PRIuSIZE", avg: %u, max: %u,"
5768                   " life span: %lldms\n", hmap_count(&backer->subfacets),
5769                   backer->avg_n_subfacet, backer->max_n_subfacet,
5770                   backer->avg_subfacet_life);
5771
5772     shash_init(&ofproto_shash);
5773     ofprotos = get_ofprotos(&ofproto_shash);
5774     for (i = 0; i < shash_count(&ofproto_shash); i++) {
5775         struct ofproto_dpif *ofproto = ofprotos[i]->data;
5776         const struct shash_node **ports;
5777         size_t j;
5778
5779         if (ofproto->backer != backer) {
5780             continue;
5781         }
5782
5783         ds_put_format(ds, "\t%s: hit:%"PRIu64" missed:%"PRIu64"\n",
5784                       ofproto->up.name, ofproto->n_hit, ofproto->n_missed);
5785
5786         ports = shash_sort(&ofproto->up.port_by_name);
5787         for (j = 0; j < shash_count(&ofproto->up.port_by_name); j++) {
5788             const struct shash_node *node = ports[j];
5789             struct ofport *ofport = node->data;
5790             struct smap config;
5791             odp_port_t odp_port;
5792
5793             ds_put_format(ds, "\t\t%s %u/", netdev_get_name(ofport->netdev),
5794                           ofport->ofp_port);
5795
5796             odp_port = ofp_port_to_odp_port(ofproto, ofport->ofp_port);
5797             if (odp_port != ODPP_NONE) {
5798                 ds_put_format(ds, "%"PRIu32":", odp_port);
5799             } else {
5800                 ds_put_cstr(ds, "none:");
5801             }
5802
5803             ds_put_format(ds, " (%s", netdev_get_type(ofport->netdev));
5804
5805             smap_init(&config);
5806             if (!netdev_get_config(ofport->netdev, &config)) {
5807                 const struct smap_node **nodes;
5808                 size_t i;
5809
5810                 nodes = smap_sort(&config);
5811                 for (i = 0; i < smap_count(&config); i++) {
5812                     const struct smap_node *node = nodes[i];
5813                     ds_put_format(ds, "%c %s=%s", i ? ',' : ':',
5814                                   node->key, node->value);
5815                 }
5816                 free(nodes);
5817             }
5818             smap_destroy(&config);
5819
5820             ds_put_char(ds, ')');
5821             ds_put_char(ds, '\n');
5822         }
5823         free(ports);
5824     }
5825     shash_destroy(&ofproto_shash);
5826     free(ofprotos);
5827 }
5828
5829 static void
5830 ofproto_unixctl_dpif_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
5831                           const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
5832 {
5833     struct ds ds = DS_EMPTY_INITIALIZER;
5834     const struct shash_node **backers;
5835     int i;
5836
5837     backers = shash_sort(&all_dpif_backers);
5838     for (i = 0; i < shash_count(&all_dpif_backers); i++) {
5839         dpif_show_backer(backers[i]->data, &ds);
5840     }
5841     free(backers);
5842
5843     unixctl_command_reply(conn, ds_cstr(&ds));
5844     ds_destroy(&ds);
5845 }
5846
5847 /* Dump the megaflow (facet) cache.  This is useful to check the
5848  * correctness of flow wildcarding, since the same mechanism is used for
5849  * both xlate caching and kernel wildcarding.
5850  *
5851  * It's important to note that in the output the flow description uses
5852  * OpenFlow (OFP) ports, but the actions use datapath (ODP) ports.
5853  *
5854  * This command is only needed for advanced debugging, so it's not
5855  * documented in the man page. */
5856 static void
5857 ofproto_unixctl_dpif_dump_megaflows(struct unixctl_conn *conn,
5858                                     int argc OVS_UNUSED, const char *argv[],
5859                                     void *aux OVS_UNUSED)
5860 {
5861     struct ds ds = DS_EMPTY_INITIALIZER;
5862     const struct ofproto_dpif *ofproto;
5863     long long int now = time_msec();
5864     struct cls_cursor cursor;
5865     struct facet *facet;
5866
5867     ofproto = ofproto_dpif_lookup(argv[1]);
5868     if (!ofproto) {
5869         unixctl_command_reply_error(conn, "no such bridge");
5870         return;
5871     }
5872
5873     ovs_rwlock_rdlock(&ofproto->facets.rwlock);
5874     cls_cursor_init(&cursor, &ofproto->facets, NULL);
5875     CLS_CURSOR_FOR_EACH (facet, cr, &cursor) {
5876         cls_rule_format(&facet->cr, &ds);
5877         ds_put_cstr(&ds, ", ");
5878         ds_put_format(&ds, "n_subfacets:%"PRIuSIZE", ", list_size(&facet->subfacets));
5879         ds_put_format(&ds, "used:%.3fs, ", (now - facet->used) / 1000.0);
5880         ds_put_cstr(&ds, "Datapath actions: ");
5881         if (facet->xout.slow) {
5882             uint64_t slow_path_stub[128 / 8];
5883             const struct nlattr *actions;
5884             size_t actions_len;
5885
5886             compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
5887                               slow_path_stub, sizeof slow_path_stub,
5888                               &actions, &actions_len);
5889             format_odp_actions(&ds, actions, actions_len);
5890         } else {
5891             format_odp_actions(&ds, facet->xout.odp_actions.data,
5892                                facet->xout.odp_actions.size);
5893         }
5894         ds_put_cstr(&ds, "\n");
5895     }
5896     ovs_rwlock_unlock(&ofproto->facets.rwlock);
5897
5898     ds_chomp(&ds, '\n');
5899     unixctl_command_reply(conn, ds_cstr(&ds));
5900     ds_destroy(&ds);
5901 }
5902
5903 /* Disable using the megaflows.
5904  *
5905  * This command is only needed for advanced debugging, so it's not
5906  * documented in the man page. */
5907 static void
5908 ofproto_unixctl_dpif_disable_megaflows(struct unixctl_conn *conn,
5909                                        int argc OVS_UNUSED,
5910                                        const char *argv[] OVS_UNUSED,
5911                                        void *aux OVS_UNUSED)
5912 {
5913     struct ofproto_dpif *ofproto;
5914
5915     enable_megaflows = false;
5916
5917     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5918         flush(&ofproto->up);
5919     }
5920
5921     unixctl_command_reply(conn, "megaflows disabled");
5922 }
5923
5924 /* Re-enable using megaflows.
5925  *
5926  * This command is only needed for advanced debugging, so it's not
5927  * documented in the man page. */
5928 static void
5929 ofproto_unixctl_dpif_enable_megaflows(struct unixctl_conn *conn,
5930                                       int argc OVS_UNUSED,
5931                                       const char *argv[] OVS_UNUSED,
5932                                       void *aux OVS_UNUSED)
5933 {
5934     struct ofproto_dpif *ofproto;
5935
5936     enable_megaflows = true;
5937
5938     HMAP_FOR_EACH (ofproto, all_ofproto_dpifs_node, &all_ofproto_dpifs) {
5939         flush(&ofproto->up);
5940     }
5941
5942     unixctl_command_reply(conn, "megaflows enabled");
5943 }
5944
5945 static void
5946 ofproto_unixctl_dpif_dump_flows(struct unixctl_conn *conn,
5947                                 int argc OVS_UNUSED, const char *argv[],
5948                                 void *aux OVS_UNUSED)
5949 {
5950     struct ds ds = DS_EMPTY_INITIALIZER;
5951     const struct ofproto_dpif *ofproto;
5952     struct subfacet *subfacet;
5953
5954     ofproto = ofproto_dpif_lookup(argv[1]);
5955     if (!ofproto) {
5956         unixctl_command_reply_error(conn, "no such bridge");
5957         return;
5958     }
5959
5960     update_stats(ofproto->backer);
5961
5962     HMAP_FOR_EACH (subfacet, hmap_node, &ofproto->backer->subfacets) {
5963         struct facet *facet = subfacet->facet;
5964         struct odputil_keybuf maskbuf;
5965         struct ofpbuf mask;
5966
5967         if (facet->ofproto != ofproto) {
5968             continue;
5969         }
5970
5971         ofpbuf_use_stack(&mask, &maskbuf, sizeof maskbuf);
5972         if (enable_megaflows) {
5973             odp_flow_key_from_mask(&mask, &facet->xout.wc.masks,
5974                                    &facet->flow, UINT32_MAX);
5975         }
5976
5977         odp_flow_format(subfacet->key, subfacet->key_len,
5978                         mask.data, mask.size, NULL, &ds, false);
5979
5980         ds_put_format(&ds, ", packets:%"PRIu64", bytes:%"PRIu64", used:",
5981                       subfacet->dp_packet_count, subfacet->dp_byte_count);
5982         if (subfacet->used) {
5983             ds_put_format(&ds, "%.3fs",
5984                           (time_msec() - subfacet->used) / 1000.0);
5985         } else {
5986             ds_put_format(&ds, "never");
5987         }
5988         if (subfacet->facet->tcp_flags) {
5989             ds_put_cstr(&ds, ", flags:");
5990             packet_format_tcp_flags(&ds, subfacet->facet->tcp_flags);
5991         }
5992
5993         ds_put_cstr(&ds, ", actions:");
5994         if (facet->xout.slow) {
5995             uint64_t slow_path_stub[128 / 8];
5996             const struct nlattr *actions;
5997             size_t actions_len;
5998
5999             compose_slow_path(ofproto, &facet->flow, facet->xout.slow,
6000                               slow_path_stub, sizeof slow_path_stub,
6001                               &actions, &actions_len);
6002             format_odp_actions(&ds, actions, actions_len);
6003         } else {
6004             format_odp_actions(&ds, facet->xout.odp_actions.data,
6005                                facet->xout.odp_actions.size);
6006         }
6007         ds_put_char(&ds, '\n');
6008     }
6009
6010     unixctl_command_reply(conn, ds_cstr(&ds));
6011     ds_destroy(&ds);
6012 }
6013
6014 static void
6015 ofproto_unixctl_dpif_del_flows(struct unixctl_conn *conn,
6016                                int argc OVS_UNUSED, const char *argv[],
6017                                void *aux OVS_UNUSED)
6018 {
6019     struct ds ds = DS_EMPTY_INITIALIZER;
6020     struct ofproto_dpif *ofproto;
6021
6022     ofproto = ofproto_dpif_lookup(argv[1]);
6023     if (!ofproto) {
6024         unixctl_command_reply_error(conn, "no such bridge");
6025         return;
6026     }
6027
6028     flush(&ofproto->up);
6029
6030     unixctl_command_reply(conn, ds_cstr(&ds));
6031     ds_destroy(&ds);
6032 }
6033
6034 static void
6035 ofproto_dpif_unixctl_init(void)
6036 {
6037     static bool registered;
6038     if (registered) {
6039         return;
6040     }
6041     registered = true;
6042
6043     unixctl_command_register(
6044         "ofproto/trace",
6045         "{[dp_name] odp_flow | bridge br_flow} [-generate|packet]",
6046         1, 3, ofproto_unixctl_trace, NULL);
6047     unixctl_command_register(
6048         "ofproto/trace-packet-out",
6049         "[-consistent] {[dp_name] odp_flow | bridge br_flow} [-generate|packet] actions",
6050         2, 6, ofproto_unixctl_trace_actions, NULL);
6051     unixctl_command_register("fdb/flush", "[bridge]", 0, 1,
6052                              ofproto_unixctl_fdb_flush, NULL);
6053     unixctl_command_register("fdb/show", "bridge", 1, 1,
6054                              ofproto_unixctl_fdb_show, NULL);
6055     unixctl_command_register("ofproto/self-check", "[bridge]", 0, 1,
6056                              ofproto_dpif_self_check, NULL);
6057     unixctl_command_register("dpif/dump-dps", "", 0, 0,
6058                              ofproto_unixctl_dpif_dump_dps, NULL);
6059     unixctl_command_register("dpif/show", "", 0, 0, ofproto_unixctl_dpif_show,
6060                              NULL);
6061     unixctl_command_register("dpif/dump-flows", "bridge", 1, 1,
6062                              ofproto_unixctl_dpif_dump_flows, NULL);
6063     unixctl_command_register("dpif/del-flows", "bridge", 1, 1,
6064                              ofproto_unixctl_dpif_del_flows, NULL);
6065     unixctl_command_register("dpif/dump-megaflows", "bridge", 1, 1,
6066                              ofproto_unixctl_dpif_dump_megaflows, NULL);
6067     unixctl_command_register("dpif/disable-megaflows", "", 0, 0,
6068                              ofproto_unixctl_dpif_disable_megaflows, NULL);
6069     unixctl_command_register("dpif/enable-megaflows", "", 0, 0,
6070                              ofproto_unixctl_dpif_enable_megaflows, NULL);
6071 }
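
/* All of the commands registered above are reached through ovs-appctl, for
 * example (bridge name hypothetical):
 *
 *     ovs-appctl dpif/show
 *     ovs-appctl dpif/dump-flows br0
 *     ovs-appctl fdb/show br0
 */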
6072 \f
6073 /* Linux VLAN device support (e.g. "eth0.10" for VLAN 10.)
6074  *
6075  * This is deprecated.  It is only for compatibility with broken device drivers
6076  * in old versions of Linux that do not properly support VLANs when VLAN
6077  * devices are not used.  When broken device drivers are no longer in
6078  * widespread use, we will delete these interfaces. */
6079
6080 static int
6081 set_realdev(struct ofport *ofport_, ofp_port_t realdev_ofp_port, int vid)
6082 {
6083     struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofport_->ofproto);
6084     struct ofport_dpif *ofport = ofport_dpif_cast(ofport_);
6085
6086     if (realdev_ofp_port == ofport->realdev_ofp_port
6087         && vid == ofport->vlandev_vid) {
6088         return 0;
6089     }
6090
6091     ofproto->backer->need_revalidate = REV_RECONFIGURE;
6092
6093     if (ofport->realdev_ofp_port) {
6094         vsp_remove(ofport);
6095     }
6096     if (realdev_ofp_port && ofport->bundle) {
6097         /* vlandevs are enslaved to their realdevs, so they are not allowed
6098          * to be part of a bundle themselves. */
6099         bundle_set(ofport->up.ofproto, ofport->bundle, NULL);
6100     }
6101
6102     ofport->realdev_ofp_port = realdev_ofp_port;
6103     ofport->vlandev_vid = vid;
6104
6105     if (realdev_ofp_port) {
6106         vsp_add(ofport, realdev_ofp_port, vid);
6107     }
6108
6109     return 0;
6110 }
6111
6112 static uint32_t
6113 hash_realdev_vid(ofp_port_t realdev_ofp_port, int vid)
6114 {
6115     return hash_2words(ofp_to_u16(realdev_ofp_port), vid);
6116 }
6117
6118 bool
6119 ofproto_has_vlan_splinters(const struct ofproto_dpif *ofproto)
6120     OVS_EXCLUDED(ofproto->vsp_mutex)
6121 {
6122     bool ret;
6123
6124     ovs_mutex_lock(&ofproto->vsp_mutex);
6125     ret = !hmap_is_empty(&ofproto->realdev_vid_map);
6126     ovs_mutex_unlock(&ofproto->vsp_mutex);
6127     return ret;
6128 }
6129
6130 static ofp_port_t
6131 vsp_realdev_to_vlandev__(const struct ofproto_dpif *ofproto,
6132                          ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
6133     OVS_REQUIRES(ofproto->vsp_mutex)
6134 {
6135     if (!hmap_is_empty(&ofproto->realdev_vid_map)) {
6136         int vid = vlan_tci_to_vid(vlan_tci);
6137         const struct vlan_splinter *vsp;
6138
6139         HMAP_FOR_EACH_WITH_HASH (vsp, realdev_vid_node,
6140                                  hash_realdev_vid(realdev_ofp_port, vid),
6141                                  &ofproto->realdev_vid_map) {
6142             if (vsp->realdev_ofp_port == realdev_ofp_port
6143                 && vsp->vid == vid) {
6144                 return vsp->vlandev_ofp_port;
6145             }
6146         }
6147     }
6148     return realdev_ofp_port;
6149 }
6150
6151 /* Returns the OFP port number of the Linux VLAN device that corresponds to
6152  * 'vlan_tci' on the network device with port number 'realdev_ofp_port' in
6153  * 'struct ofport_dpif'.  For example, given 'realdev_ofp_port' of eth0 and
6154  * 'vlan_tci' 9, it would return the port number of eth0.9.
6155  *
6156  * Unless VLAN splinters are enabled for port 'realdev_ofp_port', this
6157  * function just returns its 'realdev_ofp_port' argument. */
6158 ofp_port_t
6159 vsp_realdev_to_vlandev(const struct ofproto_dpif *ofproto,
6160                        ofp_port_t realdev_ofp_port, ovs_be16 vlan_tci)
6161     OVS_EXCLUDED(ofproto->vsp_mutex)
6162 {
6163     ofp_port_t ret;
6164
6165     ovs_mutex_lock(&ofproto->vsp_mutex);
6166     ret = vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, vlan_tci);
6167     ovs_mutex_unlock(&ofproto->vsp_mutex);
6168     return ret;
6169 }
6170
6171 static struct vlan_splinter *
6172 vlandev_find(const struct ofproto_dpif *ofproto, ofp_port_t vlandev_ofp_port)
6173 {
6174     struct vlan_splinter *vsp;
6175
6176     HMAP_FOR_EACH_WITH_HASH (vsp, vlandev_node,
6177                              hash_ofp_port(vlandev_ofp_port),
6178                              &ofproto->vlandev_map) {
6179         if (vsp->vlandev_ofp_port == vlandev_ofp_port) {
6180             return vsp;
6181         }
6182     }
6183
6184     return NULL;
6185 }
6186
6187 /* Returns the OpenFlow port number of the "real" device underlying the Linux
6188  * VLAN device with OpenFlow port number 'vlandev_ofp_port' and stores the
6189  * VLAN VID of the Linux VLAN device in '*vid'.  For example, given
6190  * 'vlandev_ofp_port' of eth0.9, it would return the OpenFlow port number of
6191  * eth0 and store 9 in '*vid'.
6192  *
6193  * Returns 0 and does not modify '*vid' if 'vlandev_ofp_port' is not a Linux
6194  * VLAN device.  Unless VLAN splinters are enabled, that is always the
6195  * case. */
6196 static ofp_port_t
6197 vsp_vlandev_to_realdev(const struct ofproto_dpif *ofproto,
6198                        ofp_port_t vlandev_ofp_port, int *vid)
6199     OVS_REQUIRES(ofproto->vsp_mutex)
6200 {
6201     if (!hmap_is_empty(&ofproto->vlandev_map)) {
6202         const struct vlan_splinter *vsp;
6203
6204         vsp = vlandev_find(ofproto, vlandev_ofp_port);
6205         if (vsp) {
6206             if (vid) {
6207                 *vid = vsp->vid;
6208             }
6209             return vsp->realdev_ofp_port;
6210         }
6211     }
6212     return 0;
6213 }
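
/* Illustrative sketch (hypothetical caller): vsp_vlandev_to_realdev() only
 * asserts that 'vsp_mutex' is held, so a caller that wants both the real
 * device and the VID takes the lock itself:
 *
 *     int vid;
 *     ofp_port_t realdev;
 *
 *     ovs_mutex_lock(&ofproto->vsp_mutex);
 *     realdev = vsp_vlandev_to_realdev(ofproto, vlandev_ofp_port, &vid);
 *     ovs_mutex_unlock(&ofproto->vsp_mutex);
 *
 * 'realdev' is nonzero only if 'vlandev_ofp_port' names a known VLAN device,
 * in which case 'vid' holds its VLAN ID.  vsp_adjust_flow() below follows
 * exactly this pattern. */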

/* Given 'flow', a flow representing a packet received on 'ofproto', checks
 * whether 'flow->in_port' represents a Linux VLAN device.  If so, changes
 * 'flow->in_port' to the "real" device backing the VLAN device, sets
 * 'flow->vlan_tci' to the VLAN VID, and returns true.  Otherwise (which is
 * always the case unless VLAN splinters are enabled), returns false without
 * making any changes. */
bool
vsp_adjust_flow(const struct ofproto_dpif *ofproto, struct flow *flow)
    OVS_EXCLUDED(ofproto->vsp_mutex)
{
    ofp_port_t realdev;
    int vid;

    ovs_mutex_lock(&ofproto->vsp_mutex);
    realdev = vsp_vlandev_to_realdev(ofproto, flow->in_port.ofp_port, &vid);
    ovs_mutex_unlock(&ofproto->vsp_mutex);
    if (!realdev) {
        return false;
    }

    /* Cause the flow to be processed as if it came in on the real device with
     * the VLAN device's VLAN ID. */
    flow->in_port.ofp_port = realdev;
    flow->vlan_tci = htons((vid & VLAN_VID_MASK) | VLAN_CFI);
    return true;
}
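
/* Illustrative sketch (hypothetical caller): given a 'struct flow' already
 * extracted from a received packet, the whole adjustment is one call:
 *
 *     if (vsp_adjust_flow(ofproto, &flow)) {
 *         ...'flow' now looks as if the packet had arrived on the real
 *            device, tagged with the VLAN device's VID...
 *     }
 *
 * On a true return, 'flow.in_port.ofp_port' is the real device and
 * 'flow.vlan_tci' carries the VID with VLAN_CFI set, so flow lookup and
 * translation proceed as for a tagged packet on the real device. */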
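/* Each 'struct vlan_splinter' is linked into two hash maps at once:
 * 'ofproto->vlandev_map', keyed on the VLAN device's OpenFlow port number,
 * and 'ofproto->realdev_vid_map', keyed on hash_realdev_vid() of the real
 * device's port number and the VID.  vsp_remove() and vsp_add() below must
 * keep the two maps consistent, which is why each holds 'vsp_mutex' across
 * the whole update. */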
static void
vsp_remove(struct ofport_dpif *port)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);
    struct vlan_splinter *vsp;

    ovs_mutex_lock(&ofproto->vsp_mutex);
    vsp = vlandev_find(ofproto, port->up.ofp_port);
    if (vsp) {
        hmap_remove(&ofproto->vlandev_map, &vsp->vlandev_node);
        hmap_remove(&ofproto->realdev_vid_map, &vsp->realdev_vid_node);
        free(vsp);

        port->realdev_ofp_port = 0;
    } else {
        VLOG_ERR("missing vlan device record");
    }
    ovs_mutex_unlock(&ofproto->vsp_mutex);
}

static void
vsp_add(struct ofport_dpif *port, ofp_port_t realdev_ofp_port, int vid)
{
    struct ofproto_dpif *ofproto = ofproto_dpif_cast(port->up.ofproto);

    ovs_mutex_lock(&ofproto->vsp_mutex);
    if (!vsp_vlandev_to_realdev(ofproto, port->up.ofp_port, NULL)
        && (vsp_realdev_to_vlandev__(ofproto, realdev_ofp_port, htons(vid))
            == realdev_ofp_port)) {
        struct vlan_splinter *vsp;

        vsp = xmalloc(sizeof *vsp);
        vsp->realdev_ofp_port = realdev_ofp_port;
        vsp->vlandev_ofp_port = port->up.ofp_port;
        vsp->vid = vid;

        port->realdev_ofp_port = realdev_ofp_port;

        hmap_insert(&ofproto->vlandev_map, &vsp->vlandev_node,
                    hash_ofp_port(port->up.ofp_port));
        hmap_insert(&ofproto->realdev_vid_map, &vsp->realdev_vid_node,
                    hash_realdev_vid(realdev_ofp_port, vid));
    } else {
        VLOG_ERR("duplicate vlan device record");
    }
    ovs_mutex_unlock(&ofproto->vsp_mutex);
}

static odp_port_t
ofp_port_to_odp_port(const struct ofproto_dpif *ofproto, ofp_port_t ofp_port)
{
    const struct ofport_dpif *ofport = get_ofp_port(ofproto, ofp_port);
    return ofport ? ofport->odp_port : ODPP_NONE;
}

struct ofport_dpif *
odp_port_to_ofport(const struct dpif_backer *backer, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    ovs_rwlock_rdlock(&backer->odp_to_ofport_lock);
    HMAP_FOR_EACH_IN_BUCKET (port, odp_port_node, hash_odp_port(odp_port),
                             &backer->odp_to_ofport_map) {
        if (port->odp_port == odp_port) {
            ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
            return port;
        }
    }

    ovs_rwlock_unlock(&backer->odp_to_ofport_lock);
    return NULL;
}

static ofp_port_t
odp_port_to_ofp_port(const struct ofproto_dpif *ofproto, odp_port_t odp_port)
{
    struct ofport_dpif *port;

    port = odp_port_to_ofport(ofproto->backer, odp_port);
    if (port && &ofproto->up == port->up.ofproto) {
        return port->up.ofp_port;
    } else {
        return OFPP_NONE;
    }
}
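
/* Illustrative round trip (hypothetical values): translation between the two
 * port-number spaces is symmetric for ports that belong to 'ofproto':
 *
 *     odp_port_t odp = ofp_port_to_odp_port(ofproto, ofp_port);
 *     if (odp != ODPP_NONE) {
 *         ofp_port_t back = odp_port_to_ofp_port(ofproto, odp);
 *         ...'back' equals 'ofp_port' here...
 *     }
 *
 * odp_port_to_ofp_port() instead returns OFPP_NONE when the datapath port
 * belongs to a different ofproto sharing the same 'backer', or to no ofproto
 * at all. */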

const struct ofproto_class ofproto_dpif_class = {
    init,
    enumerate_types,
    enumerate_names,
    del,
    port_open_type,
    type_run,
    type_run_fast,
    type_wait,
    alloc,
    construct,
    destruct,
    dealloc,
    run,
    run_fast,
    wait,
    get_memory_usage,
    flush,
    get_features,
    get_tables,
    port_alloc,
    port_construct,
    port_destruct,
    port_dealloc,
    port_modified,
    port_reconfigured,
    port_query_by_name,
    port_add,
    port_del,
    port_get_stats,
    port_dump_start,
    port_dump_next,
    port_dump_done,
    port_poll,
    port_poll_wait,
    port_is_lacp_current,
    NULL,                       /* rule_choose_table */
    rule_alloc,
    rule_construct,
    rule_insert,
    rule_delete,
    rule_destruct,
    rule_dealloc,
    rule_get_stats,
    rule_execute,
    rule_modify_actions,
    set_frag_handling,
    packet_out,
    set_netflow,
    get_netflow_ids,
    set_sflow,
    set_ipfix,
    set_cfm,
    get_cfm_status,
    set_bfd,
    get_bfd_status,
    set_stp,
    get_stp_status,
    set_stp_port,
    get_stp_port_status,
    get_stp_port_stats,
    set_queues,
    bundle_set,
    bundle_remove,
    mirror_set__,
    mirror_get_stats__,
    set_flood_vlans,
    is_mirror_output_bundle,
    forward_bpdu_changed,
    set_mac_table_config,
    set_realdev,
    NULL,                       /* meter_get_features */
    NULL,                       /* meter_set */
    NULL,                       /* meter_get */
    NULL,                       /* meter_del */
    group_alloc,                /* group_alloc */
    group_construct,            /* group_construct */
    group_destruct,             /* group_destruct */
    group_dealloc,              /* group_dealloc */
    group_modify,               /* group_modify */
    group_get_stats,            /* group_get_stats */
};