2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
27 #include "ofp-actions.h"
29 #include "ofproto/ofproto-provider.h"
30 #include "ofproto/ofproto-dpif.h"
31 #include "connectivity.h"
33 #include "dynamic-string.h"
42 #include "poll-loop.h"
50 VLOG_DEFINE_THIS_MODULE(bond);
/* Global rwlock protecting 'all_bonds' and (per the OVS_GUARDED_BY
 * annotations below) most per-bond state. */
52 static struct ovs_rwlock rwlock = OVS_RWLOCK_INITIALIZER;
53 static struct hmap all_bonds__ = HMAP_INITIALIZER(&all_bonds__);
54 static struct hmap *const all_bonds OVS_GUARDED_BY(rwlock) = &all_bonds__;
56 /* Bit-mask for hashing a flow down to a bucket. */
57 #define BOND_MASK 0xff
58 #define BOND_BUCKETS (BOND_MASK + 1)
59 #define RECIRC_RULE_PRIORITY 20 /* Priority level for internal rules */
61 /* A hash bucket for mapping a flow to a slave.
62 * "struct bond" has an array of BOND_BUCKETS of these. */
/* NOTE(review): this extract elides the opening "struct bond_entry {" line;
 * the members below belong to that struct. */
64 struct bond_slave *slave; /* Assigned slave, NULL if unassigned. */
65 uint64_t tx_bytes /* Count of bytes recently transmitted. */
66 OVS_GUARDED_BY(rwlock);
67 struct list list_node; /* In bond_slave's 'entries' list. */
/* NOTE(review): the first line of the comment below (and the 'pr_rule'
 * member declaration it documents) are elided in this extract. */
71 * 'pr_rule' is the post-recirculation rule for this entry.
72 * 'pr_tx_bytes' is the most recently seen statistics for 'pr_rule', which
73 * is used to determine delta (applied to 'tx_bytes' above.) */
75 uint64_t pr_tx_bytes OVS_GUARDED_BY(rwlock);
78 /* A bond slave, that is, one of the links comprising a bond. */
/* NOTE(review): the opening "struct bond_slave {" line is elided in this
 * extract. */
80 struct hmap_node hmap_node; /* In struct bond's slaves hmap. */
81 struct list list_node; /* In struct bond's enabled_slaves list. */
82 struct bond *bond; /* The bond that contains this slave. */
83 void *aux; /* Client-provided handle for this slave. */
85 struct netdev *netdev; /* Network device, owned by the client. */
86 unsigned int change_seq; /* Tracks changes in 'netdev'. */
87 ofp_port_t ofp_port; /* OpenFlow port number. */
88 char *name; /* Name (a copy of netdev_get_name(netdev)). */
/* Link status. */
91 long long delay_expires; /* Time after which 'enabled' may change. */
92 bool enabled; /* May be chosen for flows? */
93 bool may_enable; /* Client considers this slave bondable. */
95 /* Rebalancing info. Used only by bond_rebalance(). */
96 struct list bal_node; /* In bond_rebalance()'s 'bals' list. */
97 struct list entries; /* 'struct bond_entry's assigned here. */
98 uint64_t tx_bytes; /* Sum across 'tx_bytes' of entries. */
101 /* A bond, that is, a set of network devices grouped to improve performance or
/* NOTE(review): the remainder of the comment above and the opening
 * "struct bond {" line are elided in this extract. */
104 struct hmap_node hmap_node; /* In 'all_bonds' hmap. */
105 char *name; /* Name provided by client. */
106 struct ofproto_dpif *ofproto; /* The bridge this bond belongs to. */
/* NOTE(review): the 'slaves' hmap member (used throughout via
 * &bond->slaves) and part of the comment below are elided here. */
113 * Any reader or writer of 'enabled_slaves' must hold 'mutex'.
114 * (To prevent the bond_slave from disappearing they must also hold
116 struct ovs_mutex mutex OVS_ACQ_AFTER(rwlock);
117 struct list enabled_slaves OVS_GUARDED; /* Contains struct bond_slaves. */
120 enum bond_mode balance; /* Balancing mode, one of BM_*. */
121 struct bond_slave *active_slave; /* Chosen by bond_choose_active_slave(). */
122 int updelay, downdelay; /* Delay before slave goes up/down, in ms. */
123 enum lacp_status lacp_status; /* Status of LACP negotiations. */
124 bool bond_revalidate; /* True if flows need revalidation. */
125 uint32_t basis; /* Basis for flow hash function. */
127 /* SLB specific bonding info. */
128 struct bond_entry *hash; /* An array of BOND_BUCKETS elements. */
129 int rebalance_interval; /* Interval between rebalances, in ms. */
130 long long int next_rebalance; /* Next rebalancing time. */
131 bool send_learning_packets; /* Checked by bond_should_send_learning_packets(). */
132 uint32_t recirc_id; /* Non zero if recirculation can be used.*/
133 struct hmap pr_rule_ops; /* Helps to maintain post recirculation rules.*/
135 /* Legacy compatibility. */
136 long long int next_fake_iface_update; /* LLONG_MAX if disabled. */
137 bool lacp_fallback_ab; /* Fallback to active-backup on LACP failure. */
139 struct ovs_refcount ref_cnt; /* Reference count; see bond_ref()/bond_unref(). */
142 /* What to do with a bond_recirc_rule. */
/* NOTE(review): the enum's name and opening line are elided in this
 * extract; ADD/DEL are its enumerators. */
144 ADD, /* Add the rule to ofproto's flow table. */
145 DEL, /* Delete the rule from the ofproto's flow table. */
148 /* A rule to add to or delete from ofproto's internal flow table. */
149 struct bond_pr_rule_op {
150 struct hmap_node hmap_node;
/* NOTE(review): a 'struct match match' member and an op field are used by
 * add_pr_rule()/update_recirc_rules() but their declarations are elided
 * from this extract. */
152 ofp_port_t out_ofport;
154 struct rule *pr_rule;
/* Forward declarations of static helpers, with their locking requirements
 * expressed via OVS_REQ_RDLOCK/OVS_REQ_WRLOCK on the global 'rwlock'. */
157 static void bond_entry_reset(struct bond *) OVS_REQ_WRLOCK(rwlock);
158 static struct bond_slave *bond_slave_lookup(struct bond *, const void *slave_)
159 OVS_REQ_RDLOCK(rwlock);
160 static void bond_enable_slave(struct bond_slave *, bool enable)
161 OVS_REQ_WRLOCK(rwlock);
162 static void bond_link_status_update(struct bond_slave *)
163 OVS_REQ_WRLOCK(rwlock);
164 static void bond_choose_active_slave(struct bond *)
165 OVS_REQ_WRLOCK(rwlock);
166 static unsigned int bond_hash_src(const uint8_t mac[ETH_ADDR_LEN],
167 uint16_t vlan, uint32_t basis);
168 static unsigned int bond_hash_tcp(const struct flow *, uint16_t vlan,
170 static struct bond_entry *lookup_bond_entry(const struct bond *,
173 OVS_REQ_RDLOCK(rwlock);
174 static struct bond_slave *get_enabled_slave(struct bond *)
175 OVS_REQ_RDLOCK(rwlock);
176 static struct bond_slave *choose_output_slave(const struct bond *,
178 struct flow_wildcards *,
180 OVS_REQ_RDLOCK(rwlock);
181 static void bond_update_fake_slave_stats(struct bond *)
182 OVS_REQ_RDLOCK(rwlock);
184 /* Attempts to parse 's' as the name of a bond balancing mode. If successful,
185 * stores the mode in '*balance' and returns true. Otherwise returns false
186 * without modifying '*balance'. */
188 bond_mode_from_string(enum bond_mode *balance, const char *s)
/* NOTE(review): the return-type line, braces, the assignments to '*balance'
 * and the return statements are elided in this extract. */
190 if (!strcmp(s, bond_mode_to_string(BM_TCP))) {
192 } else if (!strcmp(s, bond_mode_to_string(BM_SLB))) {
194 } else if (!strcmp(s, bond_mode_to_string(BM_AB))) {
202 /* Returns a string representing 'balance'. */
204 bond_mode_to_string(enum bond_mode balance) {
/* NOTE(review): the switch statement and its case labels are elided in this
 * extract; each return below presumably corresponds to one BM_* mode
 * (matching the strings compared in bond_mode_from_string()). */
207 return "balance-tcp";
209 return "balance-slb";
211 return "active-backup";
217 /* Creates and returns a new bond whose configuration is initially taken from
220 * The caller should register each slave on the new bond by calling
221 * bond_slave_register(). */
223 bond_create(const struct bond_settings *s, struct ofproto_dpif *ofproto)
/* NOTE(review): the local declaration of 'bond' and the function's return
 * statement are elided in this extract. */
227 bond = xzalloc(sizeof *bond);
228 bond->ofproto = ofproto;
229 hmap_init(&bond->slaves);
230 list_init(&bond->enabled_slaves);
231 ovs_mutex_init(&bond->mutex);
/* LLONG_MAX means fake-interface stat updates start out disabled. */
232 bond->next_fake_iface_update = LLONG_MAX;
233 ovs_refcount_init(&bond->ref_cnt);
236 hmap_init(&bond->pr_rule_ops);
/* Apply the caller-provided settings to the freshly zeroed bond. */
238 bond_reconfigure(bond, s);
/* Takes a new reference to 'bond_'.
 * NOTE(review): the return-type line, a probable null check, and the return
 * statement are elided in this extract. */
243 bond_ref(const struct bond *bond_)
245 struct bond *bond = CONST_CAST(struct bond *, bond_);
248 ovs_refcount_ref(&bond->ref_cnt);
/* Releases a reference to 'bond'; when the last reference is dropped, tears
 * down all slaves, post-recirculation rule state, and the recirculation ID.
 * NOTE(review): braces, per-slave/per-op frees, and the final free of 'bond'
 * are elided in this extract. */
255 bond_unref(struct bond *bond)
257 struct bond_slave *slave, *next_slave;
258 struct bond_pr_rule_op *pr_op, *next_op;
/* Not the last reference (or no bond at all): nothing to destroy. */
260 if (!bond || ovs_refcount_unref(&bond->ref_cnt) != 1) {
264 ovs_rwlock_wrlock(&rwlock);
265 hmap_remove(all_bonds, &bond->hmap_node);
266 ovs_rwlock_unlock(&rwlock);
268 HMAP_FOR_EACH_SAFE (slave, next_slave, hmap_node, &bond->slaves) {
269 hmap_remove(&bond->slaves, &slave->hmap_node);
270 /* Client owns 'slave->netdev'. */
274 hmap_destroy(&bond->slaves);
276 ovs_mutex_destroy(&bond->mutex);
280 HMAP_FOR_EACH_SAFE(pr_op, next_op, hmap_node, &bond->pr_rule_ops) {
281 hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node);
284 hmap_destroy(&bond->pr_rule_ops);
/* Return the recirculation ID to ofproto if one was allocated. */
286 if (bond->recirc_id) {
287 ofproto_dpif_free_recirc_id(bond->ofproto, bond->recirc_id);
/* Records in 'bond->pr_rule_ops' that 'match' should output to 'out_ofport'.
 * If an op with an equal match already exists it is updated in place;
 * otherwise a new op is allocated and inserted, keyed by match_hash().
 * NOTE(review): the early return after updating an existing entry, and some
 * field assignments of the new entry, are elided in this extract. */
294 add_pr_rule(struct bond *bond, const struct match *match,
295 ofp_port_t out_ofport, struct rule *rule)
297 uint32_t hash = match_hash(match, 0);
298 struct bond_pr_rule_op *pr_op;
300 HMAP_FOR_EACH_WITH_HASH(pr_op, hmap_node, hash, &bond->pr_rule_ops) {
301 if (match_equal(&pr_op->match, match)) {
/* Existing op for this match: refresh its target port and rule. */
303 pr_op->out_ofport = out_ofport;
304 pr_op->pr_rule = rule;
309 pr_op = xmalloc(sizeof *pr_op);
310 pr_op->match = *match;
312 pr_op->out_ofport = out_ofport;
313 pr_op->pr_rule = rule;
314 hmap_insert(&bond->pr_rule_ops, &pr_op->hmap_node, hash);
/* Rebuilds the set of post-recirculation rules for 'bond': one rule per hash
 * bucket matching (recirc_id, dp_hash) and outputting to the bucket's slave,
 * then pushes the resulting add/delete operations into ofproto's internal
 * flow table at RECIRC_RULE_PRIORITY.
 * NOTE(review): several lines are elided in this extract, including the body
 * of the first loop (which appears to mark existing ops), the op-kind
 * dispatch inside the second loop, and declarations of 'i', 'match',
 * 'rule', and 'error'. */
318 update_recirc_rules(struct bond *bond)
321 struct bond_pr_rule_op *pr_op, *next_op;
322 uint64_t ofpacts_stub[128 / 8];
323 struct ofpbuf ofpacts;
326 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
328 HMAP_FOR_EACH(pr_op, hmap_node, &bond->pr_rule_ops) {
/* Only build bucket rules when hashing and recirculation are in use. */
332 if ((bond->hash == NULL) || (!bond->recirc_id)) {
336 for (i = 0; i < BOND_BUCKETS; i++) {
337 struct bond_slave *slave = bond->hash[i].slave;
340 match_init_catchall(&match);
341 match_set_recirc_id(&match, bond->recirc_id);
342 /* recirc_id -> metadata to speed up look ups. */
343 match_set_metadata(&match, htonll(bond->recirc_id));
344 match_set_dp_hash_masked(&match, i, BOND_MASK);
346 add_pr_rule(bond, &match, slave->ofp_port,
347 bond->hash[i].pr_rule);
351 HMAP_FOR_EACH_SAFE(pr_op, next_op, hmap_node, &bond->pr_rule_ops) {
/* Install (or refresh) the internal flow for this op. */
356 ofpbuf_clear(&ofpacts);
357 ofpact_put_OUTPUT(&ofpacts)->port = pr_op->out_ofport;
358 error = ofproto_dpif_add_internal_flow(bond->ofproto,
360 RECIRC_RULE_PRIORITY,
363 char *err_s = match_to_string(&pr_op->match,
364 RECIRC_RULE_PRIORITY);
366 VLOG_ERR("failed to add post recirculation flow %s", err_s);
368 pr_op->pr_rule = NULL;
370 pr_op->pr_rule = rule;
/* Remove the internal flow and drop the op from the map. */
375 error = ofproto_dpif_delete_internal_flow(bond->ofproto,
377 RECIRC_RULE_PRIORITY);
379 char *err_s = match_to_string(&pr_op->match,
380 RECIRC_RULE_PRIORITY);
382 VLOG_ERR("failed to remove post recirculation flow %s", err_s);
386 hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node);
387 pr_op->pr_rule = NULL;
393 ofpbuf_uninit(&ofpacts);
397 /* Updates 'bond''s overall configuration to 's'.
399 * The caller should register each slave on 'bond' by calling
400 * bond_slave_register(). This is optional if none of the slaves'
401 * configuration has changed. In any case it can't hurt.
403 * Returns true if the configuration has changed in such a way that requires
/* NOTE(review): the tail of the comment above, the return-type line, braces,
 * several "revalidate = true" assignments, and the return statement are
 * elided in this extract. */
407 bond_reconfigure(struct bond *bond, const struct bond_settings *s)
409 bool revalidate = false;
411 ovs_rwlock_wrlock(&rwlock);
/* Rename: re-key the bond in 'all_bonds' under the new name. */
412 if (!bond->name || strcmp(bond->name, s->name)) {
414 hmap_remove(all_bonds, &bond->hmap_node);
417 bond->name = xstrdup(s->name);
418 hmap_insert(all_bonds, &bond->hmap_node, hash_string(bond->name, 0));
421 bond->updelay = s->up_delay;
422 bond->downdelay = s->down_delay;
424 if (bond->lacp_fallback_ab != s->lacp_fallback_ab_cfg) {
425 bond->lacp_fallback_ab = s->lacp_fallback_ab_cfg;
429 if (bond->rebalance_interval != s->rebalance_interval) {
430 bond->rebalance_interval = s->rebalance_interval;
434 if (bond->balance != s->balance) {
435 bond->balance = s->balance;
439 if (bond->basis != s->basis) {
440 bond->basis = s->basis;
/* Enable or disable periodic fake-interface stat updates. */
445 if (bond->next_fake_iface_update == LLONG_MAX) {
446 bond->next_fake_iface_update = time_msec();
449 bond->next_fake_iface_update = LLONG_MAX;
452 if (bond->bond_revalidate) {
454 bond->bond_revalidate = false;
/* Recirculation IDs are only used outside active-backup mode. */
457 if (bond->balance != BM_AB) {
458 if (!bond->recirc_id) {
459 bond->recirc_id = ofproto_dpif_alloc_recirc_id(bond->ofproto);
461 } else if (bond->recirc_id) {
462 ofproto_dpif_free_recirc_id(bond->ofproto, bond->recirc_id);
466 if (bond->balance == BM_AB || !bond->hash || revalidate) {
467 bond_entry_reset(bond);
470 ovs_rwlock_unlock(&rwlock);
/* Points 'slave' at 'netdev', zeroing 'change_seq' so the netdev's state is
 * treated as changed. */
475 bond_slave_set_netdev__(struct bond_slave *slave, struct netdev *netdev)
476 OVS_REQ_WRLOCK(rwlock)
478 if (slave->netdev != netdev) {
479 slave->netdev = netdev;
480 slave->change_seq = 0;
484 /* Registers 'slave_' as a slave of 'bond'. The 'slave_' pointer is an
485 * arbitrary client-provided pointer that uniquely identifies a slave within a
486 * bond. If 'slave_' already exists within 'bond' then this function
487 * reconfigures the existing slave.
489 * 'netdev' must be the network device that 'slave_' represents. It is owned
490 * by the client, so the client must not close it before either unregistering
491 * 'slave_' or destroying 'bond'.
/* NOTE(review): braces, assignments of slave->bond/slave->aux, and a free of
 * the previous slave->name are elided in this extract. */
494 bond_slave_register(struct bond *bond, void *slave_,
495 ofp_port_t ofport, struct netdev *netdev)
497 struct bond_slave *slave;
499 ovs_rwlock_wrlock(&rwlock);
500 slave = bond_slave_lookup(bond, slave_);
/* Not found: allocate and initialize a brand-new slave, keyed on the
 * client-provided pointer. */
502 slave = xzalloc(sizeof *slave);
504 hmap_insert(&bond->slaves, &slave->hmap_node, hash_pointer(slave_, 0));
507 slave->ofp_port = ofport;
508 slave->delay_expires = LLONG_MAX;
509 slave->name = xstrdup(netdev_get_name(netdev));
510 bond->bond_revalidate = true;
/* Initially disabled; enable immediately if carrier is up. */
512 slave->enabled = false;
513 bond_enable_slave(slave, netdev_get_carrier(netdev));
516 bond_slave_set_netdev__(slave, netdev);
519 slave->name = xstrdup(netdev_get_name(netdev));
520 ovs_rwlock_unlock(&rwlock);
523 /* Updates the network device to be used with 'slave_' to 'netdev'.
525 * This is useful if the caller closes and re-opens the network device
526 * registered with bond_slave_register() but doesn't need to change anything
/* NOTE(review): the end of the comment above, the return-type line, braces,
 * and a null check on the looked-up slave are elided in this extract. */
529 bond_slave_set_netdev(struct bond *bond, void *slave_, struct netdev *netdev)
531 struct bond_slave *slave;
533 ovs_rwlock_wrlock(&rwlock);
534 slave = bond_slave_lookup(bond, slave_);
536 bond_slave_set_netdev__(slave, netdev);
538 ovs_rwlock_unlock(&rwlock);
541 /* Unregisters 'slave_' from 'bond'. If 'bond' does not contain such a slave
542 * then this function has no effect.
544 * Unregistering a slave invalidates all flows. */
/* NOTE(review): braces, the early return when the slave is not found, the
 * declaration of 'del_active', clearing of e->slave, and the free of the
 * slave are elided in this extract. */
546 bond_slave_unregister(struct bond *bond, const void *slave_)
548 struct bond_slave *slave;
551 ovs_rwlock_wrlock(&rwlock);
552 slave = bond_slave_lookup(bond, slave_);
557 bond->bond_revalidate = true;
558 bond_enable_slave(slave, false);
560 del_active = bond->active_slave == slave;
/* Detach the departing slave from any hash buckets that point at it. */
562 struct bond_entry *e;
563 for (e = bond->hash; e <= &bond->hash[BOND_MASK]; e++) {
564 if (e->slave == slave) {
572 hmap_remove(&bond->slaves, &slave->hmap_node);
573 /* Client owns 'slave->netdev'. */
/* If the active slave was removed, elect a new one and re-learn MACs. */
577 bond_choose_active_slave(bond);
578 bond->send_learning_packets = true;
581 ovs_rwlock_unlock(&rwlock);
584 /* Should be called on each slave in 'bond' before bond_run() to indicate
585 * whether or not 'slave_' may be enabled. This function is intended to allow
586 * other protocols to have some impact on bonding decisions. For example LACP
587 * or high level link monitoring protocols may decide that a given slave should
588 * not be able to send traffic. */
/* NOTE(review): the return-type line and braces are elided in this
 * extract. */
590 bond_slave_set_may_enable(struct bond *bond, void *slave_, bool may_enable)
592 ovs_rwlock_wrlock(&rwlock);
593 bond_slave_lookup(bond, slave_)->may_enable = may_enable;
594 ovs_rwlock_unlock(&rwlock);
597 /* Performs periodic maintenance on 'bond'.
599 * Returns true if the caller should revalidate its flows.
601 * The caller should check bond_should_send_learning_packets() afterward. */
/* NOTE(review): the return-type line, the declaration of 'revalidate',
 * braces, and the return statement are elided in this extract. */
603 bond_run(struct bond *bond, enum lacp_status lacp_status)
605 struct bond_slave *slave;
608 ovs_rwlock_wrlock(&rwlock);
/* A change in LACP status invalidates existing flows. */
609 if (bond->lacp_status != lacp_status) {
610 bond->lacp_status = lacp_status;
611 bond->bond_revalidate = true;
614 /* Enable slaves based on link status and LACP feedback. */
615 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
616 bond_link_status_update(slave);
617 slave->change_seq = seq_read(connectivity_seq_get());
619 if (!bond->active_slave || !bond->active_slave->enabled) {
620 bond_choose_active_slave(bond);
623 /* Update fake bond interface stats. */
624 if (time_msec() >= bond->next_fake_iface_update) {
625 bond_update_fake_slave_stats(bond);
626 bond->next_fake_iface_update = time_msec() + 1000;
/* Consume the revalidation flag so the caller sees it exactly once. */
629 revalidate = bond->bond_revalidate;
630 bond->bond_revalidate = false;
631 ovs_rwlock_unlock(&rwlock);
636 /* Causes poll_block() to wake up when 'bond' needs something to be done. */
/* NOTE(review): the return-type line and braces are elided in this
 * extract. */
638 bond_wait(struct bond *bond)
640 struct bond_slave *slave;
642 ovs_rwlock_rdlock(&rwlock);
643 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
/* Wake when a pending enable/disable delay expires... */
644 if (slave->delay_expires != LLONG_MAX) {
645 poll_timer_wait_until(slave->delay_expires);
/* ...or when the slave's netdev connectivity changes. */
648 seq_wait(connectivity_seq_get(), slave->change_seq);
651 if (bond->next_fake_iface_update != LLONG_MAX) {
652 poll_timer_wait_until(bond->next_fake_iface_update);
/* Pending revalidation: wake immediately. */
655 if (bond->bond_revalidate) {
656 poll_immediate_wake();
658 ovs_rwlock_unlock(&rwlock);
660 /* We don't wait for bond->next_rebalance because rebalancing can only run
661 * at a flow account checkpoint. ofproto does checkpointing on its own
662 * schedule and bond_rebalance() gets called afterward, so we'd just be
663 * waking up for no purpose. */
666 /* MAC learning table interaction. */
/* Returns true if 'bond' is in a mode/LACP state where gratuitous learning
 * packets are useful, and an active slave exists to send them from.
 * NOTE(review): the return-type line and braces are elided in this
 * extract. */
669 may_send_learning_packets(const struct bond *bond)
671 return ((bond->lacp_status == LACP_DISABLED
672 && (bond->balance == BM_SLB || bond->balance == BM_AB))
673 || (bond->lacp_fallback_ab && bond->lacp_status == LACP_CONFIGURED))
674 && bond->active_slave;
677 /* Returns true if 'bond' needs the client to send out packets to assist with
678 * MAC learning on 'bond'. If this function returns true, then the client
679 * should iterate through its MAC learning table for the bridge on which 'bond'
680 * is located. For each MAC that has been learned on a port other than 'bond',
681 * it should call bond_compose_learning_packet().
683 * This function will only return true if 'bond' is in SLB or active-backup
684 * mode and LACP is not negotiated. Otherwise sending learning packets isn't
687 * Calling this function resets the state that it checks. */
/* NOTE(review): the return-type line, the declaration of 'send', braces, and
 * the return statement are elided in this extract. */
689 bond_should_send_learning_packets(struct bond *bond)
693 ovs_rwlock_wrlock(&rwlock);
694 send = bond->send_learning_packets && may_send_learning_packets(bond);
/* Reset the flag: each request is reported to the caller only once. */
695 bond->send_learning_packets = false;
696 ovs_rwlock_unlock(&rwlock);
700 /* Sends a gratuitous learning packet on 'bond' from 'eth_src' on 'vlan'.
702 * See bond_should_send_learning_packets() for description of usage. The
703 * caller should send the composed packet on the port associated with
704 * port_aux and takes ownership of the returned ofpbuf. */
/* NOTE(review): the return-type line, the declaration of 'flow', braces, and
 * the return of 'packet' are elided in this extract. */
706 bond_compose_learning_packet(struct bond *bond,
707 const uint8_t eth_src[ETH_ADDR_LEN],
708 uint16_t vlan, void **port_aux)
710 struct bond_slave *slave;
711 struct ofpbuf *packet;
714 ovs_rwlock_rdlock(&rwlock);
715 ovs_assert(may_send_learning_packets(bond));
/* Choose the output slave using only the source MAC (and 'vlan'). */
716 memset(&flow, 0, sizeof flow);
717 memcpy(flow.dl_src, eth_src, ETH_ADDR_LEN);
718 slave = choose_output_slave(bond, &flow, NULL, vlan);
/* Compose an RARP; tag it with 'vlan' when nonzero. */
720 packet = ofpbuf_new(0);
721 compose_rarp(packet, eth_src);
723 eth_push_vlan(packet, htons(ETH_TYPE_VLAN), htons(vlan));
726 *port_aux = slave->aux;
727 ovs_rwlock_unlock(&rwlock);
731 /* Checks whether a packet that arrived on 'slave_' within 'bond', with an
732 * Ethernet destination address of 'eth_dst', should be admitted.
734 * The return value is one of the following:
736 * - BV_ACCEPT: Admit the packet.
738 * - BV_DROP: Drop the packet.
740 * - BV_DROP_IF_MOVED: Consult the MAC learning table for the packet's
741 * Ethernet source address and VLAN. If there is none, or if the packet
742 * is on the learned port, then admit the packet. If a different port has
743 * been learned, however, drop the packet (and do not use it for MAC
/* NOTE(review): the end of the comment above, the return-type line, braces,
 * case labels, 'goto'/'break' statements, and the function exit that
 * returns 'verdict' are elided in this extract. */
747 bond_check_admissibility(struct bond *bond, const void *slave_,
748 const uint8_t eth_dst[ETH_ADDR_LEN])
750 enum bond_verdict verdict = BV_DROP;
751 struct bond_slave *slave;
753 ovs_rwlock_rdlock(&rwlock);
754 slave = bond_slave_lookup(bond, slave_);
759 /* LACP bonds have very loose admissibility restrictions because we can
760 * assume the remote switch is aware of the bond and will "do the right
761 * thing". However, as a precaution we drop packets on disabled slaves
762 * because no correctly implemented partner switch should be sending
765 * If LACP is configured, but LACP negotiations have been unsuccessful, we
766 * drop all incoming traffic except if lacp_fallback_ab is enabled. */
767 switch (bond->lacp_status) {
768 case LACP_NEGOTIATED:
769 verdict = slave->enabled ? BV_ACCEPT : BV_DROP;
771 case LACP_CONFIGURED:
772 if (!bond->lacp_fallback_ab) {
779 /* Drop all multicast packets on inactive slaves. */
780 if (eth_addr_is_multicast(eth_dst)) {
781 if (bond->active_slave != slave) {
786 switch (bond->balance) {
788 /* TCP balanced bonds require successful LACP negotiations. Based on the
789 * above check, LACP is off or lacp_fallback_ab is true on this bond.
790 * If lacp_fallback_ab is true fall through to BM_AB case else, we
791 * drop all incoming traffic. */
792 if (!bond->lacp_fallback_ab) {
797 /* Drop all packets which arrive on backup slaves. This is similar to
798 * how Linux bonding handles active-backup bonds. */
799 if (bond->active_slave != slave) {
800 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
802 VLOG_DBG_RL(&rl, "active-backup bond received packet on backup"
803 " slave (%s) destined for " ETH_ADDR_FMT,
804 slave->name, ETH_ADDR_ARGS(eth_dst));
811 /* Drop all packets for which we have learned a different input port,
812 * because we probably sent the packet on one slave and got it back on
813 * the other. Gratuitous ARP packets are an exception to this rule:
814 * the host has moved to another switch. The exception to the
815 * exception is if we locked the learning table to avoid reflections on
817 verdict = BV_DROP_IF_MOVED;
823 ovs_rwlock_unlock(&rwlock);
828 /* Returns the slave (registered on 'bond' by bond_slave_register()) to which
829 * a packet with the given 'flow' and 'vlan' should be forwarded. Returns
830 * NULL if the packet should be dropped because no slaves are enabled.
832 * 'vlan' is not necessarily the same as 'flow->vlan_tci'. First, 'vlan'
833 * should be a VID only (i.e. excluding the PCP bits). Second,
834 * 'flow->vlan_tci' is the VLAN TCI that appeared on the packet (so it will be
835 * nonzero only for trunk ports), whereas 'vlan' is the logical VLAN that the
836 * packet belongs to (so for an access port it will be the access port's VLAN).
838 * If 'wc' is non-NULL, bitwise-OR's 'wc' with the set of bits that were
839 * significant in the selection. At some point earlier, 'wc' should
840 * have been initialized (e.g., by flow_wildcards_init_catchall()).
/* NOTE(review): the return-type line, the declaration of 'aux', braces, and
 * the return of 'aux' are elided in this extract. */
843 bond_choose_output_slave(struct bond *bond, const struct flow *flow,
844 struct flow_wildcards *wc, uint16_t vlan)
846 struct bond_slave *slave;
849 ovs_rwlock_rdlock(&rwlock);
850 slave = choose_output_slave(bond, flow, wc, vlan);
/* Translate back to the client's opaque handle (NULL means "drop"). */
851 aux = slave ? slave->aux : NULL;
852 ovs_rwlock_unlock(&rwlock);
/* Folds 'rule_tx_bytes' (a cumulative rule byte counter) into 'entry':
 * applies the delta since the last observation to 'tx_bytes' and remembers
 * the new cumulative value in 'pr_tx_bytes'.
 * NOTE(review): the return-type line, braces, and the declaration of 'delta'
 * are elided in this extract. */
859 bond_entry_account(struct bond_entry *entry, uint64_t rule_tx_bytes)
860 OVS_REQ_WRLOCK(rwlock)
865 delta = rule_tx_bytes - entry->pr_tx_bytes;
866 entry->tx_bytes += delta;
867 entry->pr_tx_bytes = rule_tx_bytes;
871 /* Maintain bond stats using post recirculation rule byte counters.*/
/* NOTE(review): the return-type line, braces, the declaration of 'i', and
 * the 'n_bytes' declaration are elided in this extract. */
873 bond_recirculation_account(struct bond *bond)
877 ovs_rwlock_wrlock(&rwlock);
878 for (i=0; i<=BOND_MASK; i++) {
879 struct bond_entry *entry = &bond->hash[i];
880 struct rule *rule = entry->pr_rule;
883 uint64_t n_packets OVS_UNUSED;
884 long long int used OVS_UNUSED;
/* Pull cumulative stats from the rule and fold the byte delta into
 * the entry via bond_entry_account(). */
887 rule->ofproto->ofproto_class->rule_get_stats(
888 rule, &n_packets, &n_bytes, &used);
889 bond_entry_account(entry, n_bytes);
892 ovs_rwlock_unlock(&rwlock);
/* Reports whether 'bond' can use recirculation (only in balance-tcp mode)
 * and, on request, stores the recirculation ID and hash basis in
 * '*recirc_id' and '*hash_bias'.
 * NOTE(review): the return-type line, braces, null checks on the out
 * parameters, and the return statements are elided in this extract. */
896 bond_may_recirc(const struct bond *bond, uint32_t *recirc_id,
899 if (bond->balance == BM_TCP) {
901 *recirc_id = bond->recirc_id;
904 *hash_bias = bond->basis;
/* Ensures every hash bucket has an enabled slave assigned (picking randomly,
 * falling back to the active slave) and, when anything changed or 'force' is
 * set, regenerates the post-recirculation rules.
 * NOTE(review): the return-type line, braces, the "update_rules = true"
 * assignment, and the guard around update_recirc_rules() are elided in this
 * extract. */
913 bond_update_post_recirc_rules(struct bond* bond, const bool force)
915 struct bond_entry *e;
916 bool update_rules = force; /* Always update rules if caller forces it. */
918 /* Make sure all bond entries are populated */
919 for (e = bond->hash; e <= &bond->hash[BOND_MASK]; e++) {
920 if (!e->slave || !e->slave->enabled) {
/* Pick a random slave; if it is disabled, fall back to the
 * active slave. */
922 e->slave = CONTAINER_OF(hmap_random_node(&bond->slaves),
923 struct bond_slave, hmap_node);
924 if (!e->slave->enabled) {
925 e->slave = bond->active_slave;
931 update_recirc_rules(bond);
/* Returns true if 'bond' does load balancing: rebalancing enabled and mode
 * is balance-slb or balance-tcp.
 * NOTE(review): the return-type line and braces are elided in this
 * extract. */
938 bond_is_balanced(const struct bond *bond) OVS_REQ_RDLOCK(rwlock)
940 return bond->rebalance_interval
941 && (bond->balance == BM_SLB || bond->balance == BM_TCP);
944 /* Notifies 'bond' that 'n_bytes' bytes were sent in 'flow' within 'vlan'. */
/* NOTE(review): the return-type line and braces are elided in this
 * extract. */
946 bond_account(struct bond *bond, const struct flow *flow, uint16_t vlan,
949 ovs_rwlock_wrlock(&rwlock);
/* Only balanced modes track per-bucket byte counts. */
950 if (bond_is_balanced(bond)) {
951 lookup_bond_entry(bond, flow, vlan)->tx_bytes += n_bytes;
953 ovs_rwlock_unlock(&rwlock);
/* Converts a node of a 'bals' list back into its containing bond_slave. */
956 static struct bond_slave *
957 bond_slave_from_bal_node(struct list *bal) OVS_REQ_RDLOCK(rwlock)
959 return CONTAINER_OF(bal, struct bond_slave, bal_node);
/* Debug-logs the per-slave load summary in 'bals' (slave name, kB carried,
 * disabled flag, and per-hash-bucket contributions). No-op unless DBG
 * logging is enabled.
 * NOTE(review): the return-type line, some braces, and ds_destroy() are
 * elided in this extract. */
963 log_bals(struct bond *bond, const struct list *bals)
964 OVS_REQ_RDLOCK(rwlock)
966 if (VLOG_IS_DBG_ENABLED()) {
967 struct ds ds = DS_EMPTY_INITIALIZER;
968 const struct bond_slave *slave;
970 LIST_FOR_EACH (slave, bal_node, bals) {
972 ds_put_char(&ds, ',');
974 ds_put_format(&ds, " %s %"PRIu64"kB",
975 slave->name, slave->tx_bytes / 1024);
977 if (!slave->enabled) {
978 ds_put_cstr(&ds, " (disabled)");
980 if (!list_is_empty(&slave->entries)) {
981 struct bond_entry *e;
983 ds_put_cstr(&ds, " (");
984 LIST_FOR_EACH (e, list_node, &slave->entries) {
985 if (&e->list_node != list_front(&slave->entries)) {
986 ds_put_cstr(&ds, " + ");
/* Bucket index derives from pointer arithmetic into bond->hash. */
988 ds_put_format(&ds, "h%"PRIdPTR": %"PRIu64"kB",
989 e - bond->hash, e->tx_bytes / 1024);
991 ds_put_cstr(&ds, ")");
994 VLOG_DBG("bond %s:%s", bond->name, ds_cstr(&ds));
999 /* Shifts 'hash' from its current slave to 'to'. */
/* NOTE(review): the return-type line, braces, and the reassignment of
 * hash->slave to 'to' are elided in this extract. */
1001 bond_shift_load(struct bond_entry *hash, struct bond_slave *to)
1002 OVS_REQ_WRLOCK(rwlock)
1004 struct bond_slave *from = hash->slave;
1005 struct bond *bond = from->bond;
1006 uint64_t delta = hash->tx_bytes;
1008 VLOG_INFO("bond %s: shift %"PRIu64"kB of load (with hash %"PRIdPTR") "
1009 "from %s to %s (now carrying %"PRIu64"kB and "
1010 "%"PRIu64"kB load, respectively)",
1011 bond->name, delta / 1024, hash - bond->hash,
1012 from->name, to->name,
1013 (from->tx_bytes - delta) / 1024,
1014 (to->tx_bytes + delta) / 1024);
1016 /* Shift load away from 'from' to 'to'. */
1017 from->tx_bytes -= delta;
1018 to->tx_bytes += delta;
1020 /* Arrange for flows to be revalidated. */
1022 bond->bond_revalidate = true;
1025 /* Picks and returns a bond_entry to migrate from 'from' (the most heavily
1026 * loaded bond slave) to a bond slave that has 'to_tx_bytes' bytes of load,
1027 * given that doing so must decrease the ratio of the load on the two slaves by
1028 * at least 0.1. Returns NULL if there is no appropriate entry.
1030 * The list of entries isn't sorted. I don't know of a reason to prefer to
1031 * shift away small hashes or large hashes. */
/* NOTE(review): braces, the declaration of 'delta', the "return e" / "return
 * NULL" statements, and loop closers are elided in this extract. */
1032 static struct bond_entry *
1033 choose_entry_to_migrate(const struct bond_slave *from, uint64_t to_tx_bytes)
1034 OVS_REQ_WRLOCK(rwlock)
1036 struct bond_entry *e;
1038 if (list_is_short(&from->entries)) {
1039 /* 'from' carries no more than one MAC hash, so shifting load away from
1040 * it would be pointless. */
1044 LIST_FOR_EACH (e, list_node, &from->entries) {
1045 double old_ratio, new_ratio;
1048 if (to_tx_bytes == 0) {
1049 /* Nothing on the new slave, move it. */
1053 delta = e->tx_bytes;
1054 old_ratio = (double)from->tx_bytes / to_tx_bytes;
1055 new_ratio = (double)(from->tx_bytes - delta) / (to_tx_bytes + delta);
1056 if (old_ratio - new_ratio > 0.1
1057 && fabs(new_ratio - 1.0) < fabs(old_ratio - 1.0)) {
1058 /* We're aiming for an ideal ratio of 1, meaning both the 'from'
1059 and 'to' slave have the same load. Therefore, we only move an
1060 entry if it decreases the load on 'from', and brings us closer
1061 to equal traffic load. */
1069 /* Inserts 'slave' into 'bals' so that descending order of 'tx_bytes' is
/* NOTE(review): the end of the comment above, the return-type line, braces,
 * and the loop's break are elided in this extract. */
1072 insert_bal(struct list *bals, struct bond_slave *slave)
1074 struct bond_slave *pos;
/* Walk until the first element carrying less load; insert before it (or at
 * the end, since LIST_FOR_EACH leaves 'pos' at the list head sentinel). */
1076 LIST_FOR_EACH (pos, bal_node, bals) {
1077 if (slave->tx_bytes > pos->tx_bytes) {
1081 list_insert(&pos->bal_node, &slave->bal_node);
1084 /* Removes 'slave' from its current list and then inserts it into 'bals' so
1085 * that descending order of 'tx_bytes' is maintained. */
/* NOTE(review): the return-type line and braces are elided in this
 * extract. */
1087 reinsert_bal(struct list *bals, struct bond_slave *slave)
1089 list_remove(&slave->bal_node);
1090 insert_bal(bals, slave);
1093 /* If 'bond' needs rebalancing, does so.
1095 * The caller should have called bond_account() for each active flow, or in case
1096 * of recirculation is used, have called bond_recirculation_account(bond),
1097 * to ensure that flow data is consistently accounted at this point.
1099 * Return whether rebalancing took place.*/
/* NOTE(review): the return-type line, the declaration/initialization of
 * 'bals' and 'overload', several braces, 'goto'/'break' statements, the EWMA
 * decay loop body, and the return statement are elided in this extract. */
1101 bond_rebalance(struct bond *bond)
1103 struct bond_slave *slave;
1104 struct bond_entry *e;
1106 bool rebalanced = false;
1108 ovs_rwlock_wrlock(&rwlock);
/* Skip unless balancing is enabled and the rebalance timer has fired. */
1109 if (!bond_is_balanced(bond) || time_msec() < bond->next_rebalance) {
1112 bond->next_rebalance = time_msec() + bond->rebalance_interval;
1114 /* Add each bond_entry to its slave's 'entries' list.
1115 * Compute each slave's tx_bytes as the sum of its entries' tx_bytes. */
1116 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1117 slave->tx_bytes = 0;
1118 list_init(&slave->entries);
1120 for (e = &bond->hash[0]; e <= &bond->hash[BOND_MASK]; e++) {
1121 if (e->slave && e->tx_bytes) {
1122 e->slave->tx_bytes += e->tx_bytes;
1123 list_push_back(&e->slave->entries, &e->list_node);
1127 /* Add enabled slaves to 'bals' in descending order of tx_bytes.
1129 * XXX This is O(n**2) in the number of slaves but it could be O(n lg n)
1130 * with a proper list sort algorithm. */
1132 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1133 if (slave->enabled) {
1134 insert_bal(&bals, slave);
1137 log_bals(bond, &bals);
1139 /* Shift load from the most-loaded slaves to the least-loaded slaves. */
1140 while (!list_is_short(&bals)) {
1141 struct bond_slave *from = bond_slave_from_bal_node(list_front(&bals));
1142 struct bond_slave *to = bond_slave_from_bal_node(list_back(&bals));
1145 overload = from->tx_bytes - to->tx_bytes;
1146 if (overload < to->tx_bytes >> 5 || overload < 100000) {
1147 /* The extra load on 'from' (and all less-loaded slaves), compared
1148 * to that of 'to' (the least-loaded slave), is less than ~3%, or
1149 * it is less than ~1Mbps. No point in rebalancing. */
1153 /* 'from' is carrying significantly more load than 'to'. Pick a hash
1154 * to move from 'from' to 'to'. */
1155 e = choose_entry_to_migrate(from, to->tx_bytes);
1157 bond_shift_load(e, to);
1159 /* Delete element from from->entries.
1161 * We don't add the element to to->hashes. That would only allow
1162 * 'e' to be migrated to another slave in this rebalancing run, and
1163 * there is no point in doing that. */
1164 list_remove(&e->list_node);
1166 /* Re-sort 'bals'. */
1167 reinsert_bal(&bals, from);
1168 reinsert_bal(&bals, to);
1171 /* Can't usefully migrate anything away from 'from'.
1172 * Don't reconsider it. */
1173 list_remove(&from->bal_node);
1177 /* Implement exponentially weighted moving average. A weight of 1/2 causes
1178 * historical data to decay to <1% in 7 rebalancing runs. 1,000,000 bytes
1179 * take 20 rebalancing runs to decay to 0 and get deleted entirely. */
1180 for (e = &bond->hash[0]; e <= &bond->hash[BOND_MASK]; e++) {
1185 ovs_rwlock_unlock(&rwlock);
1189 /* Bonding unixctl user interface functions. */
/* Returns the bond named 'name' in 'all_bonds', or (per the elided fall-
 * through) presumably NULL when none matches.
 * NOTE(review): the declaration of 'bond', braces, and the return statements
 * are elided in this extract. */
1191 static struct bond *
1192 bond_find(const char *name) OVS_REQ_RDLOCK(rwlock)
1196 HMAP_FOR_EACH_WITH_HASH (bond, hmap_node, hash_string(name, 0),
1198 if (!strcmp(bond->name, name)) {
/* Returns the slave of 'bond' whose name is 'slave_name', or (per the elided
 * fall-through) presumably NULL when none matches.
 * NOTE(review): braces and the return statements are elided in this
 * extract. */
1205 static struct bond_slave *
1206 bond_lookup_slave(struct bond *bond, const char *slave_name)
1208 struct bond_slave *slave;
1210 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1211 if (!strcmp(slave->name, slave_name)) {
/* unixctl handler: replies with a tab-separated table listing every bond's
 * name, mode, recirculation ID, and comma-separated slave names.
 * NOTE(review): the return-type line, braces, a first-iteration guard before
 * the ", " separator, and ds_destroy() are elided in this extract. */
1219 bond_unixctl_list(struct unixctl_conn *conn,
1220 int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
1221 void *aux OVS_UNUSED)
1223 struct ds ds = DS_EMPTY_INITIALIZER;
1224 const struct bond *bond;
1226 ds_put_cstr(&ds, "bond\ttype\trecircID\tslaves\n");
1228 ovs_rwlock_rdlock(&rwlock);
1229 HMAP_FOR_EACH (bond, hmap_node, all_bonds) {
1230 const struct bond_slave *slave;
1233 ds_put_format(&ds, "%s\t%s\t%d\t", bond->name,
1234 bond_mode_to_string(bond->balance), bond->recirc_id);
1237 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1239 ds_put_cstr(&ds, ", ");
1241 ds_put_cstr(&ds, slave->name);
1243 ds_put_char(&ds, '\n');
1245 ovs_rwlock_unlock(&rwlock);
1246 unixctl_command_reply(conn, ds_cstr(&ds));
/* Appends a human-readable description of 'bond' to 'ds': header, balance
 * mode, recirculation eligibility, hash basis, up/down delays, time until
 * the next rebalance (balanced modes only), LACP status, and then a
 * per-slave section listing enabled/active state, pending delay expiry,
 * may_enable, and (for balanced bonds) the load carried by each hash
 * bucket assigned to the slave.  Caller must hold 'rwlock' for reading.
 * Slaves are printed in sorted-name order via a temporary shash. */
1251 bond_print_details(struct ds *ds, const struct bond *bond)
1252 OVS_REQ_RDLOCK(rwlock)
1254 struct shash slave_shash = SHASH_INITIALIZER(&slave_shash);
1255 const struct shash_node **sorted_slaves = NULL;
1256 const struct bond_slave *slave;
1261 ds_put_format(ds, "---- %s ----\n", bond->name);
1262 ds_put_format(ds, "bond_mode: %s\n",
1263 bond_mode_to_string(bond->balance));
/* -1 is printed when recirculation is not usable for this bond. */
1265 may_recirc = bond_may_recirc(bond, &recirc_id, NULL);
1266 ds_put_format(ds, "bond may use recirculation: %s, Recirc-ID : %d\n",
1267 may_recirc ? "yes" : "no", may_recirc ? recirc_id: -1);
1269 ds_put_format(ds, "bond-hash-basis: %"PRIu32"\n", bond->basis);
1271 ds_put_format(ds, "updelay: %d ms\n", bond->updelay);
1272 ds_put_format(ds, "downdelay: %d ms\n", bond->downdelay);
1274 if (bond_is_balanced(bond)) {
1275 ds_put_format(ds, "next rebalance: %lld ms\n",
1276 bond->next_rebalance - time_msec());
1279 ds_put_cstr(ds, "lacp_status: ");
1280 switch (bond->lacp_status) {
1281 case LACP_NEGOTIATED:
1282 ds_put_cstr(ds, "negotiated\n");
1284 case LACP_CONFIGURED:
1285 ds_put_cstr(ds, "configured\n");
1288 ds_put_cstr(ds, "off\n");
1291 ds_put_cstr(ds, "<unknown>\n");
/* Sort slaves by name so output is stable across runs. */
1295 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1296 shash_add(&slave_shash, slave->name, slave);
1298 sorted_slaves = shash_sort(&slave_shash);
1300 for (i = 0; i < shash_count(&slave_shash); i++) {
1301 struct bond_entry *be;
1303 slave = sorted_slaves[i]->data;
1306 ds_put_format(ds, "\nslave %s: %s\n",
1307 slave->name, slave->enabled ? "enabled" : "disabled");
1308 if (slave == bond->active_slave) {
1309 ds_put_cstr(ds, "\tactive slave\n");
1311 if (slave->delay_expires != LLONG_MAX) {
/* An enabled slave counting down is about to be disabled (downdelay)
 * and vice versa. */
1312 ds_put_format(ds, "\t%s expires in %lld ms\n",
1313 slave->enabled ? "downdelay" : "updelay",
1314 slave->delay_expires - time_msec());
1317 ds_put_format(ds, "\tmay_enable: %s\n",
1318 slave->may_enable ? "true" : "false");
/* Hash-bucket load is only meaningful for balanced modes. */
1320 if (!bond_is_balanced(bond)) {
1325 for (be = bond->hash; be <= &bond->hash[BOND_MASK]; be++) {
1326 int hash = be - bond->hash;
/* Only report buckets assigned to this slave. */
1329 if (be->slave != slave) {
1333 be_tx_k = be->tx_bytes / 1024;
1335 ds_put_format(ds, "\thash %d: %"PRIu64" kB load\n",
1339 /* XXX How can we list the MACs assigned to hashes of SLB bonds? */
1342 shash_destroy(&slave_shash);
1343 free(sorted_slaves);
1344 ds_put_cstr(ds, "\n");
/* unixctl handler for "bond/show [port]": with an argument, prints details
 * for the named bond (error reply if it does not exist); without one,
 * prints details for every bond in 'all_bonds'.  Holds 'rwlock' for
 * reading around the lookup and printing. */
1348 bond_unixctl_show(struct unixctl_conn *conn,
1349 int argc, const char *argv[],
1350 void *aux OVS_UNUSED)
1352 struct ds ds = DS_EMPTY_INITIALIZER;
1354 ovs_rwlock_rdlock(&rwlock);
1356 const struct bond *bond = bond_find(argv[1]);
1359 unixctl_command_reply_error(conn, "no such bond");
1362 bond_print_details(&ds, bond);
1364 const struct bond *bond;
1366 HMAP_FOR_EACH (bond, hmap_node, all_bonds) {
1367 bond_print_details(&ds, bond);
1371 unixctl_command_reply(conn, ds_cstr(&ds));
1375 ovs_rwlock_unlock(&rwlock);
/* unixctl handler for "bond/migrate port hash slave": forcibly reassigns
 * hash bucket 'hash' of SLB bond 'port' to enabled slave 'slave'.  Replies
 * with an error for a nonexistent bond or slave, a non-SLB bond, a
 * malformed hash, or a disabled slave.  Sets 'bond_revalidate' so the
 * datapath flows are recomputed.  Takes 'rwlock' for writing. */
1379 bond_unixctl_migrate(struct unixctl_conn *conn,
1380 int argc OVS_UNUSED, const char *argv[],
1381 void *aux OVS_UNUSED)
1383 const char *bond_s = argv[1];
1384 const char *hash_s = argv[2];
1385 const char *slave_s = argv[3];
1387 struct bond_slave *slave;
1388 struct bond_entry *entry;
1391 ovs_rwlock_wrlock(&rwlock);
1392 bond = bond_find(bond_s);
1394 unixctl_command_reply_error(conn, "no such bond");
/* Manual hash migration only makes sense for source-load-balancing. */
1398 if (bond->balance != BM_SLB) {
1399 unixctl_command_reply_error(conn, "not an SLB bond");
/* Accept only an all-digit hash string, truncated into bucket range. */
1403 if (strspn(hash_s, "0123456789") == strlen(hash_s)) {
1404 hash = atoi(hash_s) & BOND_MASK;
1406 unixctl_command_reply_error(conn, "bad hash");
1410 slave = bond_lookup_slave(bond, slave_s);
1412 unixctl_command_reply_error(conn, "no such slave");
1416 if (!slave->enabled) {
1417 unixctl_command_reply_error(conn, "cannot migrate to disabled slave");
1421 entry = &bond->hash[hash];
1422 bond->bond_revalidate = true;
1423 entry->slave = slave;
1424 unixctl_command_reply(conn, "migrated");
1427 ovs_rwlock_unlock(&rwlock);
/* unixctl handler for "bond/set-active-slave port slave": makes 'slave'
 * the active slave of 'port'.  Errors out for a nonexistent bond/slave or
 * a disabled slave.  On an actual change it triggers revalidation, logs
 * the switch, and schedules learning packets; replies "no change" if the
 * slave was already active.  Takes 'rwlock' for writing. */
1431 bond_unixctl_set_active_slave(struct unixctl_conn *conn,
1432 int argc OVS_UNUSED, const char *argv[],
1433 void *aux OVS_UNUSED)
1435 const char *bond_s = argv[1];
1436 const char *slave_s = argv[2];
1438 struct bond_slave *slave;
1440 ovs_rwlock_wrlock(&rwlock);
1441 bond = bond_find(bond_s);
1443 unixctl_command_reply_error(conn, "no such bond");
1447 slave = bond_lookup_slave(bond, slave_s);
1449 unixctl_command_reply_error(conn, "no such slave");
1453 if (!slave->enabled) {
1454 unixctl_command_reply_error(conn, "cannot make disabled slave active");
1458 if (bond->active_slave != slave) {
1459 bond->bond_revalidate = true;
1460 bond->active_slave = slave;
1461 VLOG_INFO("bond %s: active interface is now %s",
1462 bond->name, slave->name);
1463 bond->send_learning_packets = true;
1464 unixctl_command_reply(conn, "done");
1466 unixctl_command_reply(conn, "no change");
1469 ovs_rwlock_unlock(&rwlock);
/* Shared implementation for the "bond/enable-slave" and
 * "bond/disable-slave" unixctl commands: looks up argv[1] as the bond and
 * argv[2] as the slave, then enables or disables the slave per 'enable'.
 * Replies with an error if the bond or slave does not exist.  Takes
 * 'rwlock' for writing. */
1473 enable_slave(struct unixctl_conn *conn, const char *argv[], bool enable)
1475 const char *bond_s = argv[1];
1476 const char *slave_s = argv[2];
1478 struct bond_slave *slave;
1480 ovs_rwlock_wrlock(&rwlock);
1481 bond = bond_find(bond_s);
1483 unixctl_command_reply_error(conn, "no such bond");
1487 slave = bond_lookup_slave(bond, slave_s);
1489 unixctl_command_reply_error(conn, "no such slave");
1493 bond_enable_slave(slave, enable);
1494 unixctl_command_reply(conn, enable ? "enabled" : "disabled");
1497 ovs_rwlock_unlock(&rwlock);
/* unixctl handler for "bond/enable-slave port slave": thin wrapper that
 * delegates to enable_slave() with enable=true. */
1501 bond_unixctl_enable_slave(struct unixctl_conn *conn,
1502 int argc OVS_UNUSED, const char *argv[],
1503 void *aux OVS_UNUSED)
1505 enable_slave(conn, argv, true);
/* unixctl handler for "bond/disable-slave port slave": thin wrapper that
 * delegates to enable_slave() with enable=false. */
1509 bond_unixctl_disable_slave(struct unixctl_conn *conn,
1510 int argc OVS_UNUSED, const char *argv[],
1511 void *aux OVS_UNUSED)
1513 enable_slave(conn, argv, false);
/* unixctl handler for "bond/hash mac [vlan] [basis]": parses a MAC address
 * plus optional VLAN and hash basis, computes the source-MAC bond hash
 * bucket (masked by BOND_MASK), and replies with the bucket number.
 * Replies with an error for an unparseable vlan, basis, or mac. */
1517 bond_unixctl_hash(struct unixctl_conn *conn, int argc, const char *argv[],
1518 void *aux OVS_UNUSED)
1520 const char *mac_s = argv[1];
1521 const char *vlan_s = argc > 2 ? argv[2] : NULL;
1522 const char *basis_s = argc > 3 ? argv[3] : NULL;
1523 uint8_t mac[ETH_ADDR_LEN];
1530 if (!ovs_scan(vlan_s, "%u", &vlan)) {
1531 unixctl_command_reply_error(conn, "invalid vlan");
1539 if (!ovs_scan(basis_s, "%"SCNu32, &basis)) {
1540 unixctl_command_reply_error(conn, "invalid basis");
1547 if (ovs_scan(mac_s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))) {
1548 hash = bond_hash_src(mac, vlan, basis) & BOND_MASK;
1550 hash_cstr = xasprintf("%u", hash);
1551 unixctl_command_reply(conn, hash_cstr);
1554 unixctl_command_reply_error(conn, "invalid mac");
/* Registers all of the bond/* unixctl commands with their handlers and
 * argument-count bounds (min, max).  Presumably called once at startup
 * from bond module initialization -- the enclosing function's header is
 * outside this view; TODO confirm. */
1561 unixctl_command_register("bond/list", "", 0, 0, bond_unixctl_list, NULL);
1562 unixctl_command_register("bond/show", "[port]", 0, 1, bond_unixctl_show,
1564 unixctl_command_register("bond/migrate", "port hash slave", 3, 3,
1565 bond_unixctl_migrate, NULL);
1566 unixctl_command_register("bond/set-active-slave", "port slave", 2, 2,
1567 bond_unixctl_set_active_slave, NULL);
1568 unixctl_command_register("bond/enable-slave", "port slave", 2, 2,
1569 bond_unixctl_enable_slave, NULL);
1570 unixctl_command_register("bond/disable-slave", "port slave", 2, 2,
1571 bond_unixctl_disable_slave, NULL);
1572 unixctl_command_register("bond/hash", "mac [vlan] [basis]", 1, 3,
1573 bond_unixctl_hash, NULL);
/* Resets 'bond''s hash-bucket table.  For any mode other than
 * active-backup (BM_AB), allocates and zeroes BOND_BUCKETS entries and
 * schedules the next rebalance 'rebalance_interval' ms from now.
 * Active-backup bonds do not use the hash table. */
1577 bond_entry_reset(struct bond *bond)
1579 if (bond->balance != BM_AB) {
1580 size_t hash_len = BOND_BUCKETS * sizeof *bond->hash;
1583 bond->hash = xmalloc(hash_len);
1585 memset(bond->hash, 0, hash_len);
1587 bond->next_rebalance = time_msec() + bond->rebalance_interval;
/* Returns the slave of 'bond' whose client-provided auxiliary pointer is
 * 'slave_', or NULL if none matches.  Searches the hmap bucket keyed by
 * hash_pointer(slave_, 0). */
1594 static struct bond_slave *
1595 bond_slave_lookup(struct bond *bond, const void *slave_)
1597 struct bond_slave *slave;
1599 HMAP_FOR_EACH_IN_BUCKET (slave, hmap_node, hash_pointer(slave_, 0),
1601 if (slave->aux == slave_) {
/* Sets 'slave''s enabled state to 'enable', cancelling any pending
 * up/down-delay timer.  On a real state change it marks the owning bond
 * for revalidation, inserts the slave into (or removes it from) the
 * bond's 'enabled_slaves' list under the bond's mutex, and logs the new
 * state. */
1610 bond_enable_slave(struct bond_slave *slave, bool enable)
/* Any in-progress updelay/downdelay countdown is cancelled. */
1612 slave->delay_expires = LLONG_MAX;
1613 if (enable != slave->enabled) {
1614 slave->bond->bond_revalidate = true;
1615 slave->enabled = enable;
/* 'enabled_slaves' is read by the round-robin selector, so list
 * membership changes are protected by the bond's mutex. */
1617 ovs_mutex_lock(&slave->bond->mutex);
1619 list_insert(&slave->bond->enabled_slaves, &slave->list_node);
1621 list_remove(&slave->list_node);
1623 ovs_mutex_unlock(&slave->bond->mutex);
1625 VLOG_INFO("interface %s: %s", slave->name,
1626 slave->enabled ? "enabled" : "disabled");
/* Updates 'slave''s enabled state based on its current carrier status and
 * 'may_enable' flag, applying the bond's updelay/downdelay.  A change in
 * observed link state either cancels a pending transition (link returned
 * to its previous state) or starts a delay timer; when LACP is in use the
 * delay is zero.  Once the timer expires the slave is actually enabled or
 * disabled via bond_enable_slave(). */
1631 bond_link_status_update(struct bond_slave *slave)
1633 struct bond *bond = slave->bond;
1636 up = netdev_get_carrier(slave->netdev) && slave->may_enable;
/* True when the observed link state disagrees with the direction we are
 * currently headed: either a new transition began or a pending one
 * became moot. */
1637 if ((up == slave->enabled) != (slave->delay_expires == LLONG_MAX)) {
1638 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1639 VLOG_INFO_RL(&rl, "interface %s: link state %s",
1640 slave->name, up ? "up" : "down");
1641 if (up == slave->enabled) {
/* Link returned to its previous state; cancel the pending change. */
1642 slave->delay_expires = LLONG_MAX;
1643 VLOG_INFO_RL(&rl, "interface %s: will not be %s",
1644 slave->name, up ? "disabled" : "enabled");
/* With LACP active, react immediately; otherwise honor the
 * configured updelay (coming up) or downdelay (going down). */
1646 int delay = (bond->lacp_status != LACP_DISABLED ? 0
1647 : up ? bond->updelay : bond->downdelay);
1648 slave->delay_expires = time_msec() + delay;
1650 VLOG_INFO_RL(&rl, "interface %s: will be %s if it stays %s "
1653 up ? "enabled" : "disabled",
/* Commit the transition once its delay has elapsed. */
1660 if (time_msec() >= slave->delay_expires) {
1661 bond_enable_slave(slave, up);
/* Hashes Ethernet source address 'mac' together with 'vlan' and 'basis'
 * for SLB (source-MAC) bond balancing.  Thin wrapper around hash_mac(). */
1666 bond_hash_src(const uint8_t mac[ETH_ADDR_LEN], uint16_t vlan, uint32_t basis)
1668 return hash_mac(mac, vlan, basis);
/* Hashes 'flow' for TCP-balanced bonds: copies the flow, overrides its
 * VLAN TCI with 'vlan' (network byte order), and feeds it to the
 * symmetric L4 flow hash with 'basis'. */
1672 bond_hash_tcp(const struct flow *flow, uint16_t vlan, uint32_t basis)
1674 struct flow hash_flow = *flow;
1675 hash_flow.vlan_tci = htons(vlan);
1677 /* The symmetric quality of this hash function is not required, but
1678 * flow_hash_symmetric_l4 already exists, and is sufficient for our
1679 * purposes, so we use it out of convenience. */
1680 return flow_hash_symmetric_l4(&hash_flow, basis);
/* Hashes 'flow' with 'vlan' according to 'bond''s balance mode: symmetric
 * L4 hash for BM_TCP, source-MAC hash for BM_SLB.  Asserts that the bond
 * is in one of those two (hash-based) modes. */
1684 bond_hash(const struct bond *bond, const struct flow *flow, uint16_t vlan)
1686 ovs_assert(bond->balance == BM_TCP || bond->balance == BM_SLB);
1688 return (bond->balance == BM_TCP
1689 ? bond_hash_tcp(flow, vlan, bond->basis)
1690 : bond_hash_src(flow->dl_src, vlan, bond->basis));
/* Returns the hash-table bucket of 'bond' that 'flow' (with 'vlan') maps
 * to, by masking bond_hash() down to BOND_MASK. */
1693 static struct bond_entry *
1694 lookup_bond_entry(const struct bond *bond, const struct flow *flow,
1697 return &bond->hash[bond_hash(bond, flow, vlan) & BOND_MASK];
1700 /* Selects and returns an enabled slave from the 'enabled_slaves' list
1701 * in a round-robin fashion. If the 'enabled_slaves' list is empty,
1703 static struct bond_slave *
1704 get_enabled_slave(struct bond *bond)
/* The list is shared with bond_enable_slave(), so access it only while
 * holding the bond's mutex. */
1708 ovs_mutex_lock(&bond->mutex);
1709 if (list_is_empty(&bond->enabled_slaves)) {
1710 ovs_mutex_unlock(&bond->mutex);
/* Rotate: pop the head and push it to the tail so successive calls
 * cycle through all enabled slaves. */
1714 node = list_pop_front(&bond->enabled_slaves);
1715 list_push_back(&bond->enabled_slaves, node);
1716 ovs_mutex_unlock(&bond->mutex);
1718 return CONTAINER_OF(node, struct bond_slave, list_node);
/* Chooses the slave that should transmit a packet with the given 'flow'
 * and 'vlan' on 'bond', and unwildcards in 'wc' the flow fields the
 * decision depended on.  With LACP configured but not negotiated, falls
 * back to active-backup if 'lacp_fallback_ab' is set (otherwise traffic
 * is dropped); in active-backup mode returns the active slave; in
 * hash-balanced modes looks up the flow's hash bucket, lazily assigning
 * an enabled slave to an empty or stale bucket via round-robin. */
1721 static struct bond_slave *
1722 choose_output_slave(const struct bond *bond, const struct flow *flow,
1723 struct flow_wildcards *wc, uint16_t vlan)
1725 struct bond_entry *e;
1728 balance = bond->balance;
1729 if (bond->lacp_status == LACP_CONFIGURED) {
1730 /* LACP has been configured on this bond but negotiations were
1731 * unsuccussful. If lacp_fallback_ab is enabled use active-
1732 * backup mode else drop all traffic. */
1733 if (!bond->lacp_fallback_ab) {
1741 return bond->active_slave;
1744 if (bond->lacp_status != LACP_NEGOTIATED) {
1745 /* Must have LACP negotiations for TCP balanced bonds. */
/* Record which fields the TCP-balance hash reads. */
1749 flow_mask_hash_fields(flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
/* SLB balancing hashes only the Ethernet source address. */
1754 flow_mask_hash_fields(flow, wc, NX_HASH_FIELDS_ETH_SRC);
1756 e = lookup_bond_entry(bond, flow, vlan);
/* Assign (or reassign) the bucket if it has no usable slave. */
1757 if (!e->slave || !e->slave->enabled) {
1758 e->slave = get_enabled_slave(CONST_CAST(struct bond*, bond));
/* Picks the best candidate for 'bond''s active slave: the first enabled
 * slave found, or failing that, among slaves that may be enabled and have
 * a pending updelay, the one whose delay expires soonest. */
1767 static struct bond_slave *
1768 bond_choose_slave(const struct bond *bond)
1770 struct bond_slave *slave, *best;
1772 /* Find an enabled slave. */
1773 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1774 if (slave->enabled) {
1779 /* All interfaces are disabled. Find an interface that will be enabled
1780 * after its updelay expires. */
1782 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1783 if (slave->delay_expires != LLONG_MAX
1784 && slave->may_enable
1785 && (!best || slave->delay_expires < best->delay_expires)) {
/* Re-selects 'bond''s active slave via bond_choose_slave() and logs the
 * outcome (rate-limited).  If the chosen slave is still waiting out an
 * updelay, it is enabled immediately, skipping the remaining delay.  A
 * change schedules learning packets; if no slave is usable and there was
 * previously an active one, logs that all interfaces are disabled. */
1793 bond_choose_active_slave(struct bond *bond)
1795 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1796 struct bond_slave *old_active_slave = bond->active_slave;
1798 bond->active_slave = bond_choose_slave(bond);
1799 if (bond->active_slave) {
1800 if (bond->active_slave->enabled) {
1801 VLOG_INFO_RL(&rl, "bond %s: active interface is now %s",
1802 bond->name, bond->active_slave->name);
1804 VLOG_INFO_RL(&rl, "bond %s: active interface is now %s, skipping "
1805 "remaining %lld ms updelay (since no interface was "
1806 "enabled)", bond->name, bond->active_slave->name,
1807 bond->active_slave->delay_expires - time_msec());
/* No interface is enabled, so enable the chosen one right away
 * rather than waiting out its updelay. */
1808 bond_enable_slave(bond->active_slave, true);
1811 bond->send_learning_packets = true;
1812 } else if (old_active_slave) {
1813 VLOG_INFO_RL(&rl, "bond %s: all interfaces disabled", bond->name);
1817 /* Attempts to make the sum of the bond slaves' statistics appear on the fake
1818 * bond interface. */
1820 bond_update_fake_slave_stats(struct bond *bond)
1822 struct netdev_stats bond_stats;
1823 struct bond_slave *slave;
1824 struct netdev *bond_dev;
1826 memset(&bond_stats, 0, sizeof bond_stats);
1828 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1829 struct netdev_stats slave_stats;
1831 if (!netdev_get_stats(slave->netdev, &slave_stats)) {
1832 /* XXX: We swap the stats here because they are swapped back when
1833 * reported by the internal device. The reason for this is
1834 * internal devices normally represent packets going into the
1835 * system but when used as fake bond device they represent packets
1836 * leaving the system. We really should do this in the internal
1837 * device itself because changing it here reverses the counts from
1838 * the perspective of the switch. However, the internal device
1839 * doesn't know what type of device it represents so we have to do
1840 * it here for now. */
1841 bond_stats.tx_packets += slave_stats.rx_packets;
1842 bond_stats.tx_bytes += slave_stats.rx_bytes;
1843 bond_stats.rx_packets += slave_stats.tx_packets;
1844 bond_stats.rx_bytes += slave_stats.tx_bytes;
1848 if (!netdev_open(bond->name, "system", &bond_dev)) {
1849 netdev_set_stats(bond_dev, &bond_stats);
1850 netdev_close(bond_dev);