2 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
27 #include "ofp-actions.h"
29 #include "ofproto/ofproto-provider.h"
30 #include "ofproto/ofproto-dpif.h"
31 #include "connectivity.h"
33 #include "dynamic-string.h"
42 #include "poll-loop.h"
50 VLOG_DEFINE_THIS_MODULE(bond);
/* Global reader-writer lock protecting 'all_bonds' and most per-bond state;
 * the OVS_GUARDED_BY / OVS_REQ_*LOCK annotations below document which
 * functions require it held. */
52 static struct ovs_rwlock rwlock = OVS_RWLOCK_INITIALIZER;
53 static struct hmap all_bonds__ = HMAP_INITIALIZER(&all_bonds__);
/* 'all_bonds' is a const pointer to 'all_bonds__' so the guard annotation
 * can attach to every access site. */
54 static struct hmap *const all_bonds OVS_GUARDED_BY(rwlock) = &all_bonds__;
56 /* Bit-mask for hashing a flow down to a bucket. */
57 #define BOND_MASK 0xff
58 #define BOND_BUCKETS (BOND_MASK + 1)
59 #define RECIRC_RULE_PRIORITY 20 /* Priority level for internal rules */
61 /* A hash bucket for mapping a flow to a slave.
62 * "struct bond" has an array of BOND_BUCKETS of these. */
/* NOTE(review): the "struct bond_entry {" opener and closing "};" are elided
 * from this listing; the members below belong to that struct. */
64 struct bond_slave *slave; /* Assigned slave, NULL if unassigned. */
65 uint64_t tx_bytes /* Count of bytes recently transmitted. */
66 OVS_GUARDED_BY(rwlock);
67 struct list list_node; /* In bond_slave's 'entries' list. */
/* NOTE(review): the 'pr_rule' member declaration itself is elided here;
 * only its documentation and 'pr_tx_bytes' are visible. */
71 * 'pr_rule' is the post-recirculation rule for this entry.
72 * 'pr_tx_bytes' is the most recently seen statistics for 'pr_rule', which
73 * is used to determine delta (applied to 'tx_bytes' above.) */
75 uint64_t pr_tx_bytes OVS_GUARDED_BY(rwlock);
78 /* A bond slave, that is, one of the links comprising a bond. */
/* NOTE(review): the "struct bond_slave {" opener and closing "};" are elided
 * from this listing; the members below belong to that struct. */
80 struct hmap_node hmap_node; /* In struct bond's slaves hmap. */
81 struct list list_node; /* In struct bond's enabled_slaves list. */
82 struct bond *bond; /* The bond that contains this slave. */
83 void *aux; /* Client-provided handle for this slave. */
85 struct netdev *netdev; /* Network device, owned by the client. */
86 unsigned int change_seq; /* Tracks changes in 'netdev'. */
87 ofp_port_t ofp_port; /* Open flow port number */
88 char *name; /* Name (a copy of netdev_get_name(netdev)). */
/* Link-state management: 'enabled' flips only after 'delay_expires' to
 * implement up/down delays; 'may_enable' is the client's (e.g. LACP's) vote. */
91 long long delay_expires; /* Time after which 'enabled' may change. */
92 bool enabled; /* May be chosen for flows? */
93 bool may_enable; /* Client considers this slave bondable. */
95 /* Rebalancing info. Used only by bond_rebalance(). */
96 struct list bal_node; /* In bond_rebalance()'s 'bals' list. */
97 struct list entries; /* 'struct bond_entry's assigned here. */
98 uint64_t tx_bytes; /* Sum across 'tx_bytes' of entries. */
101 /* A bond, that is, a set of network devices grouped to improve performance or
/* NOTE(review): the rest of this comment and the "struct bond {" opener are
 * elided from this listing; the members below belong to that struct. */
104 struct hmap_node hmap_node; /* In 'all_bonds' hmap. */
105 char *name; /* Name provided by client. */
106 struct ofproto_dpif *ofproto; /* The bridge this bond belongs to. */
113 * Any reader or writer of 'enabled_slaves' must hold 'mutex'.
114 * (To prevent the bond_slave from disappearing they must also hold
116 struct ovs_mutex mutex OVS_ACQ_AFTER(rwlock);
117 struct list enabled_slaves OVS_GUARDED; /* Contains struct bond_slaves. */
120 enum bond_mode balance; /* Balancing mode, one of BM_*. */
121 struct bond_slave *active_slave;
122 int updelay, downdelay; /* Delay before slave goes up/down, in ms. */
123 enum lacp_status lacp_status; /* Status of LACP negotiations. */
124 bool bond_revalidate; /* True if flows need revalidation. */
125 uint32_t basis; /* Basis for flow hash function. */
127 /* SLB specific bonding info. */
128 struct bond_entry *hash; /* An array of BOND_BUCKETS elements. */
129 int rebalance_interval; /* Interval between rebalances, in ms. */
130 long long int next_rebalance; /* Next rebalancing time. */
131 bool send_learning_packets;
132 uint32_t recirc_id; /* Non zero if recirculation can be used.*/
133 struct hmap pr_rule_ops; /* Helps to maintain post recirculation rules.*/
135 /* Legacy compatibility. */
136 long long int next_fake_iface_update; /* LLONG_MAX if disabled. */
137 bool lacp_fallback_ab; /* Fallback to active-backup on LACP failure. */
/* Reference count; the bond is destroyed when it drops to zero. */
139 struct ovs_refcount ref_cnt;
142 /* What to do with an bond_recirc_rule. */
/* NOTE(review): the enum's opening line (its tag/name) is elided here. */
144 ADD, /* Add the rule to ofproto's flow table. */
145 DEL, /* Delete the rule from the ofproto's flow table. */
148 /* A rule to add to or delete from ofproto's internal flow table. */
149 struct bond_pr_rule_op {
150 struct hmap_node hmap_node;
/* NOTE(review): the 'match' and 'op' members are elided from this listing. */
152 ofp_port_t out_ofport;
154 struct rule *pr_rule;
/* Forward declarations of internal helpers; the OVS_REQ_*LOCK annotations
 * document which state of the global 'rwlock' each caller must hold. */
157 static void bond_entry_reset(struct bond *) OVS_REQ_WRLOCK(rwlock);
158 static struct bond_slave *bond_slave_lookup(struct bond *, const void *slave_)
159 OVS_REQ_RDLOCK(rwlock);
160 static void bond_enable_slave(struct bond_slave *, bool enable)
161 OVS_REQ_WRLOCK(rwlock);
162 static void bond_link_status_update(struct bond_slave *)
163 OVS_REQ_WRLOCK(rwlock);
164 static void bond_choose_active_slave(struct bond *)
165 OVS_REQ_WRLOCK(rwlock);
166 static unsigned int bond_hash_src(const uint8_t mac[ETH_ADDR_LEN],
167 uint16_t vlan, uint32_t basis);
168 static unsigned int bond_hash_tcp(const struct flow *, uint16_t vlan,
170 static struct bond_entry *lookup_bond_entry(const struct bond *,
173 OVS_REQ_RDLOCK(rwlock);
174 static struct bond_slave *get_enabled_slave(struct bond *)
175 OVS_REQ_RDLOCK(rwlock);
176 static struct bond_slave *choose_output_slave(const struct bond *,
178 struct flow_wildcards *,
180 OVS_REQ_RDLOCK(rwlock);
181 static void bond_update_fake_slave_stats(struct bond *)
182 OVS_REQ_RDLOCK(rwlock);
184 /* Attempts to parse 's' as the name of a bond balancing mode. If successful,
185 * stores the mode in '*balance' and returns true. Otherwise returns false
186 * without modifying '*balance'. */
188 bond_mode_from_string(enum bond_mode *balance, const char *s)
/* Compares 's' against each mode's canonical string; the '*balance = BM_*'
 * assignments and return statements are elided from this listing. */
190 if (!strcmp(s, bond_mode_to_string(BM_TCP))) {
192 } else if (!strcmp(s, bond_mode_to_string(BM_SLB))) {
194 } else if (!strcmp(s, bond_mode_to_string(BM_AB))) {
202 /* Returns a string representing 'balance'. */
204 bond_mode_to_string(enum bond_mode balance) {
/* NOTE(review): the switch statement and case labels (BM_TCP/BM_SLB/BM_AB)
 * are elided; only the returned literals for each mode are visible. */
207 return "balance-tcp";
209 return "balance-slb";
211 return "active-backup";
217 /* Creates and returns a new bond whose configuration is initially taken from
220 * The caller should register each slave on the new bond by calling
221 * bond_slave_register(). */
223 bond_create(const struct bond_settings *s, struct ofproto_dpif *ofproto)
/* Zero-allocates the bond, initializes its containers/locks, then applies
 * the initial settings via bond_reconfigure().  The 'return bond;' and some
 * initialization lines are elided from this listing. */
227 bond = xzalloc(sizeof *bond);
228 bond->ofproto = ofproto;
229 hmap_init(&bond->slaves);
230 list_init(&bond->enabled_slaves);
231 ovs_mutex_init(&bond->mutex);
232 bond->next_fake_iface_update = LLONG_MAX;
233 ovs_refcount_init(&bond->ref_cnt);
236 hmap_init(&bond->pr_rule_ops);
238 bond_reconfigure(bond, s);
/* Takes a reference to 'bond_'; must be balanced with a bond_unref() call.
 * The const cast is deliberate: refcounting does not logically mutate. */
243 bond_ref(const struct bond *bond_)
245 struct bond *bond = CONST_CAST(struct bond *, bond_);
248 ovs_refcount_ref(&bond->ref_cnt);
/* Releases a reference to 'bond'; frees it when the last reference drops.
 * Teardown order: unhook from 'all_bonds' under the write lock, free slaves
 * (netdevs stay with the client), then recirculation rule state. */
255 bond_unref(struct bond *bond)
257 struct bond_slave *slave, *next_slave;
258 struct bond_pr_rule_op *pr_op, *next_op;
/* Early return unless this was the final reference. */
260 if (!bond || ovs_refcount_unref(&bond->ref_cnt) != 1) {
264 ovs_rwlock_wrlock(&rwlock);
265 hmap_remove(all_bonds, &bond->hmap_node);
266 ovs_rwlock_unlock(&rwlock);
268 HMAP_FOR_EACH_SAFE (slave, next_slave, hmap_node, &bond->slaves) {
269 hmap_remove(&bond->slaves, &slave->hmap_node);
270 /* Client owns 'slave->netdev'. */
274 hmap_destroy(&bond->slaves);
276 ovs_mutex_destroy(&bond->mutex);
280 HMAP_FOR_EACH_SAFE(pr_op, next_op, hmap_node, &bond->pr_rule_ops) {
281 hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node);
284 hmap_destroy(&bond->pr_rule_ops);
/* Return the recirculation ID to the datapath's pool, if one was allocated. */
286 if (bond->recirc_id) {
287 ofproto_dpif_free_recirc_id(bond->ofproto, bond->recirc_id);
/* Records an intent to install a post-recirculation rule mapping 'match' to
 * output port 'out_ofport'.  If an op with the same match already exists it
 * is updated in place; otherwise a new op is inserted into 'pr_rule_ops'.
 * NOTE(review): the 'op' field assignments (ADD) are elided from this
 * listing. */
294 add_pr_rule(struct bond *bond, const struct match *match,
295 ofp_port_t out_ofport, struct rule *rule)
297 uint32_t hash = match_hash(match, 0);
298 struct bond_pr_rule_op *pr_op;
300 HMAP_FOR_EACH_WITH_HASH(pr_op, hmap_node, hash, &bond->pr_rule_ops) {
301 if (match_equal(&pr_op->match, match)) {
303 pr_op->out_ofport = out_ofport;
304 pr_op->pr_rule = rule;
309 pr_op = xmalloc(sizeof *pr_op);
310 pr_op->match = *match;
312 pr_op->out_ofport = out_ofport;
313 pr_op->pr_rule = rule;
314 hmap_insert(&bond->pr_rule_ops, &pr_op->hmap_node, hash);
/* Synchronizes ofproto's internal flow table with the bond's desired set of
 * post-recirculation rules: marks every existing op, re-adds one rule per
 * populated hash bucket, then walks the ops adding/deleting internal flows
 * as needed.  NOTE(review): several control-flow lines (op marking, the
 * ADD/DEL switch) are elided from this listing. */
318 update_recirc_rules(struct bond *bond)
321 struct bond_pr_rule_op *pr_op, *next_op;
322 uint64_t ofpacts_stub[128 / 8];
323 struct ofpbuf ofpacts;
326 ofpbuf_use_stub(&ofpacts, ofpacts_stub, sizeof ofpacts_stub);
328 HMAP_FOR_EACH(pr_op, hmap_node, &bond->pr_rule_ops) {
/* Only TCP-balanced bonds with an allocated recirc ID install bucket rules. */
332 if ((bond->hash == NULL) || (!bond->recirc_id)) {
336 for (i = 0; i < BOND_BUCKETS; i++) {
337 struct bond_slave *slave = bond->hash[i].slave;
340 match_init_catchall(&match);
341 match_set_recirc_id(&match, bond->recirc_id);
342 /* recirc_id -> metadata to speed up look ups. */
343 match_set_metadata(&match, htonll(bond->recirc_id));
344 match_set_dp_hash_masked(&match, i, BOND_MASK);
346 add_pr_rule(bond, &match, slave->ofp_port,
347 bond->hash[i].pr_rule);
351 HMAP_FOR_EACH_SAFE(pr_op, next_op, hmap_node, &bond->pr_rule_ops) {
/* ADD path: install an internal flow that outputs to the bucket's slave. */
356 ofpbuf_clear(&ofpacts);
357 ofpact_put_OUTPUT(&ofpacts)->port = pr_op->out_ofport;
358 error = ofproto_dpif_add_internal_flow(bond->ofproto,
360 RECIRC_RULE_PRIORITY,
363 char *err_s = match_to_string(&pr_op->match,
364 RECIRC_RULE_PRIORITY);
366 VLOG_ERR("failed to add post recirculation flow %s", err_s);
368 pr_op->pr_rule = NULL;
370 pr_op->pr_rule = rule;
/* DEL path: remove the internal flow and drop the op from the map. */
375 error = ofproto_dpif_delete_internal_flow(bond->ofproto,
377 RECIRC_RULE_PRIORITY);
379 char *err_s = match_to_string(&pr_op->match,
380 RECIRC_RULE_PRIORITY);
382 VLOG_ERR("failed to remove post recirculation flow %s", err_s);
386 hmap_remove(&bond->pr_rule_ops, &pr_op->hmap_node);
387 pr_op->pr_rule = NULL;
393 ofpbuf_uninit(&ofpacts);
397 /* Updates 'bond''s overall configuration to 's'.
399 * The caller should register each slave on 'bond' by calling
400 * bond_slave_register(). This is optional if none of the slaves'
401 * configuration has changed. In any case it can't hurt.
403 * Returns true if the configuration has changed in such a way that requires
407 bond_reconfigure(struct bond *bond, const struct bond_settings *s)
409 bool revalidate = false;
411 ovs_rwlock_wrlock(&rwlock);
/* Rename: re-key the bond in 'all_bonds' under its new name. */
412 if (!bond->name || strcmp(bond->name, s->name)) {
414 hmap_remove(all_bonds, &bond->hmap_node);
417 bond->name = xstrdup(s->name);
418 hmap_insert(all_bonds, &bond->hmap_node, hash_string(bond->name, 0));
421 bond->updelay = s->up_delay;
422 bond->downdelay = s->down_delay;
/* Each of the following changes forces flow revalidation (the
 * 'revalidate = true' lines are elided from this listing). */
424 if (bond->lacp_fallback_ab != s->lacp_fallback_ab_cfg) {
425 bond->lacp_fallback_ab = s->lacp_fallback_ab_cfg;
429 if (bond->rebalance_interval != s->rebalance_interval) {
430 bond->rebalance_interval = s->rebalance_interval;
434 if (bond->balance != s->balance) {
435 bond->balance = s->balance;
439 if (bond->basis != s->basis) {
440 bond->basis = s->basis;
/* Fake-interface stat updates are enabled by scheduling the next update
 * immediately, disabled by pushing it to LLONG_MAX. */
445 if (bond->next_fake_iface_update == LLONG_MAX) {
446 bond->next_fake_iface_update = time_msec();
449 bond->next_fake_iface_update = LLONG_MAX;
452 if (bond->bond_revalidate) {
454 bond->bond_revalidate = false;
/* Non-active-backup modes need a recirculation ID; free it otherwise. */
457 if (bond->balance != BM_AB) {
458 if (!bond->recirc_id) {
459 bond->recirc_id = ofproto_dpif_alloc_recirc_id(bond->ofproto);
461 } else if (bond->recirc_id) {
462 ofproto_dpif_free_recirc_id(bond->ofproto, bond->recirc_id);
466 if (bond->balance == BM_AB || !bond->hash || revalidate) {
467 bond_entry_reset(bond);
470 ovs_rwlock_unlock(&rwlock);
/* Points 'slave' at 'netdev', resetting its change sequence so the next
 * bond_run() re-reads link status.  No-op if the netdev is unchanged. */
475 bond_slave_set_netdev__(struct bond_slave *slave, struct netdev *netdev)
476 OVS_REQ_WRLOCK(rwlock)
478 if (slave->netdev != netdev) {
479 slave->netdev = netdev;
480 slave->change_seq = 0;
484 /* Registers 'slave_' as a slave of 'bond'. The 'slave_' pointer is an
485 * arbitrary client-provided pointer that uniquely identifies a slave within a
486 * bond. If 'slave_' already exists within 'bond' then this function
487 * reconfigures the existing slave.
489 * 'netdev' must be the network device that 'slave_' represents. It is owned
490 * by the client, so the client must not close it before either unregistering
491 * 'slave_' or destroying 'bond'.
494 bond_slave_register(struct bond *bond, void *slave_,
495 ofp_port_t ofport, struct netdev *netdev)
497 struct bond_slave *slave;
499 ovs_rwlock_wrlock(&rwlock);
500 slave = bond_slave_lookup(bond, slave_);
/* First registration: allocate the slave and take its initial enabled state
 * from the netdev's carrier.  NOTE(review): some initialization lines (aux,
 * bond back-pointer) are elided from this listing. */
502 slave = xzalloc(sizeof *slave);
504 hmap_insert(&bond->slaves, &slave->hmap_node, hash_pointer(slave_, 0));
507 slave->ofp_port = ofport;
508 slave->delay_expires = LLONG_MAX;
509 slave->name = xstrdup(netdev_get_name(netdev));
510 bond->bond_revalidate = true;
512 slave->enabled = false;
513 bond_enable_slave(slave, netdev_get_carrier(netdev));
516 bond_slave_set_netdev__(slave, netdev);
/* Refresh the cached name in case the netdev was renamed/replaced. */
519 slave->name = xstrdup(netdev_get_name(netdev));
520 ovs_rwlock_unlock(&rwlock);
523 /* Updates the network device to be used with 'slave_' to 'netdev'.
525 * This is useful if the caller closes and re-opens the network device
526 * registered with bond_slave_register() but doesn't need to change anything
529 bond_slave_set_netdev(struct bond *bond, void *slave_, struct netdev *netdev)
531 struct bond_slave *slave;
533 ovs_rwlock_wrlock(&rwlock);
534 slave = bond_slave_lookup(bond, slave_);
/* Only update if the slave is actually registered (lookup may return NULL;
 * the guarding 'if' line is elided from this listing). */
536 bond_slave_set_netdev__(slave, netdev);
538 ovs_rwlock_unlock(&rwlock);
541 /* Unregisters 'slave_' from 'bond'. If 'bond' does not contain such a slave
542 * then this function has no effect.
544 * Unregistering a slave invalidates all flows. */
546 bond_slave_unregister(struct bond *bond, const void *slave_)
548 struct bond_slave *slave;
551 ovs_rwlock_wrlock(&rwlock);
552 slave = bond_slave_lookup(bond, slave_);
557 bond->bond_revalidate = true;
558 bond_enable_slave(slave, false);
/* Remember whether we are removing the active slave so a replacement can be
 * chosen below. */
560 del_active = bond->active_slave == slave;
562 struct bond_entry *e;
/* Unassign every hash bucket that pointed at the departing slave. */
563 for (e = bond->hash; e <= &bond->hash[BOND_MASK]; e++) {
564 if (e->slave == slave) {
572 hmap_remove(&bond->slaves, &slave->hmap_node);
573 /* Client owns 'slave->netdev'. */
577 bond_choose_active_slave(bond);
578 bond->send_learning_packets = true;
581 ovs_rwlock_unlock(&rwlock);
584 /* Should be called on each slave in 'bond' before bond_run() to indicate
585 * whether or not 'slave_' may be enabled. This function is intended to allow
586 * other protocols to have some impact on bonding decisions. For example LACP
587 * or high level link monitoring protocols may decide that a given slave should
588 * not be able to send traffic. */
590 bond_slave_set_may_enable(struct bond *bond, void *slave_, bool may_enable)
592 ovs_rwlock_wrlock(&rwlock);
/* bond_slave_lookup() must succeed here; callers pass registered slaves. */
593 bond_slave_lookup(bond, slave_)->may_enable = may_enable;
594 ovs_rwlock_unlock(&rwlock);
597 /* Performs periodic maintenance on 'bond'.
599 * Returns true if the caller should revalidate its flows.
601 * The caller should check bond_should_send_learning_packets() afterward. */
603 bond_run(struct bond *bond, enum lacp_status lacp_status)
605 struct bond_slave *slave;
608 ovs_rwlock_wrlock(&rwlock);
/* A change in LACP status affects admissibility and slave selection, so
 * force revalidation. */
609 if (bond->lacp_status != lacp_status) {
610 bond->lacp_status = lacp_status;
611 bond->bond_revalidate = true;
614 /* Enable slaves based on link status and LACP feedback. */
615 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
616 bond_link_status_update(slave);
617 slave->change_seq = seq_read(connectivity_seq_get());
619 if (!bond->active_slave || !bond->active_slave->enabled) {
620 bond_choose_active_slave(bond);
623 /* Update fake bond interface stats. */
624 if (time_msec() >= bond->next_fake_iface_update) {
625 bond_update_fake_slave_stats(bond);
626 bond->next_fake_iface_update = time_msec() + 1000;
/* Hand the accumulated revalidation flag to the caller and clear it. */
629 revalidate = bond->bond_revalidate;
630 bond->bond_revalidate = false;
631 ovs_rwlock_unlock(&rwlock);
636 /* Causes poll_block() to wake up when 'bond' needs something to be done. */
638 bond_wait(struct bond *bond)
640 struct bond_slave *slave;
642 ovs_rwlock_rdlock(&rwlock);
643 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
/* Wake when an up/down delay expires... */
644 if (slave->delay_expires != LLONG_MAX) {
645 poll_timer_wait_until(slave->delay_expires);
/* ...or when the slave's netdev connectivity changes. */
648 seq_wait(connectivity_seq_get(), slave->change_seq);
651 if (bond->next_fake_iface_update != LLONG_MAX) {
652 poll_timer_wait_until(bond->next_fake_iface_update);
/* Pending revalidation requires an immediate bond_run() pass. */
655 if (bond->bond_revalidate) {
656 poll_immediate_wake();
658 ovs_rwlock_unlock(&rwlock);
660 /* We don't wait for bond->next_rebalance because rebalancing can only run
661 * at a flow account checkpoint. ofproto does checkpointing on its own
662 * schedule and bond_rebalance() gets called afterward, so we'd just be
663 * waking up for no purpose. */
666 /* MAC learning table interaction. */
/* Returns true if sending gratuitous learning packets makes sense for this
 * bond: SLB or active-backup without LACP (or LACP fallback-to-AB), and an
 * active slave is available to send from. */
669 may_send_learning_packets(const struct bond *bond)
671 return ((bond->lacp_status == LACP_DISABLED
672 && (bond->balance == BM_SLB || bond->balance == BM_AB))
673 || (bond->lacp_fallback_ab && bond->lacp_status == LACP_CONFIGURED))
674 && bond->active_slave;
677 /* Returns true if 'bond' needs the client to send out packets to assist with
678 * MAC learning on 'bond'. If this function returns true, then the client
679 * should iterate through its MAC learning table for the bridge on which 'bond'
680 * is located. For each MAC that has been learned on a port other than 'bond',
681 * it should call bond_compose_learning_packet().
683 * This function will only return true if 'bond' is in SLB or active-backup
684 * mode and LACP is not negotiated. Otherwise sending learning packets isn't
687 * Calling this function resets the state that it checks. */
689 bond_should_send_learning_packets(struct bond *bond)
693 ovs_rwlock_wrlock(&rwlock);
/* Read-and-clear the flag so each trigger produces one round of packets. */
694 send = bond->send_learning_packets && may_send_learning_packets(bond);
695 bond->send_learning_packets = false;
696 ovs_rwlock_unlock(&rwlock);
700 /* Sends a gratuitous learning packet on 'bond' from 'eth_src' on 'vlan'.
702 * See bond_should_send_learning_packets() for description of usage. The
703 * caller should send the composed packet on the port associated with
704 * port_aux and takes ownership of the returned ofpbuf. */
706 bond_compose_learning_packet(struct bond *bond,
707 const uint8_t eth_src[ETH_ADDR_LEN],
708 uint16_t vlan, void **port_aux)
710 struct bond_slave *slave;
711 struct ofpbuf *packet;
714 ovs_rwlock_rdlock(&rwlock);
715 ovs_assert(may_send_learning_packets(bond));
/* Build a flow keyed only on the source MAC so the hash picks the same
 * slave that real traffic from this MAC would use. */
716 memset(&flow, 0, sizeof flow);
717 memcpy(flow.dl_src, eth_src, ETH_ADDR_LEN);
718 slave = choose_output_slave(bond, &flow, NULL, vlan);
/* The learning packet itself is a RARP, VLAN-tagged when 'vlan' is set. */
720 packet = ofpbuf_new(0);
721 compose_rarp(packet, eth_src);
723 eth_push_vlan(packet, htons(ETH_TYPE_VLAN), htons(vlan));
726 *port_aux = slave->aux;
727 ovs_rwlock_unlock(&rwlock);
731 /* Checks whether a packet that arrived on 'slave_' within 'bond', with an
732 * Ethernet destination address of 'eth_dst', should be admitted.
734 * The return value is one of the following:
736 * - BV_ACCEPT: Admit the packet.
738 * - BV_DROP: Drop the packet.
740 * - BV_DROP_IF_MOVED: Consult the MAC learning table for the packet's
741 * Ethernet source address and VLAN. If there is none, or if the packet
742 * is on the learned port, then admit the packet. If a different port has
743 * been learned, however, drop the packet (and do not use it for MAC
747 bond_check_admissibility(struct bond *bond, const void *slave_,
748 const uint8_t eth_dst[ETH_ADDR_LEN])
750 enum bond_verdict verdict = BV_DROP;
751 struct bond_slave *slave;
753 ovs_rwlock_rdlock(&rwlock);
754 slave = bond_slave_lookup(bond, slave_);
759 /* LACP bonds have very loose admissibility restrictions because we can
760 * assume the remote switch is aware of the bond and will "do the right
761 * thing". However, as a precaution we drop packets on disabled slaves
762 * because no correctly implemented partner switch should be sending
765 * If LACP is configured, but LACP negotiations have been unsuccessful, we
766 * drop all incoming traffic except if lacp_fallback_ab is enabled. */
767 switch (bond->lacp_status) {
768 case LACP_NEGOTIATED:
769 verdict = slave->enabled ? BV_ACCEPT : BV_DROP;
771 case LACP_CONFIGURED:
772 if (!bond->lacp_fallback_ab) {
779 /* Drop all multicast packets on inactive slaves. */
780 if (eth_addr_is_multicast(eth_dst)) {
781 if (bond->active_slave != slave) {
786 switch (bond->balance) {
788 /* TCP balanced bonds require successful LACP negotiations. Based on the
789 * above check, LACP is off or lacp_fallback_ab is true on this bond.
790 * If lacp_fallback_ab is true fall through to BM_AB case else, we
791 * drop all incoming traffic. */
792 if (!bond->lacp_fallback_ab) {
797 /* Drop all packets which arrive on backup slaves. This is similar to
798 * how Linux bonding handles active-backup bonds. */
799 if (bond->active_slave != slave) {
800 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
802 VLOG_DBG_RL(&rl, "active-backup bond received packet on backup"
803 " slave (%s) destined for " ETH_ADDR_FMT,
804 slave->name, ETH_ADDR_ARGS(eth_dst));
/* SLB case below: packets may be reflected back by the partner switch. */
811 /* Drop all packets for which we have learned a different input port,
812 * because we probably sent the packet on one slave and got it back on
813 * the other. Gratuitous ARP packets are an exception to this rule:
814 * the host has moved to another switch. The exception to the
815 * exception is if we locked the learning table to avoid reflections on
817 verdict = BV_DROP_IF_MOVED;
823 ovs_rwlock_unlock(&rwlock);
828 /* Returns the slave (registered on 'bond' by bond_slave_register()) to which
829 * a packet with the given 'flow' and 'vlan' should be forwarded. Returns
830 * NULL if the packet should be dropped because no slaves are enabled.
832 * 'vlan' is not necessarily the same as 'flow->vlan_tci'. First, 'vlan'
833 * should be a VID only (i.e. excluding the PCP bits). Second,
834 * 'flow->vlan_tci' is the VLAN TCI that appeared on the packet (so it will be
835 * nonzero only for trunk ports), whereas 'vlan' is the logical VLAN that the
836 * packet belongs to (so for an access port it will be the access port's VLAN).
838 * If 'wc' is non-NULL, bitwise-OR's 'wc' with the set of bits that were
839 * significant in the selection. At some point earlier, 'wc' should
840 * have been initialized (e.g., by flow_wildcards_init_catchall()).
843 bond_choose_output_slave(struct bond *bond, const struct flow *flow,
844 struct flow_wildcards *wc, uint16_t vlan)
846 struct bond_slave *slave;
/* Thin locking wrapper: delegates to choose_output_slave() and returns the
 * chosen slave's client handle ('aux'), or NULL. */
849 ovs_rwlock_rdlock(&rwlock);
850 slave = choose_output_slave(bond, flow, wc, vlan);
851 aux = slave ? slave->aux : NULL;
852 ovs_rwlock_unlock(&rwlock);
/* Folds the latest byte count from 'entry''s post-recirculation rule into
 * the entry's 'tx_bytes', using the previously recorded 'pr_tx_bytes' to
 * compute the delta since the last accounting pass. */
859 bond_entry_account(struct bond_entry *entry, uint64_t rule_tx_bytes)
860 OVS_REQ_WRLOCK(rwlock)
865 delta = rule_tx_bytes - entry->pr_tx_bytes;
866 entry->tx_bytes += delta;
867 entry->pr_tx_bytes = rule_tx_bytes;
871 /* Maintain bond stats using post recirculation rule byte counters.*/
873 bond_recirculation_account(struct bond *bond)
877 ovs_rwlock_wrlock(&rwlock);
/* For every hash bucket with an installed rule, pull its byte counter from
 * ofproto and account the delta into the bucket. */
878 for (i=0; i<=BOND_MASK; i++) {
879 struct bond_entry *entry = &bond->hash[i];
880 struct rule *rule = entry->pr_rule;
883 uint64_t n_packets OVS_UNUSED;
884 long long int used OVS_UNUSED;
887 rule->ofproto->ofproto_class->rule_get_stats(
888 rule, &n_packets, &n_bytes, &used);
889 bond_entry_account(entry, n_bytes);
892 ovs_rwlock_unlock(&rwlock);
/* Reports whether 'bond' may use recirculation (only TCP-balanced bonds do)
 * and, through the optional out-parameters, its recirc ID and hash basis.
 * NOTE(review): the NULL checks on the out-parameters and the return
 * statements are elided from this listing. */
896 bond_may_recirc(const struct bond *bond, uint32_t *recirc_id,
899 if (bond->balance == BM_TCP) {
901 *recirc_id = bond->recirc_id;
904 *hash_bias = bond->basis;
/* Ensures every hash bucket is assigned to an enabled slave, then (if any
 * assignment changed or 'force' is set) pushes the rules to ofproto via
 * update_recirc_rules(). */
913 bond_update_post_recirc_rules(struct bond* bond, const bool force)
915 struct bond_entry *e;
916 bool update_rules = force; /* Always update rules if caller forces it. */
918 /* Make sure all bond entries are populated */
919 for (e = bond->hash; e <= &bond->hash[BOND_MASK]; e++) {
920 if (!e->slave || !e->slave->enabled) {
/* Pick a random slave; fall back to the active slave if the random
 * pick happens to be disabled. */
922 e->slave = CONTAINER_OF(hmap_random_node(&bond->slaves),
923 struct bond_slave, hmap_node);
924 if (!e->slave->enabled) {
925 e->slave = bond->active_slave;
931 update_recirc_rules(bond);
/* Returns true if 'bond' actively rebalances traffic: a nonzero rebalance
 * interval in SLB or TCP mode. */
938 bond_is_balanced(const struct bond *bond) OVS_REQ_RDLOCK(rwlock)
940 return bond->rebalance_interval
941 && (bond->balance == BM_SLB || bond->balance == BM_TCP);
944 /* Notifies 'bond' that 'n_bytes' bytes were sent in 'flow' within 'vlan'. */
946 bond_account(struct bond *bond, const struct flow *flow, uint16_t vlan,
949 ovs_rwlock_wrlock(&rwlock);
/* Only balanced bonds track per-bucket byte counts. */
950 if (bond_is_balanced(bond)) {
951 lookup_bond_entry(bond, flow, vlan)->tx_bytes += n_bytes;
953 ovs_rwlock_unlock(&rwlock);
/* Converts a 'bal_node' list element back to its containing bond_slave. */
956 static struct bond_slave *
957 bond_slave_from_bal_node(struct list *bal) OVS_REQ_RDLOCK(rwlock)
959 return CONTAINER_OF(bal, struct bond_slave, bal_node);
/* Logs, at DBG level, each slave in 'bals' with its load in kB and the hash
 * buckets assigned to it.  Purely diagnostic; no state changes. */
963 log_bals(struct bond *bond, const struct list *bals)
964 OVS_REQ_RDLOCK(rwlock)
966 if (VLOG_IS_DBG_ENABLED()) {
967 struct ds ds = DS_EMPTY_INITIALIZER;
968 const struct bond_slave *slave;
970 LIST_FOR_EACH (slave, bal_node, bals) {
972 ds_put_char(&ds, ',');
974 ds_put_format(&ds, " %s %"PRIu64"kB",
975 slave->name, slave->tx_bytes / 1024);
977 if (!slave->enabled) {
978 ds_put_cstr(&ds, " (disabled)");
980 if (!list_is_empty(&slave->entries)) {
981 struct bond_entry *e;
983 ds_put_cstr(&ds, " (");
984 LIST_FOR_EACH (e, list_node, &slave->entries) {
985 if (&e->list_node != list_front(&slave->entries)) {
986 ds_put_cstr(&ds, " + ");
/* "h<N>" is the bucket index, derived from pointer arithmetic. */
988 ds_put_format(&ds, "h%"PRIdPTR": %"PRIu64"kB",
989 e - bond->hash, e->tx_bytes / 1024);
991 ds_put_cstr(&ds, ")");
994 VLOG_DBG("bond %s:%s", bond->name, ds_cstr(&ds));
999 /* Shifts 'hash' from its current slave to 'to'. */
1001 bond_shift_load(struct bond_entry *hash, struct bond_slave *to)
1002 OVS_REQ_WRLOCK(rwlock)
1004 struct bond_slave *from = hash->slave;
1005 struct bond *bond = from->bond;
1006 uint64_t delta = hash->tx_bytes;
1008 VLOG_INFO("bond %s: shift %"PRIu64"kB of load (with hash %"PRIdPTR") "
1009 "from %s to %s (now carrying %"PRIu64"kB and "
1010 "%"PRIu64"kB load, respectively)",
1011 bond->name, delta / 1024, hash - bond->hash,
1012 from->name, to->name,
1013 (from->tx_bytes - delta) / 1024,
1014 (to->tx_bytes + delta) / 1024);
1016 /* Shift load away from 'from' to 'to'. */
1017 from->tx_bytes -= delta;
1018 to->tx_bytes += delta;
1020 /* Arrange for flows to be revalidated. */
/* NOTE(review): the 'hash->slave = to;' reassignment is elided from this
 * listing; only the revalidation flag set is visible. */
1022 bond->bond_revalidate = true;
1025 /* Picks and returns a bond_entry to migrate from 'from' (the most heavily
1026 * loaded bond slave) to a bond slave that has 'to_tx_bytes' bytes of load,
1027 * given that doing so must decrease the ratio of the load on the two slaves by
1028 * at least 0.1. Returns NULL if there is no appropriate entry.
1030 * The list of entries isn't sorted. I don't know of a reason to prefer to
1031 * shift away small hashes or large hashes. */
1032 static struct bond_entry *
1033 choose_entry_to_migrate(const struct bond_slave *from, uint64_t to_tx_bytes)
1034 OVS_REQ_WRLOCK(rwlock)
1036 struct bond_entry *e;
1038 if (list_is_short(&from->entries)) {
1039 /* 'from' carries no more than one MAC hash, so shifting load away from
1040 * it would be pointless. */
1044 LIST_FOR_EACH (e, list_node, &from->entries) {
1045 double old_ratio, new_ratio;
1048 if (to_tx_bytes == 0) {
1049 /* Nothing on the new slave, move it. */
1053 delta = e->tx_bytes;
/* Compare the from/to load ratio before and after a hypothetical move. */
1054 old_ratio = (double)from->tx_bytes / to_tx_bytes;
1055 new_ratio = (double)(from->tx_bytes - delta) / (to_tx_bytes + delta);
1056 if (old_ratio - new_ratio > 0.1
1057 && fabs(new_ratio - 1.0) < fabs(old_ratio - 1.0)) {
1058 /* We're aiming for an ideal ratio of 1, meaning both the 'from'
1059 and 'to' slave have the same load. Therefore, we only move an
1060 entry if it decreases the load on 'from', and brings us closer
1061 to equal traffic load. */
1069 /* Inserts 'slave' into 'bals' so that descending order of 'tx_bytes' is
1072 insert_bal(struct list *bals, struct bond_slave *slave)
1074 struct bond_slave *pos;
/* Linear scan for the first element lighter than 'slave'; insert before it
 * (or at the tail if none is lighter). */
1076 LIST_FOR_EACH (pos, bal_node, bals) {
1077 if (slave->tx_bytes > pos->tx_bytes) {
1081 list_insert(&pos->bal_node, &slave->bal_node);
1084 /* Removes 'slave' from its current list and then inserts it into 'bals' so
1085 * that descending order of 'tx_bytes' is maintained. */
1087 reinsert_bal(struct list *bals, struct bond_slave *slave)
1089 list_remove(&slave->bal_node);
1090 insert_bal(bals, slave);
1093 /* If 'bond' needs rebalancing, does so.
1095 * The caller should have called bond_account() for each active flow, or in case
1096 * of recirculation is used, have called bond_recirculation_account(bond),
1097 * to ensure that flow data is consistently accounted at this point.
1099 * Return whether rebalancing took place.*/
1101 bond_rebalance(struct bond *bond)
1103 struct bond_slave *slave;
1104 struct bond_entry *e;
1106 bool rebalanced = false;
1108 ovs_rwlock_wrlock(&rwlock);
/* Skip entirely if balancing is off or the rebalance interval hasn't
 * elapsed yet. */
1109 if (!bond_is_balanced(bond) || time_msec() < bond->next_rebalance) {
1112 bond->next_rebalance = time_msec() + bond->rebalance_interval;
1114 /* Add each bond_entry to its slave's 'entries' list.
1115 * Compute each slave's tx_bytes as the sum of its entries' tx_bytes. */
1116 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1117 slave->tx_bytes = 0;
1118 list_init(&slave->entries);
1120 for (e = &bond->hash[0]; e <= &bond->hash[BOND_MASK]; e++) {
1121 if (e->slave && e->tx_bytes) {
1122 e->slave->tx_bytes += e->tx_bytes;
1123 list_push_back(&e->slave->entries, &e->list_node);
1127 /* Add enabled slaves to 'bals' in descending order of tx_bytes.
1129 * XXX This is O(n**2) in the number of slaves but it could be O(n lg n)
1130 * with a proper list sort algorithm. */
1132 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1133 if (slave->enabled) {
1134 insert_bal(&bals, slave);
1137 log_bals(bond, &bals);
1139 /* Shift load from the most-loaded slaves to the least-loaded slaves. */
1140 while (!list_is_short(&bals)) {
1141 struct bond_slave *from = bond_slave_from_bal_node(list_front(&bals));
1142 struct bond_slave *to = bond_slave_from_bal_node(list_back(&bals));
1145 overload = from->tx_bytes - to->tx_bytes;
1146 if (overload < to->tx_bytes >> 5 || overload < 100000) {
1147 /* The extra load on 'from' (and all less-loaded slaves), compared
1148 * to that of 'to' (the least-loaded slave), is less than ~3%, or
1149 * it is less than ~1Mbps. No point in rebalancing. */
1153 /* 'from' is carrying significantly more load than 'to'. Pick a hash
1154 * to move from 'from' to 'to'. */
1155 e = choose_entry_to_migrate(from, to->tx_bytes);
1157 bond_shift_load(e, to);
1159 /* Delete element from from->entries.
1161 * We don't add the element to to->hashes. That would only allow
1162 * 'e' to be migrated to another slave in this rebalancing run, and
1163 * there is no point in doing that. */
1164 list_remove(&e->list_node);
1166 /* Re-sort 'bals'. */
1167 reinsert_bal(&bals, from);
1168 reinsert_bal(&bals, to);
1171 /* Can't usefully migrate anything away from 'from'.
1172 * Don't reconsider it. */
1173 list_remove(&from->bal_node);
1177 /* Implement exponentially weighted moving average. A weight of 1/2 causes
1178 * historical data to decay to <1% in 7 rebalancing runs. 1,000,000 bytes
1179 * take 20 rebalancing runs to decay to 0 and get deleted entirely. */
/* NOTE(review): the decay loop body and 'return rebalanced;' are elided
 * from this listing. */
1180 for (e = &bond->hash[0]; e <= &bond->hash[BOND_MASK]; e++) {
1188 ovs_rwlock_unlock(&rwlock);
1192 /* Bonding unixctl user interface functions. */
/* Looks up a bond by name in 'all_bonds'; returns NULL if not found
 * (the NULL-return path is elided from this listing). */
1194 static struct bond *
1195 bond_find(const char *name) OVS_REQ_RDLOCK(rwlock)
1199 HMAP_FOR_EACH_WITH_HASH (bond, hmap_node, hash_string(name, 0),
1201 if (!strcmp(bond->name, name)) {
/* Looks up a slave by its netdev name within 'bond'; returns NULL if no
 * slave matches (the NULL-return path is elided from this listing). */
1208 static struct bond_slave *
1209 bond_lookup_slave(struct bond *bond, const char *slave_name)
1211 struct bond_slave *slave;
1213 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1214 if (!strcmp(slave->name, slave_name)) {
/* unixctl handler ("bond/list"): replies with a tab-separated table of every
 * bond's name, mode, recirculation ID, and comma-separated slave names. */
1222 bond_unixctl_list(struct unixctl_conn *conn,
1223 int argc OVS_UNUSED, const char *argv[] OVS_UNUSED,
1224 void *aux OVS_UNUSED)
1226 struct ds ds = DS_EMPTY_INITIALIZER;
1227 const struct bond *bond;
1229 ds_put_cstr(&ds, "bond\ttype\trecircID\tslaves\n");
1231 ovs_rwlock_rdlock(&rwlock);
1232 HMAP_FOR_EACH (bond, hmap_node, all_bonds) {
1233 const struct bond_slave *slave;
1236 ds_put_format(&ds, "%s\t%s\t%d\t", bond->name,
1237 bond_mode_to_string(bond->balance), bond->recirc_id);
1240 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1242 ds_put_cstr(&ds, ", ");
1244 ds_put_cstr(&ds, slave->name);
1246 ds_put_char(&ds, '\n');
1248 ovs_rwlock_unlock(&rwlock);
1249 unixctl_command_reply(conn, ds_cstr(&ds));
/* Appends a human-readable description of 'bond' to 'ds': mode, recirculation
 * eligibility, hash basis, up/down delays, next rebalance time (balanced modes
 * only), LACP status, and a per-slave section with enable state, pending
 * delay expirations, and per-hash-bucket load.  Slaves are printed in sorted
 * name order via a temporary shash.  Caller must hold 'rwlock' for reading. */
1254 bond_print_details(struct ds *ds, const struct bond *bond)
1255 OVS_REQ_RDLOCK(rwlock)
1257 struct shash slave_shash = SHASH_INITIALIZER(&slave_shash);
1258 const struct shash_node **sorted_slaves = NULL;
1259 const struct bond_slave *slave;
1264 ds_put_format(ds, "---- %s ----\n", bond->name);
1265 ds_put_format(ds, "bond_mode: %s\n",
1266 bond_mode_to_string(bond->balance));
1268 may_recirc = bond_may_recirc(bond, &recirc_id, NULL);
1269 ds_put_format(ds, "bond may use recirculation: %s, Recirc-ID : %d\n",
1270 may_recirc ? "yes" : "no", may_recirc ? recirc_id: -1);
1272 ds_put_format(ds, "bond-hash-basis: %"PRIu32"\n", bond->basis);
1274 ds_put_format(ds, "updelay: %d ms\n", bond->updelay);
1275 ds_put_format(ds, "downdelay: %d ms\n", bond->downdelay);
/* Rebalance countdown applies only to load-balancing modes. */
1277 if (bond_is_balanced(bond)) {
1278 ds_put_format(ds, "next rebalance: %lld ms\n",
1279 bond->next_rebalance - time_msec());
1282 ds_put_cstr(ds, "lacp_status: ");
1283 switch (bond->lacp_status) {
1284 case LACP_NEGOTIATED:
1285 ds_put_cstr(ds, "negotiated\n");
1287 case LACP_CONFIGURED:
1288 ds_put_cstr(ds, "configured\n");
1291 ds_put_cstr(ds, "off\n");
1294 ds_put_cstr(ds, "<unknown>\n");
/* Collect slaves into a shash so they can be listed in sorted order. */
1298 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1299 shash_add(&slave_shash, slave->name, slave);
1301 sorted_slaves = shash_sort(&slave_shash);
1303 for (i = 0; i < shash_count(&slave_shash); i++) {
1304 struct bond_entry *be;
1306 slave = sorted_slaves[i]->data;
1309 ds_put_format(ds, "\nslave %s: %s\n",
1310 slave->name, slave->enabled ? "enabled" : "disabled");
1311 if (slave == bond->active_slave) {
1312 ds_put_cstr(ds, "\tactive slave\n");
1314 if (slave->delay_expires != LLONG_MAX) {
1315 ds_put_format(ds, "\t%s expires in %lld ms\n",
1316 slave->enabled ? "downdelay" : "updelay",
1317 slave->delay_expires - time_msec());
1320 ds_put_format(ds, "\tmay_enable: %s\n",
1321 slave->may_enable ? "true" : "false");
/* Hash-bucket load is only meaningful for balanced modes. */
1323 if (!bond_is_balanced(bond)) {
1328 for (be = bond->hash; be <= &bond->hash[BOND_MASK]; be++) {
1329 int hash = be - bond->hash;
/* Only report buckets assigned to this slave. */
1332 if (be->slave != slave) {
1336 be_tx_k = be->tx_bytes / 1024;
1338 ds_put_format(ds, "\thash %d: %"PRIu64" kB load\n",
1342 /* XXX How can we list the MACs assigned to hashes of SLB bonds? */
1345 shash_destroy(&slave_shash);
1346 free(sorted_slaves);
1347 ds_put_cstr(ds, "\n");
/* "bond/show [port]" unixctl handler.  With an argument, prints details for
 * that one bond (error reply "no such bond" if not found); without one,
 * prints details for every bond in 'all_bonds'.  Holds 'rwlock' for reading
 * around the lookup and printing. */
1351 bond_unixctl_show(struct unixctl_conn *conn,
1352 int argc, const char *argv[],
1353 void *aux OVS_UNUSED)
1355 struct ds ds = DS_EMPTY_INITIALIZER;
1357 ovs_rwlock_rdlock(&rwlock);
/* Single-bond case: argv[1] names the bond. */
1359 const struct bond *bond = bond_find(argv[1]);
1362 unixctl_command_reply_error(conn, "no such bond");
1365 bond_print_details(&ds, bond);
/* All-bonds case (the 'else' line is missing from this listing). */
1367 const struct bond *bond;
1369 HMAP_FOR_EACH (bond, hmap_node, all_bonds) {
1370 bond_print_details(&ds, bond);
1374 unixctl_command_reply(conn, ds_cstr(&ds));
1378 ovs_rwlock_unlock(&rwlock);
/* "bond/migrate port hash slave" unixctl handler: reassigns one SLB hash
 * bucket to a given enabled slave.  Validates in order: bond exists, bond is
 * BM_SLB, 'hash' is all digits (then masked with BOND_MASK), slave exists,
 * slave is enabled.  On success sets entry->slave, flags the bond for
 * revalidation, and replies "migrated".  Takes 'rwlock' for writing. */
1382 bond_unixctl_migrate(struct unixctl_conn *conn,
1383 int argc OVS_UNUSED, const char *argv[],
1384 void *aux OVS_UNUSED)
1386 const char *bond_s = argv[1];
1387 const char *hash_s = argv[2];
1388 const char *slave_s = argv[3];
1390 struct bond_slave *slave;
1391 struct bond_entry *entry;
1394 ovs_rwlock_wrlock(&rwlock);
1395 bond = bond_find(bond_s);
1397 unixctl_command_reply_error(conn, "no such bond");
/* Hash-bucket migration only makes sense for source-load-balancing. */
1401 if (bond->balance != BM_SLB) {
1402 unixctl_command_reply_error(conn, "not an SLB bond");
1406 if (strspn(hash_s, "0123456789") == strlen(hash_s)) {
1407 hash = atoi(hash_s) & BOND_MASK;
1409 unixctl_command_reply_error(conn, "bad hash");
1413 slave = bond_lookup_slave(bond, slave_s);
1415 unixctl_command_reply_error(conn, "no such slave");
1419 if (!slave->enabled) {
1420 unixctl_command_reply_error(conn, "cannot migrate to disabled slave");
1424 entry = &bond->hash[hash];
/* Force revalidation so datapath flows pick up the new assignment. */
1425 bond->bond_revalidate = true;
1426 entry->slave = slave;
1427 unixctl_command_reply(conn, "migrated");
1430 ovs_rwlock_unlock(&rwlock);
/* "bond/set-active-slave port slave" unixctl handler: makes the named
 * enabled slave the bond's active slave.  If it already is, replies
 * "no change"; otherwise updates active_slave, flags revalidation, schedules
 * gratuitous learning packets, logs the change, and replies "done".
 * Takes 'rwlock' for writing. */
1434 bond_unixctl_set_active_slave(struct unixctl_conn *conn,
1435 int argc OVS_UNUSED, const char *argv[],
1436 void *aux OVS_UNUSED)
1438 const char *bond_s = argv[1];
1439 const char *slave_s = argv[2];
1441 struct bond_slave *slave;
1443 ovs_rwlock_wrlock(&rwlock);
1444 bond = bond_find(bond_s);
1446 unixctl_command_reply_error(conn, "no such bond");
1450 slave = bond_lookup_slave(bond, slave_s);
1452 unixctl_command_reply_error(conn, "no such slave");
1456 if (!slave->enabled) {
1457 unixctl_command_reply_error(conn, "cannot make disabled slave active");
1461 if (bond->active_slave != slave) {
1462 bond->bond_revalidate = true;
1463 bond->active_slave = slave;
1464 VLOG_INFO("bond %s: active interface is now %s",
1465 bond->name, slave->name);
/* Learning packets re-teach the network the MACs behind the bond. */
1466 bond->send_learning_packets = true;
1467 unixctl_command_reply(conn, "done");
1469 unixctl_command_reply(conn, "no change");
1472 ovs_rwlock_unlock(&rwlock);
/* Shared body for the "bond/enable-slave" and "bond/disable-slave" unixctl
 * commands: looks up argv[1] (bond) and argv[2] (slave), then enables or
 * disables the slave per 'enable' and replies "enabled"/"disabled".
 * Takes 'rwlock' for writing. */
1476 enable_slave(struct unixctl_conn *conn, const char *argv[], bool enable)
1478 const char *bond_s = argv[1];
1479 const char *slave_s = argv[2];
1481 struct bond_slave *slave;
1483 ovs_rwlock_wrlock(&rwlock);
1484 bond = bond_find(bond_s);
1486 unixctl_command_reply_error(conn, "no such bond");
1490 slave = bond_lookup_slave(bond, slave_s);
1492 unixctl_command_reply_error(conn, "no such slave");
1496 bond_enable_slave(slave, enable);
1497 unixctl_command_reply(conn, enable ? "enabled" : "disabled");
1500 ovs_rwlock_unlock(&rwlock);
/* "bond/enable-slave port slave" unixctl handler; thin wrapper over
 * enable_slave() with enable=true. */
1504 bond_unixctl_enable_slave(struct unixctl_conn *conn,
1505 int argc OVS_UNUSED, const char *argv[],
1506 void *aux OVS_UNUSED)
1508 enable_slave(conn, argv, true);
/* "bond/disable-slave port slave" unixctl handler; thin wrapper over
 * enable_slave() with enable=false. */
1512 bond_unixctl_disable_slave(struct unixctl_conn *conn,
1513 int argc OVS_UNUSED, const char *argv[],
1514 void *aux OVS_UNUSED)
1516 enable_slave(conn, argv, false);
/* "bond/hash mac [vlan] [basis]" unixctl handler: computes the SLB hash
 * bucket (bond_hash_src() & BOND_MASK) for a MAC address, with optional VLAN
 * and hash basis.  Replies with the bucket number, or an error for an
 * unparsable vlan, basis, or mac.  No bond lookup is involved -- this is a
 * pure computation.  NOTE(review): default-value lines for vlan/basis when
 * the optional args are absent are missing from this listing. */
1520 bond_unixctl_hash(struct unixctl_conn *conn, int argc, const char *argv[],
1521 void *aux OVS_UNUSED)
1523 const char *mac_s = argv[1];
1524 const char *vlan_s = argc > 2 ? argv[2] : NULL;
1525 const char *basis_s = argc > 3 ? argv[3] : NULL;
1526 uint8_t mac[ETH_ADDR_LEN];
1533 if (!ovs_scan(vlan_s, "%u", &vlan)) {
1534 unixctl_command_reply_error(conn, "invalid vlan");
1542 if (!ovs_scan(basis_s, "%"SCNu32, &basis)) {
1543 unixctl_command_reply_error(conn, "invalid basis");
1550 if (ovs_scan(mac_s, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))) {
1551 hash = bond_hash_src(mac, vlan, basis) & BOND_MASK;
1553 hash_cstr = xasprintf("%u", hash);
1554 unixctl_command_reply(conn, hash_cstr);
1557 unixctl_command_reply_error(conn, "invalid mac");
/* Registers all "bond/*" unixctl commands with their argument signatures and
 * min/max argument counts.
 * NOTE(review): the enclosing function's header is not visible in this
 * listing -- presumably this is the body of bond_init(); confirm against the
 * full source. */
1564 unixctl_command_register("bond/list", "", 0, 0, bond_unixctl_list, NULL);
1565 unixctl_command_register("bond/show", "[port]", 0, 1, bond_unixctl_show,
1567 unixctl_command_register("bond/migrate", "port hash slave", 3, 3,
1568 bond_unixctl_migrate, NULL);
1569 unixctl_command_register("bond/set-active-slave", "port slave", 2, 2,
1570 bond_unixctl_set_active_slave, NULL);
1571 unixctl_command_register("bond/enable-slave", "port slave", 2, 2,
1572 bond_unixctl_enable_slave, NULL);
1573 unixctl_command_register("bond/disable-slave", "port slave", 2, 2,
1574 bond_unixctl_disable_slave, NULL);
1575 unixctl_command_register("bond/hash", "mac [vlan] [basis]", 1, 3,
1576 bond_unixctl_hash, NULL);
/* (Re)initializes 'bond''s hash table: for balancing modes (anything but
 * BM_AB) allocates and zeroes BOND_BUCKETS entries and schedules the next
 * rebalance at now + rebalance_interval.  NOTE(review): lines between the
 * xmalloc and memset (and the BM_AB branch) are missing from this listing. */
1580 bond_entry_reset(struct bond *bond)
1582 if (bond->balance != BM_AB) {
1583 size_t hash_len = BOND_BUCKETS * sizeof *bond->hash;
1586 bond->hash = xmalloc(hash_len);
1588 memset(bond->hash, 0, hash_len);
1590 bond->next_rebalance = time_msec() + bond->rebalance_interval;
/* Returns the slave of 'bond' whose client-provided auxiliary pointer
 * ('slave->aux') equals 'slave_', searching the hmap bucket keyed by
 * hash_pointer(slave_, 0).  Presumably returns NULL when absent -- the
 * return lines are missing from this listing. */
1597 static struct bond_slave *
1598 bond_slave_lookup(struct bond *bond, const void *slave_)
1600 struct bond_slave *slave;
1602 HMAP_FOR_EACH_IN_BUCKET (slave, hmap_node, hash_pointer(slave_, 0),
1604 if (slave->aux == slave_) {
/* Sets 'slave''s enabled state to 'enable', clearing any pending up/down
 * delay (delay_expires = LLONG_MAX).  On an actual state change it flags the
 * bond for revalidation, inserts the slave into or removes it from the
 * bond's 'enabled_slaves' list under the bond mutex, and logs the change. */
1613 bond_enable_slave(struct bond_slave *slave, bool enable)
1615 slave->delay_expires = LLONG_MAX;
1616 if (enable != slave->enabled) {
1617 slave->bond->bond_revalidate = true;
1618 slave->enabled = enable;
/* 'enabled_slaves' is read by the RX/TX path, so mutate it under the
 * bond's mutex. */
1620 ovs_mutex_lock(&slave->bond->mutex);
1622 list_insert(&slave->bond->enabled_slaves, &slave->list_node);
1624 list_remove(&slave->list_node);
1626 ovs_mutex_unlock(&slave->bond->mutex);
1628 VLOG_INFO("interface %s: %s", slave->name,
1629 slave->enabled ? "enabled" : "disabled");
/* Reconciles 'slave''s enabled state with its observed link state.
 * Link is considered "up" only when the netdev reports carrier AND the slave
 * may be enabled.  When the desired state disagrees with whether a delay
 * timer is armed, either cancels the timer (state matches again) or arms a
 * new one: zero delay when LACP is in use, otherwise the bond's configured
 * updelay/downdelay.  Once the timer expires, applies the change via
 * bond_enable_slave(). */
1634 bond_link_status_update(struct bond_slave *slave)
1636 struct bond *bond = slave->bond;
1639 up = netdev_get_carrier(slave->netdev) && slave->may_enable;
/* XOR-style check: a delay should be armed iff 'up' disagrees with
 * 'enabled'. */
1640 if ((up == slave->enabled) != (slave->delay_expires == LLONG_MAX)) {
1641 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1642 VLOG_INFO_RL(&rl, "interface %s: link state %s",
1643 slave->name, up ? "up" : "down");
1644 if (up == slave->enabled) {
/* Link flapped back to the current state: cancel pending change. */
1645 slave->delay_expires = LLONG_MAX;
1646 VLOG_INFO_RL(&rl, "interface %s: will not be %s",
1647 slave->name, up ? "disabled" : "enabled");
/* With LACP the partner already vetted the link, so no extra delay. */
1649 int delay = (bond->lacp_status != LACP_DISABLED ? 0
1650 : up ? bond->updelay : bond->downdelay);
1651 slave->delay_expires = time_msec() + delay;
1653 VLOG_INFO_RL(&rl, "interface %s: will be %s if it stays %s "
1656 up ? "enabled" : "disabled",
1663 if (time_msec() >= slave->delay_expires) {
1664 bond_enable_slave(slave, up);
/* Source-MAC hash used by SLB bonds: delegates to hash_mac() over the MAC,
 * VLAN, and hash basis. */
1669 bond_hash_src(const uint8_t mac[ETH_ADDR_LEN], uint16_t vlan, uint32_t basis)
1671 return hash_mac(mac, vlan, basis);
/* L4 hash used by TCP-balanced bonds: copies 'flow', overrides its vlan_tci
 * with 'vlan' (network byte order), and hashes with flow_hash_symmetric_l4(). */
1675 bond_hash_tcp(const struct flow *flow, uint16_t vlan, uint32_t basis)
1677 struct flow hash_flow = *flow;
1678 hash_flow.vlan_tci = htons(vlan);
1680 /* The symmetric quality of this hash function is not required, but
1681 * flow_hash_symmetric_l4 already exists, and is sufficient for our
1682 * purposes, so we use it out of convenience. */
1683 return flow_hash_symmetric_l4(&hash_flow, basis);
/* Dispatches to the mode-appropriate hash: L4 flow hash for BM_TCP, source
 * MAC hash for BM_SLB.  Asserts the bond is in one of those two modes. */
1687 bond_hash(const struct bond *bond, const struct flow *flow, uint16_t vlan)
1689 ovs_assert(bond->balance == BM_TCP || bond->balance == BM_SLB);
1691 return (bond->balance == BM_TCP
1692 ? bond_hash_tcp(flow, vlan, bond->basis)
1693 : bond_hash_src(flow->dl_src, vlan, bond->basis));
/* Maps 'flow' (with 'vlan') to its hash-bucket entry in bond->hash by
 * masking bond_hash() down to BOND_MASK. */
1696 static struct bond_entry *
1697 lookup_bond_entry(const struct bond *bond, const struct flow *flow,
1700 return &bond->hash[bond_hash(bond, flow, vlan) & BOND_MASK];
1703 /* Selects and returns an enabled slave from the 'enabled_slaves' list
1704 * in a round-robin fashion. If the 'enabled_slaves' list is empty,
/* (the remainder of the comment above is missing from this listing --
 * presumably "returns NULL"; the empty-list branch below supports that). */
1706 static struct bond_slave *
1707 get_enabled_slave(struct bond *bond)
1711 ovs_mutex_lock(&bond->mutex);
1712 if (list_is_empty(&bond->enabled_slaves)) {
1713 ovs_mutex_unlock(&bond->mutex);
/* Round-robin: pop the head and push it to the back. */
1717 node = list_pop_front(&bond->enabled_slaves);
1718 list_push_back(&bond->enabled_slaves, node);
1719 ovs_mutex_unlock(&bond->mutex);
1721 return CONTAINER_OF(node, struct bond_slave, list_node);
/* Picks the output slave for 'flow' on 'bond'.  When LACP was configured but
 * never negotiated, either falls back to active-backup (lacp_fallback_ab) or
 * refuses to forward; for active-backup returns bond->active_slave; for
 * TCP/SLB balancing, un-wildcards the relevant hash fields in 'wc', looks up
 * the flow's hash bucket, and lazily (re)assigns the bucket to an enabled
 * slave when its current assignment is missing or disabled.
 * NOTE(review): the switch statement over 'balance' and several branch
 * closers are missing from this listing. */
1724 static struct bond_slave *
1725 choose_output_slave(const struct bond *bond, const struct flow *flow,
1726 struct flow_wildcards *wc, uint16_t vlan)
1728 struct bond_entry *e;
1731 balance = bond->balance;
1732 if (bond->lacp_status == LACP_CONFIGURED) {
1733 /* LACP has been configured on this bond but negotiations were
1734 * unsuccessful. If lacp_fallback_ab is enabled use active-
1735 * backup mode else drop all traffic. */
1736 if (!bond->lacp_fallback_ab) {
1744 return bond->active_slave;
1747 if (bond->lacp_status != LACP_NEGOTIATED) {
1748 /* Must have LACP negotiations for TCP balanced bonds. */
1752 flow_mask_hash_fields(flow, wc, NX_HASH_FIELDS_SYMMETRIC_L4);
1757 flow_mask_hash_fields(flow, wc, NX_HASH_FIELDS_ETH_SRC);
1759 e = lookup_bond_entry(bond, flow, vlan);
/* Lazily repair a bucket whose slave is gone or disabled. */
1760 if (!e->slave || !e->slave->enabled) {
1761 e->slave = get_enabled_slave(CONST_CAST(struct bond*, bond));
/* Chooses the best candidate for active slave: the first enabled slave
 * found; failing that, among disabled slaves that may be enabled and have a
 * pending delay timer, the one whose updelay expires soonest.
 * NOTE(review): the return statements are missing from this listing. */
1770 static struct bond_slave *
1771 bond_choose_slave(const struct bond *bond)
1773 struct bond_slave *slave, *best;
1775 /* Find an enabled slave. */
1776 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1777 if (slave->enabled) {
1782 /* All interfaces are disabled. Find an interface that will be enabled
1783 * after its updelay expires. */
1785 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1786 if (slave->delay_expires != LLONG_MAX
1787 && slave->may_enable
1788 && (!best || slave->delay_expires < best->delay_expires)) {
/* Re-selects bond->active_slave via bond_choose_slave().  If the choice is a
 * not-yet-enabled slave, enables it immediately (skipping the remaining
 * updelay, since nothing else is usable) and logs why.  On any successful
 * selection, schedules learning packets; if no slave is usable and one was
 * previously active, logs that all interfaces are disabled.  Logging is
 * rate-limited. */
1796 bond_choose_active_slave(struct bond *bond)
1798 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 20);
1799 struct bond_slave *old_active_slave = bond->active_slave;
1801 bond->active_slave = bond_choose_slave(bond);
1802 if (bond->active_slave) {
1803 if (bond->active_slave->enabled) {
1804 VLOG_INFO_RL(&rl, "bond %s: active interface is now %s",
1805 bond->name, bond->active_slave->name);
1807 VLOG_INFO_RL(&rl, "bond %s: active interface is now %s, skipping "
1808 "remaining %lld ms updelay (since no interface was "
1809 "enabled)", bond->name, bond->active_slave->name,
1810 bond->active_slave->delay_expires - time_msec());
/* Force-enable: every other interface is down anyway. */
1811 bond_enable_slave(bond->active_slave, true);
1814 bond->send_learning_packets = true;
1815 } else if (old_active_slave) {
1816 VLOG_INFO_RL(&rl, "bond %s: all interfaces disabled", bond->name);
1820 /* Attempts to make the sum of the bond slaves' statistics appear on the fake
1821 * bond interface. */
1823 bond_update_fake_slave_stats(struct bond *bond)
1825 struct netdev_stats bond_stats;
1826 struct bond_slave *slave;
1827 struct netdev *bond_dev;
1829 memset(&bond_stats, 0, sizeof bond_stats);
1831 HMAP_FOR_EACH (slave, hmap_node, &bond->slaves) {
1832 struct netdev_stats slave_stats;
1834 if (!netdev_get_stats(slave->netdev, &slave_stats)) {
1835 /* XXX: We swap the stats here because they are swapped back when
1836 * reported by the internal device. The reason for this is
1837 * internal devices normally represent packets going into the
1838 * system but when used as fake bond device they represent packets
1839 * leaving the system. We really should do this in the internal
1840 * device itself because changing it here reverses the counts from
1841 * the perspective of the switch. However, the internal device
1842 * doesn't know what type of device it represents so we have to do
1843 * it here for now. */
1844 bond_stats.tx_packets += slave_stats.rx_packets;
1845 bond_stats.tx_bytes += slave_stats.rx_bytes;
1846 bond_stats.rx_packets += slave_stats.tx_packets;
1847 bond_stats.rx_bytes += slave_stats.tx_bytes;
1851 if (!netdev_open(bond->name, "system", &bond_dev)) {
1852 netdev_set_stats(bond_dev, &bond_stats);
1853 netdev_close(bond_dev);