struct tag_set revalidate_set; /* Revalidate only matching facets. */
struct hmap drop_keys; /* Set of dropped odp keys. */
+ bool recv_set_enable; /* Enables or disables receiving packets. */
};
/* All existing ofproto_backer instances, indexed by ofproto->up.type. */
push_all_stats();
}
+ /* If vswitchd started with other_config:flow_restore_wait set as "true",
+ * and the configuration has now changed to "false", enable receiving
+ * packets from the datapath. */
+ if (!backer->recv_set_enable && !ofproto_get_flow_restore_wait()) {
+ backer->recv_set_enable = true;
+
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
+ if (error) {
+ VLOG_ERR("Failed to enable receiving packets in dpif.");
+ return error;
+ }
+ dpif_flow_flush(backer->dpif);
+ backer->need_revalidate = REV_RECONFIGURE;
+ }
+
if (backer->need_revalidate
|| !tag_set_is_empty(&backer->revalidate_set)) {
struct tag_set revalidate_set = backer->revalidate_set;
}
}
- if (timer_expired(&backer->next_expiration)) {
+ if (!backer->recv_set_enable) {
+ /* Wake up before a max of 1000ms. */
+ timer_set_duration(&backer->next_expiration, 1000);
+ } else if (timer_expired(&backer->next_expiration)) {
int delay = expire(backer);
timer_set_duration(&backer->next_expiration, delay);
}
{
unsigned int work;
+ /* If recv_set_enable is false, we should not handle upcalls. */
+ if (!backer->recv_set_enable) {
+ return 0;
+ }
+
/* Handle one or more batches of upcalls, until there's nothing left to do
* or until we do a fixed total amount of work.
*
backer->need_revalidate = 0;
simap_init(&backer->tnl_backers);
tag_set_init(&backer->revalidate_set);
+ backer->recv_set_enable = !ofproto_get_flow_restore_wait();
*backerp = backer;
- dpif_flow_flush(backer->dpif);
+ if (backer->recv_set_enable) {
+ dpif_flow_flush(backer->dpif);
+ }
/* Loop through the ports already on the datapath and remove any
* that we don't need anymore. */
shash_add(&all_dpif_backers, type, backer);
- error = dpif_recv_set(backer->dpif, true);
+ error = dpif_recv_set(backer->dpif, backer->recv_set_enable);
if (error) {
VLOG_ERR("failed to listen on datapath of type %s: %s",
type, strerror(error));
struct ofproto_dpif *ofproto = ofproto_dpif_cast(ofproto_);
struct ofport_dpif *ofport;
+ /* Do not perform any periodic activity required by 'ofproto' while
+ * waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
+
HMAP_FOR_EACH (ofport, up.hmap_node, &ofproto->up.ports) {
port_run_fast(ofport);
}
complete_operations(ofproto);
}
+    /* Do not perform any of the periodic activity below that 'ofproto'
+     * requires while we are waiting for flow restore to complete. */
+ if (ofproto_get_flow_restore_wait()) {
+ return 0;
+ }
+
error = run_fast(ofproto_);
if (error) {
return error;
poll_immediate_wake();
}
+ if (ofproto_get_flow_restore_wait()) {
+ return;
+ }
+
dpif_wait(ofproto->backer->dpif);
dpif_recv_wait(ofproto->backer->dpif);
if (ofproto->sflow) {
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+/* Defaults to true so that, until told otherwise via
+ * ofproto_set_flow_restore_wait(), ofproto waits for flow restoration. */
+static bool flow_restore_wait = true;
+
/* Must be called to initialize the ofproto library.
*
* The caller may pass in 'iface_hints', which contains an shash of
return (bo || fo) ? EOPNOTSUPP : 0;
}
}
+
+/* Sets the global 'flow_restore_wait' flag to 'flow_restore_wait_db'.
+ * The value presumably originates from the database's
+ * other_config:flow-restore-wait setting -- see vswitch.xml. */
+void
+ofproto_set_flow_restore_wait(bool flow_restore_wait_db)
+{
+    flow_restore_wait = flow_restore_wait_db;
+}
+
+/* Returns true while ofproto is still waiting for flow restoration to
+ * complete (callers use this to suppress periodic activity and datapath
+ * packet reception), false once normal operation may begin. */
+bool
+ofproto_get_flow_restore_wait(void)
+{
+    return flow_restore_wait;
+}
+
\f
/* Spanning Tree Protocol (STP) configuration. */
The Citrix XenServer universally unique identifier for the physical
host as displayed by <code>xe host-list</code>.
</column>
+
+ <column name="other_config" key="flow-restore-wait"
+ type='{"type": "boolean"}'>
+ <p>
+ When <code>ovs-vswitchd</code> starts up, it has an empty flow table
+ and therefore it handles all arriving packets in its default fashion
+ according to its configuration, by dropping them or sending them to
+ an OpenFlow controller or switching them as a standalone switch.
+ This behavior is ordinarily desirable. However, if
+ <code>ovs-vswitchd</code> is restarting as part of a ``hot-upgrade,''
+ then this leads to a relatively long period during which packets are
+ mishandled.
+ </p>
+ <p>
+ This option allows for improvement. When <code>ovs-vswitchd</code>
+ starts with this value set as <code>true</code>, it will neither
+          flush nor expire previously set datapath flows nor will it send or
+          receive any packets to or from the datapath.  When this value is
+ later set to <code>false</code>, <code>ovs-vswitchd</code> will
+          start receiving packets from the datapath and re-create the flows.
+ </p>
+ <p>
+ Thus, with this option, the procedure for a hot-upgrade of
+ <code>ovs-vswitchd</code> becomes roughly the following:
+ </p>
+ <ol>
+ <li>
+ Stop <code>ovs-vswitchd</code>.
+ </li>
+ <li>
+ Set <ref column="other_config" key="flow-restore-wait"/>
+ to <code>true</code>.
+ </li>
+ <li>
+ Start <code>ovs-vswitchd</code>.
+ </li>
+ <li>
+ Use <code>ovs-ofctl</code> (or some other program, such as an
+ OpenFlow controller) to restore the OpenFlow flow table
+ to the desired state.
+ </li>
+ <li>
+ Set <ref column="other_config" key="flow-restore-wait"/>
+ to <code>false</code> (or remove it entirely from the database).
+ </li>
+ </ol>
+ </column>
</group>
<group title="Status">