#include "async-append.h"
#include "bfd.h"
#include "bitmap.h"
-#include "bond.h"
#include "cfm.h"
#include "coverage.h"
#include "daemon.h"
#include "ofp-print.h"
#include "ofp-util.h"
#include "ofpbuf.h"
+#include "ofproto/bond.h"
#include "ofproto/ofproto.h"
#include "poll-loop.h"
#include "sha1.h"
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_fault);
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_fault_status);
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_remote_mpids);
+ ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_flap_count);
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_health);
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_cfm_remote_opstate);
ovsdb_idl_omit_alert(idl, &ovsrec_interface_col_bfd_status);
SSET_FOR_EACH (target, &targets) {
struct sockaddr_in *sin = &managers[n_managers];
- if (stream_parse_target_with_default_ports(target,
- JSONRPC_TCP_PORT,
- JSONRPC_SSL_PORT,
- sin)) {
+ if (stream_parse_target_with_default_port(target,
+ OVSDB_OLD_PORT,
+ sin)) {
n_managers++;
}
}
smap_get_int(&ovs_cfg->other_config, "flow-eviction-threshold",
OFPROTO_FLOW_EVICTION_THRESHOLD_DEFAULT));
+ ofproto_set_n_handler_threads(
+ smap_get_int(&ovs_cfg->other_config, "n-handler-threads", 0));
+
bridge_configure_flow_miss_model(smap_get(&ovs_cfg->other_config,
"force-miss-model"));
long long int deadline;
struct bridge *br;
- time_refresh();
deadline = time_msec() + OFP_PORT_ACTION_WINDOW;
/* The kernel will reject any attempt to add a given port to a datapath if
list_remove(&garbage->list_node);
free(garbage);
- time_refresh();
if (time_msec() >= deadline) {
return false;
}
HMAP_FOR_EACH_SAFE (if_cfg, next, hmap_node, &br->if_cfg_todo) {
iface_create(br, if_cfg, OFPP_NONE);
- time_refresh();
if (time_msec() >= deadline) {
return false;
}
sset_destroy(&oso.targets);
}
+/* Returns true if 'ipfix' is a usable IPFIX configuration row: it must be
+ * nonnull and must specify at least one collector target. */
+static bool
+ovsrec_ipfix_is_valid(const struct ovsrec_ipfix *ipfix)
+{
+    return ipfix && ipfix->n_targets > 0;
+}
+
+/* Returns true if Flow_Sample_Collector_Set row 'fscs' is usable for bridge
+ * 'br': its IPFIX configuration must be valid (see ovsrec_ipfix_is_valid())
+ * and it must reference 'br' itself. */
+static bool
+ovsrec_fscs_is_valid(const struct ovsrec_flow_sample_collector_set *fscs,
+                     const struct bridge *br)
+{
+    return ovsrec_ipfix_is_valid(fscs->ipfix) && fscs->bridge == br->cfg;
+}
+
/* Set IPFIX configuration on 'br'. */
static void
bridge_configure_ipfix(struct bridge *br)
{
const struct ovsrec_ipfix *be_cfg = br->cfg->ipfix;
+ bool valid_be_cfg = ovsrec_ipfix_is_valid(be_cfg);
const struct ovsrec_flow_sample_collector_set *fe_cfg;
struct ofproto_ipfix_bridge_exporter_options be_opts;
struct ofproto_ipfix_flow_exporter_options *fe_opts = NULL;
size_t n_fe_opts = 0;
OVSREC_FLOW_SAMPLE_COLLECTOR_SET_FOR_EACH(fe_cfg, idl) {
- if (fe_cfg->bridge == br->cfg) {
+ if (ovsrec_fscs_is_valid(fe_cfg, br)) {
n_fe_opts++;
}
}
- if (!be_cfg && n_fe_opts == 0) {
+ if (!valid_be_cfg && n_fe_opts == 0) {
ofproto_set_ipfix(br->ofproto, NULL, NULL, 0);
return;
}
- if (be_cfg) {
+ if (valid_be_cfg) {
memset(&be_opts, 0, sizeof be_opts);
sset_init(&be_opts.targets);
if (be_cfg->obs_point_id) {
be_opts.obs_point_id = *be_cfg->obs_point_id;
}
+ if (be_cfg->cache_active_timeout) {
+ be_opts.cache_active_timeout = *be_cfg->cache_active_timeout;
+ }
+ if (be_cfg->cache_max_flows) {
+ be_opts.cache_max_flows = *be_cfg->cache_max_flows;
+ }
}
if (n_fe_opts > 0) {
fe_opts = xcalloc(n_fe_opts, sizeof *fe_opts);
opts = fe_opts;
OVSREC_FLOW_SAMPLE_COLLECTOR_SET_FOR_EACH(fe_cfg, idl) {
- if (fe_cfg->bridge == br->cfg) {
+ if (ovsrec_fscs_is_valid(fe_cfg, br)) {
opts->collector_set_id = fe_cfg->id;
sset_init(&opts->targets);
sset_add_array(&opts->targets, fe_cfg->ipfix->targets,
fe_cfg->ipfix->n_targets);
+ opts->cache_active_timeout = fe_cfg->ipfix->cache_active_timeout
+ ? *fe_cfg->ipfix->cache_active_timeout : 0;
+ opts->cache_max_flows = fe_cfg->ipfix->cache_max_flows
+ ? *fe_cfg->ipfix->cache_max_flows : 0;
opts++;
}
}
}
- ofproto_set_ipfix(br->ofproto, be_cfg ? &be_opts : NULL, fe_opts,
+ ofproto_set_ipfix(br->ofproto, valid_be_cfg ? &be_opts : NULL, fe_opts,
n_fe_opts);
- if (be_cfg) {
+ if (valid_be_cfg) {
sset_destroy(&be_opts.targets);
}
ovsrec_interface_set_cfm_fault(cfg, NULL, 0);
ovsrec_interface_set_cfm_fault_status(cfg, NULL, 0);
ovsrec_interface_set_cfm_remote_opstate(cfg, NULL);
+ ovsrec_interface_set_cfm_flap_count(cfg, NULL, 0);
ovsrec_interface_set_cfm_health(cfg, NULL, 0);
ovsrec_interface_set_cfm_remote_mpids(cfg, NULL, 0);
} else {
const char *reasons[CFM_FAULT_N_REASONS];
int64_t cfm_health = status.health;
+ int64_t cfm_flap_count = status.flap_count;
bool faulted = status.faults != 0;
size_t i, j;
}
ovsrec_interface_set_cfm_fault_status(cfg, (char **) reasons, j);
+ ovsrec_interface_set_cfm_flap_count(cfg, &cfm_flap_count, 1);
+
if (status.remote_opstate >= 0) {
const char *remote_opstate = status.remote_opstate ? "up" : "down";
ovsrec_interface_set_cfm_remote_opstate(cfg, remote_opstate);
} else {
ovsrec_interface_set_cfm_health(cfg, NULL, 0);
}
+
+ free(status.rmps);
}
}
iface_refresh_cfm_stats(iface);
smap_init(&smap);
- if (!ofproto_port_get_bfd_status(br->ofproto, iface->ofp_port,
- &smap)) {
- ovsrec_interface_set_bfd_status(iface->cfg, &smap);
- smap_destroy(&smap);
- }
+ ofproto_port_get_bfd_status(br->ofproto, iface->ofp_port,
+ &smap);
+ ovsrec_interface_set_bfd_status(iface->cfg, &smap);
+ smap_destroy(&smap);
}
}
}
* process that forked us to exit successfully. */
daemonize_complete();
- async_append_enable();
+ vlog_enable_async();
VLOG_INFO_ONCE("%s (Open vSwitch) %s", program_name, VERSION);
}
};
static void
-qos_unixctl_show_cb(unsigned int queue_id,
- const struct smap *details,
- void *aux)
+qos_unixctl_show_queue(unsigned int queue_id,
+ const struct smap *details,
+ struct iface *iface,
+ struct ds *ds)
{
- struct qos_unixctl_show_cbdata *data = aux;
- struct ds *ds = data->ds;
- struct iface *iface = data->iface;
struct netdev_queue_stats stats;
struct smap_node *node;
int error;
struct iface *iface;
const char *type;
struct smap_node *node;
- struct qos_unixctl_show_cbdata data;
- int error;
iface = iface_find(argv[1]);
if (!iface) {
netdev_get_qos(iface->netdev, &type, &smap);
if (*type != '\0') {
+ struct netdev_queue_dump dump;
+ struct smap details;
+ unsigned int queue_id;
+
ds_put_format(&ds, "QoS: %s %s\n", iface->name, type);
SMAP_FOR_EACH (node, &smap) {
ds_put_format(&ds, "%s: %s\n", node->key, node->value);
}
- data.ds = &ds;
- data.iface = iface;
- error = netdev_dump_queues(iface->netdev, qos_unixctl_show_cb, &data);
-
- if (error) {
- ds_put_format(&ds, "failed to dump queues: %s",
- ovs_strerror(error));
+ smap_init(&details);
+ NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &dump, iface->netdev) {
+ qos_unixctl_show_queue(queue_id, &details, iface, &ds);
}
+ smap_destroy(&details);
+
unixctl_command_reply(conn, ds_cstr(&ds));
} else {
ds_put_format(&ds, "QoS not configured on %s\n", iface->name);
system_id = smap_get(&port->cfg->other_config, "lacp-system-id");
if (system_id) {
- if (sscanf(system_id, ETH_ADDR_SCAN_FMT,
- ETH_ADDR_SCAN_ARGS(s->id)) != ETH_ADDR_SCAN_COUNT) {
+ if (!ovs_scan(system_id, ETH_ADDR_SCAN_FMT,
+ ETH_ADDR_SCAN_ARGS(s->id))) {
VLOG_WARN("port %s: LACP system ID (%s) must be an Ethernet"
" address.", port->name, system_id);
return NULL;
lacp_time = smap_get(&port->cfg->other_config, "lacp-time");
s->fast = lacp_time && !strcasecmp(lacp_time, "fast");
+
+ s->fallback_ab_cfg = smap_get_bool(&port->cfg->other_config,
+ "lacp-fallback-ab", false);
+
return s;
}
s->fake_iface = port->cfg->bond_fake_iface;
+ s->lacp_fallback_ab_cfg = smap_get_bool(&port->cfg->other_config,
+ "lacp-fallback-ab", false);
+
LIST_FOR_EACH (iface, port_elem, &port->ifaces) {
netdev_set_miimon_interval(iface->netdev, miimon_interval);
}
static void
iface_set_ofport(const struct ovsrec_interface *if_cfg, ofp_port_t ofport)
{
-    int64_t port_;
-    port_ = (ofport == OFPP_NONE) ? -1 : ofp_to_u16(ofport);
    if (if_cfg && !ovsdb_idl_row_is_synthetic(&if_cfg->header_)) {
+        /* OFPP_NONE is stored in the database as -1 ("no OpenFlow port"). */
+        int64_t port = ofport == OFPP_NONE ? -1 : ofp_to_u16(ofport);
+        ovsrec_interface_set_ofport(if_cfg, &port, 1);
    }
}
}
}
-struct iface_delete_queues_cbdata {
- struct netdev *netdev;
- const struct ovsdb_datum *queues;
-};
-
static bool
queue_ids_include(const struct ovsdb_datum *queues, int64_t target)
{
return ovsdb_datum_find_key(queues, &atom, OVSDB_TYPE_INTEGER) != UINT_MAX;
}
-static void
-iface_delete_queues(unsigned int queue_id,
- const struct smap *details OVS_UNUSED, void *cbdata_)
-{
- struct iface_delete_queues_cbdata *cbdata = cbdata_;
-
- if (!queue_ids_include(cbdata->queues, queue_id)) {
- netdev_delete_queue(cbdata->netdev, queue_id);
- }
-}
-
static void
iface_configure_qos(struct iface *iface, const struct ovsrec_qos *qos)
{
if (!qos || qos->type[0] == '\0' || qos->n_queues < 1) {
netdev_set_qos(iface->netdev, NULL, NULL);
} else {
- struct iface_delete_queues_cbdata cbdata;
+ const struct ovsdb_datum *queues;
+ struct netdev_queue_dump dump;
+ unsigned int queue_id;
+ struct smap details;
bool queue_zero;
size_t i;
netdev_set_qos(iface->netdev, qos->type, &qos->other_config);
/* Deconfigure queues that were deleted. */
- cbdata.netdev = iface->netdev;
- cbdata.queues = ovsrec_qos_get_queues(qos, OVSDB_TYPE_INTEGER,
- OVSDB_TYPE_UUID);
- netdev_dump_queues(iface->netdev, iface_delete_queues, &cbdata);
+ queues = ovsrec_qos_get_queues(qos, OVSDB_TYPE_INTEGER,
+ OVSDB_TYPE_UUID);
+ smap_init(&details);
+ NETDEV_QUEUE_FOR_EACH (&queue_id, &details, &dump, iface->netdev) {
+ if (!queue_ids_include(queues, queue_id)) {
+ netdev_delete_queue(iface->netdev, queue_id);
+ }
+ }
+ smap_destroy(&details);
/* Configure queues for 'iface'. */
queue_zero = false;
if (!netdev_open(vlan_dev->name, "system", &netdev)) {
if (!netdev_get_in4(netdev, NULL, NULL) ||
!netdev_get_in6(netdev, NULL)) {
- vlandev_del(vlan_dev->name);
- } else {
/* It has an IP address configured, so we don't own
* it. Don't delete it. */
+ } else {
+ vlandev_del(vlan_dev->name);
}
netdev_close(netdev);
}