struct ofproto *ofproto;
struct ofsettings s;
int error;
+ struct netflow_options nf_options;
set_program_name(argv[0]);
register_fault_handlers();
ovs_fatal(error,
"failed to configure controller snooping connections");
}
- error = ofproto_set_netflow(ofproto, &s.netflow, 0, 0, false);
+ memset(&nf_options, 0, sizeof nf_options);
+ nf_options.collectors = s.netflow;
+ error = ofproto_set_netflow(ofproto, &nf_options);
if (error) {
ovs_fatal(error, "failed to configure NetFlow collectors");
}
#define NETFLOW_V5_VERSION 5
+static const int ACTIVE_TIMEOUT_DEFAULT = 600;
+
/* Every NetFlow v5 message contains the header that follows. This is
* followed by up to thirty records that describe a terminating flow.
* We only send a single record per NetFlow message.
* bits of the interface fields. */
uint32_t netflow_cnt; /* Flow sequence number for NetFlow. */
struct ofpbuf packet; /* NetFlow packet being accumulated. */
+ long long int active_timeout; /* Timeout for flows that are still active. */
+ long long int reconfig_time; /* When we reconfigured the timeouts. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
}
void
-netflow_expire(struct netflow *nf, const struct ofexpired *expired)
+netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
+ struct ofexpired *expired)
{
struct netflow_v5_header *nf_hdr;
struct netflow_v5_record *nf_rec;
struct timeval now;
- /* NetFlow only reports on IP packets. */
- if (expired->flow.dl_type != htons(ETH_TYPE_IP)) {
+ nf_flow->last_expired += nf->active_timeout;
+
+ /* NetFlow only reports on IP packets and we should only report flows
+ * that actually have traffic. */
+ if (expired->flow.dl_type != htons(ETH_TYPE_IP) ||
+ expired->packet_count - nf_flow->packet_count_off == 0) {
return;
}
if (nf->add_id_to_iface) {
uint16_t iface = (nf->engine_id & 0x7f) << 9;
nf_rec->input = htons(iface | (expired->flow.in_port & 0x1ff));
- nf_rec->output = htons(iface | (expired->output_iface & 0x1ff));
+ nf_rec->output = htons(iface | (nf_flow->output_iface & 0x1ff));
} else {
nf_rec->input = htons(expired->flow.in_port);
- nf_rec->output = htons(expired->output_iface);
+ nf_rec->output = htons(nf_flow->output_iface);
}
- nf_rec->packet_count = htonl(MIN(expired->packet_count, UINT32_MAX));
- nf_rec->byte_count = htonl(MIN(expired->byte_count, UINT32_MAX));
- nf_rec->init_time = htonl(expired->created - nf->boot_time);
- nf_rec->used_time = htonl(MAX(expired->created, expired->used)
+ nf_rec->packet_count = htonl(MIN(expired->packet_count -
+ nf_flow->packet_count_off, UINT32_MAX));
+ nf_rec->byte_count = htonl(MIN(expired->byte_count -
+ nf_flow->byte_count_off, UINT32_MAX));
+ nf_rec->init_time = htonl(nf_flow->created - nf->boot_time);
+ nf_rec->used_time = htonl(MAX(nf_flow->created, expired->used)
- nf->boot_time);
if (expired->flow.nw_proto == IP_TYPE_ICMP) {
/* In NetFlow, the ICMP type and code are concatenated and
nf_rec->src_port = expired->flow.tp_src;
nf_rec->dst_port = expired->flow.tp_dst;
}
- nf_rec->tcp_flags = expired->tcp_flags;
+ nf_rec->tcp_flags = nf_flow->tcp_flags;
nf_rec->ip_proto = expired->flow.nw_proto;
- nf_rec->ip_tos = expired->ip_tos;
+ nf_rec->ip_tos = nf_flow->ip_tos;
+
+ /* Update flow tracking data. */
+ nf_flow->created = 0;
+ nf_flow->packet_count_off = expired->packet_count;
+ nf_flow->byte_count_off = expired->byte_count;
+ nf_flow->tcp_flags = 0;
/* NetFlow messages are limited to 30 records. */
if (ntohs(nf_hdr->count) >= 30) {
}
int
-netflow_set_collectors(struct netflow *nf, const struct svec *collectors_)
+netflow_set_options(struct netflow *nf,
+ const struct netflow_options *nf_options)
{
struct svec collectors;
int error = 0;
size_t i;
+ long long int old_timeout;
+
+ nf->engine_type = nf_options->engine_type;
+ nf->engine_id = nf_options->engine_id;
+ nf->add_id_to_iface = nf_options->add_id_to_iface;
clear_collectors(nf);
- svec_clone(&collectors, collectors_);
+ svec_clone(&collectors, &nf_options->collectors);
svec_sort_unique(&collectors);
nf->fds = xmalloc(sizeof *nf->fds * collectors.n);
}
svec_destroy(&collectors);
- return error;
-}
-void
-netflow_set_engine(struct netflow *nf, uint8_t engine_type,
- uint8_t engine_id, bool add_id_to_iface)
-{
- nf->engine_type = engine_type;
- nf->engine_id = engine_id;
- nf->add_id_to_iface = add_id_to_iface;
+ old_timeout = nf->active_timeout;
+ if (nf_options->active_timeout != -1) {
+ nf->active_timeout = nf_options->active_timeout;
+ } else {
+ nf->active_timeout = ACTIVE_TIMEOUT_DEFAULT;
+ }
+ nf->active_timeout *= 1000;
+ if (old_timeout != nf->active_timeout) {
+ nf->reconfig_time = time_msec();
+ }
+
+ return error;
}
struct netflow *
free(nf);
}
}
+
+void
+netflow_flow_clear(struct netflow_flow *nf_flow)
+{
+ uint16_t output_iface = nf_flow->output_iface;
+
+ memset(nf_flow, 0, sizeof *nf_flow);
+ nf_flow->output_iface = output_iface;
+}
+
+void
+netflow_flow_update_time(struct netflow *nf, struct netflow_flow *nf_flow,
+ long long int used)
+{
+ if (!nf_flow->created) {
+ nf_flow->created = used;
+ }
+
+ if (!nf || !nf->active_timeout || !nf_flow->last_expired ||
+ nf->reconfig_time > nf_flow->last_expired) {
+ /* Keep the time updated to prevent a flood of expirations in
+ * the future. */
+ nf_flow->last_expired = time_msec();
+ }
+}
+
+void
+netflow_flow_update_flags(struct netflow_flow *nf_flow, uint8_t ip_tos,
+ uint8_t tcp_flags)
+{
+ nf_flow->ip_tos = ip_tos;
+ nf_flow->tcp_flags |= tcp_flags;
+}
+
+bool
+netflow_active_timeout_expired(struct netflow *nf, struct netflow_flow *nf_flow)
+{
+ if (nf->active_timeout) {
+ return time_msec() > nf_flow->last_expired + nf->active_timeout;
+ }
+
+ return false;
+}
#define NETFLOW_H 1
#include "flow.h"
+#include "svec.h"
struct ofexpired;
-struct svec;
+
+struct netflow_options {
+ struct svec collectors;
+ uint8_t engine_type;
+ uint8_t engine_id;
+ int active_timeout;
+ bool add_id_to_iface;
+};
enum netflow_output_ports {
NF_OUT_FLOOD = UINT16_MAX,
NF_OUT_DROP = UINT16_MAX - 2
};
+struct netflow_flow {
+ long long int last_expired; /* Time this flow last timed out. */
+ long long int created; /* Time flow was created or last timed out. */
+
+ uint64_t packet_count_off; /* Packet count at last time out. */
+ uint64_t byte_count_off; /* Byte count at last time out. */
+
+ uint16_t output_iface; /* Output interface index. */
+ uint8_t ip_tos; /* Last-seen IP type-of-service. */
+ uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
+};
+
struct netflow *netflow_create(void);
void netflow_destroy(struct netflow *);
-int netflow_set_collectors(struct netflow *, const struct svec *collectors);
-void netflow_set_engine(struct netflow *nf, uint8_t engine_type,
- uint8_t engine_id, bool add_id_to_iface);
-void netflow_expire(struct netflow *, const struct ofexpired *);
+int netflow_set_options(struct netflow *, const struct netflow_options *);
+void netflow_expire(struct netflow *, struct netflow_flow *,
+ struct ofexpired *);
void netflow_run(struct netflow *);
+void netflow_flow_clear(struct netflow_flow *);
+void netflow_flow_update_time(struct netflow *, struct netflow_flow *,
+ long long int used);
+void netflow_flow_update_flags(struct netflow_flow *, uint8_t ip_tos,
+ uint8_t tcp_flags);
+bool netflow_active_timeout_expired(struct netflow *, struct netflow_flow *);
+
#endif /* netflow.h */
uint64_t packet_count; /* Number of packets received. */
uint64_t byte_count; /* Number of bytes received. */
uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
- uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
- uint8_t ip_tos; /* Last-seen IP type-of-service. */
tag_type tags; /* Tags (set only by hooks). */
- uint16_t nf_output_iface; /* Output interface index for NetFlow. */
+ struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */
/* If 'super' is non-NULL, this rule is a subrule, that is, it is an
* exact-match rule (having cr.wc.wildcards of 0) generated from the
return false;
}
-static struct rule *rule_create(struct rule *super, const union ofp_action *,
- size_t n_actions, uint16_t idle_timeout,
- uint16_t hard_timeout);
+static struct rule *rule_create(struct ofproto *, struct rule *super,
+ const union ofp_action *, size_t n_actions,
+ uint16_t idle_timeout, uint16_t hard_timeout);
static void rule_free(struct rule *);
static void rule_destroy(struct ofproto *, struct rule *);
static struct rule *rule_from_cls_rule(const struct cls_rule *);
static void send_packet_in_miss(struct ofpbuf *, void *ofproto);
static void send_packet_in_action(struct ofpbuf *, void *ofproto);
static void update_used(struct ofproto *);
-static void update_stats(struct rule *, const struct odp_flow_stats *);
+static void update_stats(struct ofproto *, struct rule *,
+ const struct odp_flow_stats *);
static void expire_rule(struct cls_rule *, void *ofproto);
+static void active_timeout(struct ofproto *ofproto, struct rule *rule);
static bool revalidate_rule(struct ofproto *p, struct rule *rule);
static void revalidate_cb(struct cls_rule *rule_, void *p_);
}
int
-ofproto_set_netflow(struct ofproto *ofproto, const struct svec *collectors,
- uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface)
+ofproto_set_netflow(struct ofproto *ofproto,
+ const struct netflow_options *nf_options)
{
- if (collectors && collectors->n) {
+ if (nf_options->collectors.n) {
if (!ofproto->netflow) {
ofproto->netflow = netflow_create();
}
- netflow_set_engine(ofproto->netflow, engine_type, engine_id,
- add_id_to_iface);
- return netflow_set_collectors(ofproto->netflow, collectors);
+ return netflow_set_options(ofproto->netflow, nf_options);
} else {
netflow_destroy(ofproto->netflow);
ofproto->netflow = NULL;
int idle_timeout)
{
struct rule *rule;
- rule = rule_create(NULL, actions, n_actions,
+ rule = rule_create(p, NULL, actions, n_actions,
idle_timeout >= 0 ? idle_timeout : 5 /* XXX */, 0);
cls_rule_from_flow(&rule->cr, flow, wildcards, priority);
rule_insert(p, rule, NULL, 0);
/* Caller is responsible for initializing the 'cr' member of the returned
* rule. */
static struct rule *
-rule_create(struct rule *super,
+rule_create(struct ofproto *ofproto, struct rule *super,
const union ofp_action *actions, size_t n_actions,
uint16_t idle_timeout, uint16_t hard_timeout)
{
}
rule->n_actions = n_actions;
rule->actions = xmemdup(actions, n_actions * sizeof *actions);
+ netflow_flow_clear(&rule->nf_flow);
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
+
return rule;
}
actions, n_actions, packet)) {
struct odp_flow_stats stats;
flow_extract_stats(flow, packet, &stats);
- update_stats(rule, &stats);
+ update_stats(ofproto, rule, &stats);
rule->used = time_msec();
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->used);
}
}
rule_create_subrule(struct ofproto *ofproto, struct rule *rule,
const flow_t *flow)
{
- struct rule *subrule = rule_create(rule, NULL, 0,
+ struct rule *subrule = rule_create(ofproto, rule, NULL, 0,
rule->idle_timeout, rule->hard_timeout);
COVERAGE_INC(ofproto_subrule_create);
cls_rule_from_flow(&subrule->cr, flow, 0,
rule->tags = 0;
xlate_actions(super->actions, super->n_actions, &rule->cr.flow, p,
packet, &a, &rule->tags, &rule->may_install,
- &rule->nf_output_iface);
+ &rule->nf_flow.output_iface);
actions_len = a.n_actions * sizeof *a.actions;
if (rule->n_odp_actions != a.n_actions
&put)) {
rule->installed = true;
if (displaced_rule) {
- update_stats(rule, &put.flow.stats);
+ update_stats(p, rule, &put.flow.stats);
rule_post_uninstall(p, displaced_rule);
}
}
odp_flow.actions = NULL;
odp_flow.n_actions = 0;
if (!dpif_flow_del(&p->dpif, &odp_flow)) {
- update_stats(rule, &odp_flow.stats);
+ update_stats(p, rule, &odp_flow.stats);
}
rule->installed = false;
}
}
+static bool
+is_controller_rule(struct rule *rule)
+{
+ /* If the only action is send to the controller then don't report
+ * NetFlow expiration messages since it is just part of the control
+ * logic for the network and not real traffic. */
+
+ if (rule && rule->super) {
+ struct rule *super = rule->super;
+
+ return super->n_actions == 1 &&
+ super->actions[0].type == htons(OFPAT_OUTPUT) &&
+ super->actions[0].output.port == htons(OFPP_CONTROLLER);
+ }
+
+ return false;
+}
+
static void
rule_post_uninstall(struct ofproto *ofproto, struct rule *rule)
{
struct rule *super = rule->super;
- bool controller_action;
rule_account(ofproto, rule, 0);
- /* If the only action is send to the controller then don't report
- * NetFlow expiration messages since it is just part of the control
- * logic for the network and not real traffic. */
- controller_action = rule->n_odp_actions == 1 &&
- rule->odp_actions[0].type == ODPAT_CONTROLLER;
-
- if (ofproto->netflow && rule->byte_count && !controller_action) {
+ if (ofproto->netflow && !is_controller_rule(rule)) {
struct ofexpired expired;
expired.flow = rule->cr.flow;
expired.packet_count = rule->packet_count;
expired.byte_count = rule->byte_count;
expired.used = rule->used;
- expired.created = rule->created;
- expired.tcp_flags = rule->tcp_flags;
- expired.ip_tos = rule->ip_tos;
- expired.output_iface = rule->nf_output_iface;
- netflow_expire(ofproto->netflow, &expired);
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
}
if (super) {
super->packet_count += rule->packet_count;
super->byte_count += rule->byte_count;
- super->tcp_flags |= rule->tcp_flags;
- if (rule->packet_count) {
- super->ip_tos = rule->ip_tos;
- }
/* Reset counters to prevent double counting if the rule ever gets
* reinstalled. */
rule->packet_count = 0;
rule->byte_count = 0;
rule->accounted_bytes = 0;
- rule->tcp_flags = 0;
- rule->ip_tos = 0;
+
+ netflow_flow_clear(&rule->nf_flow);
}
}
\f
}
static void
-update_time(struct rule *rule, const struct odp_flow_stats *stats)
+update_time(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
long long int used = msec_from_nsec(stats->used_sec, stats->used_nsec);
if (used > rule->used) {
rule->used = used;
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
}
}
static void
-update_stats(struct rule *rule, const struct odp_flow_stats *stats)
+update_stats(struct ofproto *ofproto, struct rule *rule,
+ const struct odp_flow_stats *stats)
{
- update_time(rule, stats);
- rule->packet_count += stats->n_packets;
- rule->byte_count += stats->n_bytes;
- rule->tcp_flags |= stats->tcp_flags;
if (stats->n_packets) {
- rule->ip_tos = stats->ip_tos;
+ update_time(ofproto, rule, stats);
+ rule->packet_count += stats->n_packets;
+ rule->byte_count += stats->n_bytes;
+ netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
+ stats->tcp_flags);
}
}
uint16_t in_port;
int error;
- rule = rule_create(NULL, (const union ofp_action *) ofm->actions,
+ rule = rule_create(p, NULL, (const union ofp_action *) ofm->actions,
n_actions, ntohs(ofm->idle_timeout),
ntohs(ofm->hard_timeout));
cls_rule_from_match(&rule->cr, &ofm->match, ntohs(ofm->priority));
? rule->used + rule->idle_timeout * 1000
: LLONG_MAX);
expire = MIN(hard_expire, idle_expire);
- if (expire == LLONG_MAX) {
- if (rule->installed && time_msec() >= rule->used + 5000) {
- uninstall_idle_flow(p, rule);
- }
- return;
- }
now = time_msec();
if (now < expire) {
if (rule->installed && now >= rule->used + 5000) {
uninstall_idle_flow(p, rule);
+ } else if (!rule->cr.wc.wildcards) {
+ active_timeout(p, rule);
}
+
return;
}
rule_remove(p, rule);
}
+static void
+active_timeout(struct ofproto *ofproto, struct rule *rule)
+{
+ if (ofproto->netflow && !is_controller_rule(rule) &&
+ netflow_active_timeout_expired(ofproto->netflow, &rule->nf_flow)) {
+ struct ofexpired expired;
+ struct odp_flow odp_flow;
+
+ /* Get updated flow stats. */
+ memset(&odp_flow, 0, sizeof odp_flow);
+ odp_flow.key = rule->cr.flow;
+ odp_flow.flags = ODPFF_ZERO_TCP_FLAGS;
+ dpif_flow_get(&ofproto->dpif, &odp_flow);
+
+ if (odp_flow.stats.n_packets) {
+ update_time(ofproto, rule, &odp_flow.stats);
+ netflow_flow_update_flags(&rule->nf_flow, odp_flow.stats.ip_tos,
+ odp_flow.stats.tcp_flags);
+ }
+
+ expired.flow = rule->cr.flow;
+ expired.packet_count = rule->packet_count +
+ odp_flow.stats.n_packets;
+ expired.byte_count = rule->byte_count + odp_flow.stats.n_bytes;
+ expired.used = rule->used;
+
+ netflow_expire(ofproto->netflow, &rule->nf_flow, &expired);
+
+ /* Schedule us to send the accumulated records once we have
+ * collected all of them. */
+ poll_immediate_wake();
+ }
+}
+
static void
update_used(struct ofproto *p)
{
continue;
}
- update_time(rule, &f->stats);
+ update_time(p, rule, &f->stats);
rule_account(p, rule, f->stats.n_bytes);
}
free(flows);
#include <stddef.h>
#include <stdint.h>
#include "flow.h"
+#include "netflow.h"
#include "tag.h"
struct odp_actions;
struct ofexpired {
flow_t flow;
- uint64_t packet_count; /* Packets from *expired* subrules. */
- uint64_t byte_count; /* Bytes from *expired* subrules. */
+ uint64_t packet_count; /* Packets from subrules. */
+ uint64_t byte_count; /* Bytes from subrules. */
long long int used; /* Last-used time (0 if never used). */
- long long int created; /* Creation time. */
- uint8_t tcp_flags; /* Bitwise-OR of all TCP flags seen. */
- uint8_t ip_tos; /* Last-seen IP type-of-service. */
- uint16_t output_iface; /* Output interface index. */
};
int ofproto_create(const char *datapath, const struct ofhooks *, void *aux,
int ofproto_set_controller(struct ofproto *, const char *controller);
int ofproto_set_listeners(struct ofproto *, const struct svec *listeners);
int ofproto_set_snoops(struct ofproto *, const struct svec *snoops);
-int ofproto_set_netflow(struct ofproto *, const struct svec *collectors,
- uint8_t engine_type, uint8_t engine_id, bool add_id_to_iface);
+int ofproto_set_netflow(struct ofproto *,
+ const struct netflow_options *nf_options);
void ofproto_set_failure(struct ofproto *, bool fail_open);
void ofproto_set_rate_limit(struct ofproto *, int rate_limit, int burst_limit);
int ofproto_set_stp(struct ofproto *, bool enable_stp);
uint64_t dpid;
struct iface *local_iface = NULL;
const char *devname;
- uint8_t engine_type = br->dpif.minor;
- uint8_t engine_id = br->dpif.minor;
- bool add_id_to_iface = false;
- struct svec nf_hosts;
+ struct netflow_options nf_options;
bridge_fetch_dp_ifaces(br);
for (i = 0; i < br->n_ports; ) {
ofproto_set_datapath_id(br->ofproto, dpid);
/* Set NetFlow configuration on this bridge. */
+ memset(&nf_options, 0, sizeof nf_options);
+ nf_options.engine_type = br->dpif.minor;
+ nf_options.engine_id = br->dpif.minor;
+ nf_options.active_timeout = -1;
+
if (cfg_has("netflow.%s.engine-type", br->name)) {
- engine_type = cfg_get_int(0, "netflow.%s.engine-type",
+ nf_options.engine_type = cfg_get_int(0, "netflow.%s.engine-type",
br->name);
}
if (cfg_has("netflow.%s.engine-id", br->name)) {
- engine_id = cfg_get_int(0, "netflow.%s.engine-id", br->name);
+ nf_options.engine_id = cfg_get_int(0, "netflow.%s.engine-id",
+ br->name);
+ }
+ if (cfg_has("netflow.%s.active-timeout", br->name)) {
+ nf_options.active_timeout = cfg_get_int(0,
+ "netflow.%s.active-timeout",
+ br->name);
}
if (cfg_has("netflow.%s.add-id-to-iface", br->name)) {
- add_id_to_iface = cfg_get_bool(0, "netflow.%s.add-id-to-iface",
- br->name);
+ nf_options.add_id_to_iface = cfg_get_bool(0,
+ "netflow.%s.add-id-to-iface",
+ br->name);
}
- if (add_id_to_iface && engine_id > 0x7f) {
+ if (nf_options.add_id_to_iface && nf_options.engine_id > 0x7f) {
VLOG_WARN("bridge %s: netflow port mangling may conflict with "
"another vswitch, choose an engine id less than 128",
br->name);
}
- if (add_id_to_iface && br->n_ports > 0x1ff) {
+ if (nf_options.add_id_to_iface && br->n_ports > 508) {
VLOG_WARN("bridge %s: netflow port mangling will conflict with "
- "another port when 512 or more ports are used",
+ "another port when more than 508 ports are used",
br->name);
}
- svec_init(&nf_hosts);
- cfg_get_all_keys(&nf_hosts, "netflow.%s.host", br->name);
- if (ofproto_set_netflow(br->ofproto, &nf_hosts, engine_type,
- engine_id, add_id_to_iface)) {
+ svec_init(&nf_options.collectors);
+ cfg_get_all_keys(&nf_options.collectors, "netflow.%s.host", br->name);
+ if (ofproto_set_netflow(br->ofproto, &nf_options)) {
VLOG_ERR("bridge %s: problem setting netflow collectors",
br->name);
}
- svec_destroy(&nf_hosts);
+ svec_destroy(&nf_options.collectors);
/* Update the controller and related settings. It would be more
* straightforward to call this from bridge_reconfigure_one(), but we
.fi
.SS "NetFlow v5 Flow Logging"
-NetFlow is a protocol that exports a number of details about terminating
-IP flows, such as the principals involved and duration. A bridge may be
-configured to send NetFlow v5 records to NetFlow collectors when flows
-end. To enable, define the key \fBnetflow.\fIbridge\fB.host\fR for each
-collector in the form \fIip\fB:\fIport\fR. Records from \fIbridge\fR
+NetFlow is a protocol that exports a number of details about terminating
+IP flows, such as the principals involved and duration. A bridge may be
+configured to send NetFlow v5 records to NetFlow collectors when flows
+end. To enable, define the key \fBnetflow.\fIbridge\fB.host\fR for each
+collector in the form \fIip\fB:\fIport\fR. Records from \fIbridge\fR
will be sent to each \fIip\fR on UDP \fIport\fR. The \fIip\fR must
be specified numerically, not as a DNS name.
-The NetFlow messages will use the datapath index for the engine type and id.
-This can be overridden with the \fBnetflow.\fIbridge\fB.engine-type\fR and
+In addition to terminating flows, NetFlow can also send records at a set
+interval for flows that are still active. This interval can be configured
by defining the key \fBnetflow.\fIbridge\fB.active-timeout\fR. The value
+is in seconds. An active timeout of 0 will disable this functionality. By
default, the timeout is 600 seconds.
+
+The NetFlow messages will use the datapath index for the engine type and id.
+This can be overridden with the \fBnetflow.\fIbridge\fB.engine-type\fR and
\fBnetflow.\fIbridge\fB.engine-id\fR, respectively. Each takes a value
-between 0 and 255, inclusive.
+between 0 and 255, inclusive.
Many NetFlow collectors do not expect multiple virtual switches to be
sending messages from the same host, and they do not store the engine
flows from multiple switches appearing as if they came on the interface,
add \fBnetflow.\fIbridge\fB.add-id-to-iface=true\fR to the configuration
file. This will place the least significant 7 bits of the engine id
-into the most significant bits of the ingress and egress interface fields
+into the most significant bits of the ingress and egress interface fields
of flow records. When this option is enabled, a maximum of 508 ports are
supported. By default, this behavior is disabled.