#define NETFLOW_V5_VERSION 5
+/* Active timeout used when the configuration does not supply one.  This is
+ * in seconds; netflow_set_options() converts the chosen timeout to
+ * milliseconds for internal use. */
+static const int ACTIVE_TIMEOUT_DEFAULT = 600;
+
/* Every NetFlow v5 message contains the header that follows. This is
* followed by up to thirty records that describe a terminating flow.
* We only send a single record per NetFlow message.
* bits of the interface fields. */
uint32_t netflow_cnt; /* Flow sequence number for NetFlow. */
struct ofpbuf packet; /* NetFlow packet being accumulated. */
+ long long int active_timeout; /* Timeout for flows that are still active. */
+ long long int reconfig_time; /* When we reconfigured the timeouts. */
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
static int
open_collector(char *dst)
{
- char *save_ptr = NULL;
- const char *host_name;
- const char *port_string;
- struct sockaddr_in sin;
- int retval;
- int fd;
-
- /* Glibc 2.7 has a bug in strtok_r when compiling with optimization that
- * can cause segfaults here:
- * http://sources.redhat.com/bugzilla/show_bug.cgi?id=5614.
- * Using "::" instead of the obvious ":" works around it. */
- host_name = strtok_r(dst, ":", &save_ptr);
- port_string = strtok_r(NULL, ":", &save_ptr);
- if (!host_name) {
- ovs_error(0, "%s: bad peer name format", dst);
- return -EAFNOSUPPORT;
- }
- if (!port_string) {
- ovs_error(0, "%s: bad port format", dst);
- return -EAFNOSUPPORT;
- }
-
- memset(&sin, 0, sizeof sin);
- sin.sin_family = AF_INET;
- if (lookup_ip(host_name, &sin.sin_addr)) {
- return -ENOENT;
- }
- sin.sin_port = htons(atoi(port_string));
-
- fd = socket(AF_INET, SOCK_DGRAM, 0);
- if (fd < 0) {
- VLOG_ERR("%s: socket: %s", dst, strerror(errno));
- return -errno;
- }
-
- retval = set_nonblocking(fd);
- if (retval) {
- close(fd);
- return -retval;
- }
-
- retval = connect(fd, (struct sockaddr *) &sin, sizeof sin);
- if (retval < 0) {
- int error = errno;
- VLOG_ERR("%s: connect: %s", dst, strerror(error));
- close(fd);
- return -error;
- }
+ /* inet_open_active() replaces the hand-rolled parse/lookup/socket()/
+ * set_nonblocking()/connect() sequence removed above, including its
+ * strtok_r() workaround.  "dst" names the collector. */
+ int error, fd;
- return fd;
+ error = inet_open_active(SOCK_DGRAM, dst, 0, NULL, &fd);
+ /* Return the connected fd (>= 0) on success, otherwise the helper's
+ * error as a negative value, matching the old convention. */
+ return fd >= 0 ? fd : -error;
}
void
-netflow_expire(struct netflow *nf, const struct ofexpired *expired)
+netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
+ struct ofexpired *expired)
{
struct netflow_v5_header *nf_hdr;
struct netflow_v5_record *nf_rec;
struct timeval now;
- /* NetFlow only reports on IP packets. */
- if (expired->flow.dl_type != htons(ETH_TYPE_IP)) {
+ /* Advance the flow's next active-timeout deadline by one whole
+ * interval rather than restamping with the current time. */
+ nf_flow->last_expired += nf->active_timeout;
+
+ /* NetFlow only reports on IP packets and we should only report flows
+ * that actually have traffic. */
+ if (expired->flow.dl_type != htons(ETH_TYPE_IP) ||
+ expired->packet_count - nf_flow->packet_count_off == 0) {
return;
}
if (nf->add_id_to_iface) {
uint16_t iface = (nf->engine_id & 0x7f) << 9;
nf_rec->input = htons(iface | (expired->flow.in_port & 0x1ff));
- nf_rec->output = htons(iface);
- printf("input: %x\n", ntohs(nf_rec->input));
+ nf_rec->output = htons(iface | (nf_flow->output_iface & 0x1ff));
} else {
nf_rec->input = htons(expired->flow.in_port);
- nf_rec->output = htons(0);
+ nf_rec->output = htons(nf_flow->output_iface);
}
- nf_rec->packet_count = htonl(MIN(expired->packet_count, UINT32_MAX));
- nf_rec->byte_count = htonl(MIN(expired->byte_count, UINT32_MAX));
- nf_rec->init_time = htonl(expired->created - nf->boot_time);
- nf_rec->used_time = htonl(MAX(expired->created, expired->used)
+ /* Report only the packets/bytes accumulated since the previous report
+ * of this flow; the *_off fields hold the totals already reported. */
+ nf_rec->packet_count = htonl(MIN(expired->packet_count -
+ nf_flow->packet_count_off, UINT32_MAX));
+ nf_rec->byte_count = htonl(MIN(expired->byte_count -
+ nf_flow->byte_count_off, UINT32_MAX));
+ nf_rec->init_time = htonl(nf_flow->created - nf->boot_time);
+ nf_rec->used_time = htonl(MAX(nf_flow->created, expired->used)
- nf->boot_time);
if (expired->flow.nw_proto == IP_TYPE_ICMP) {
/* In NetFlow, the ICMP type and code are concatenated and
nf_rec->src_port = expired->flow.tp_src;
nf_rec->dst_port = expired->flow.tp_dst;
}
- nf_rec->tcp_flags = expired->tcp_flags;
+ nf_rec->tcp_flags = nf_flow->tcp_flags;
nf_rec->ip_proto = expired->flow.nw_proto;
- nf_rec->ip_tos = expired->ip_tos;
+ nf_rec->ip_tos = nf_flow->ip_tos;
+
+ /* Update flow tracking data.  Zeroing "created" makes
+ * netflow_flow_update_time() restamp it on the flow's next use. */
+ nf_flow->created = 0;
+ nf_flow->packet_count_off = expired->packet_count;
+ nf_flow->byte_count_off = expired->byte_count;
+ nf_flow->tcp_flags = 0;
- /* NetFlow messages are limited to 30 records. A length of 1400
- * bytes guarantees that the limit is not exceeded. */
- if (nf->packet.size >= 1400) {
+ /* NetFlow messages are limited to 30 records. */
+ if (ntohs(nf_hdr->count) >= 30) {
netflow_run(nf);
}
}
}
int
-netflow_set_collectors(struct netflow *nf, const struct svec *collectors_)
+netflow_set_options(struct netflow *nf,
+ const struct netflow_options *nf_options)
{
struct svec collectors;
int error = 0;
size_t i;
+ long long int old_timeout;
+
+ /* Engine settings, formerly configured via netflow_set_engine()
+ * (removed below), are now part of the single options struct. */
+ nf->engine_type = nf_options->engine_type;
+ nf->engine_id = nf_options->engine_id;
+ nf->add_id_to_iface = nf_options->add_id_to_iface;
clear_collectors(nf);
- svec_clone(&collectors, collectors_);
+ svec_clone(&collectors, &nf_options->collectors);
svec_sort_unique(&collectors);
nf->fds = xmalloc(sizeof *nf->fds * collectors.n);
}
svec_destroy(&collectors);
- return error;
-}
-void
-netflow_set_engine(struct netflow *nf, uint8_t engine_type,
- uint8_t engine_id, bool add_id_to_iface)
-{
- nf->engine_type = engine_type;
- nf->engine_id = engine_id;
- nf->add_id_to_iface = add_id_to_iface;
+ /* The configured active timeout is in seconds; -1 selects the
+ * default.  Internally it is kept in milliseconds. */
+ old_timeout = nf->active_timeout;
+ if (nf_options->active_timeout != -1) {
+ nf->active_timeout = nf_options->active_timeout;
+ } else {
+ nf->active_timeout = ACTIVE_TIMEOUT_DEFAULT;
+ }
+ nf->active_timeout *= 1000;
+ if (old_timeout != nf->active_timeout) {
+ /* Record when the timeout changed so that
+ * netflow_flow_update_time() rebases per-flow expiration times. */
+ nf->reconfig_time = time_msec();
+ }
+
+ return error;
}
struct netflow *
free(nf);
}
}
+
+/* Resets the per-flow NetFlow accounting state in "nf_flow" to zero,
+ * preserving only the cached output interface number. */
+void
+netflow_flow_clear(struct netflow_flow *nf_flow)
+{
+ uint16_t output_iface = nf_flow->output_iface;
+
+ memset(nf_flow, 0, sizeof *nf_flow);
+ nf_flow->output_iface = output_iface;
+}
+
+/* Notes that "nf_flow" was used at time "used" (in msec).  Stamps the flow's
+ * creation time on first use, and (re)bases "last_expired" whenever active
+ * timeouts cannot apply yet: no netflow, timeouts disabled, flow never
+ * expired, or the timeout was reconfigured since the flow last expired. */
+void
+netflow_flow_update_time(struct netflow *nf, struct netflow_flow *nf_flow,
+ long long int used)
+{
+ if (!nf_flow->created) {
+ nf_flow->created = used;
+ }
+
+ if (!nf || !nf->active_timeout || !nf_flow->last_expired ||
+ nf->reconfig_time > nf_flow->last_expired) {
+ /* Keep the time updated to prevent a flood of expiration in
+ * the future. */
+ nf_flow->last_expired = time_msec();
+ }
+}
+
+/* Records the most recently seen IP ToS value and accumulates (ORs in) the
+ * TCP flags observed on the flow; the accumulated flags are cleared when the
+ * flow is reported by netflow_expire(). */
+void
+netflow_flow_update_flags(struct netflow_flow *nf_flow, uint8_t ip_tos,
+ uint8_t tcp_flags)
+{
+ nf_flow->ip_tos = ip_tos;
+ nf_flow->tcp_flags |= tcp_flags;
+}
+
+/* Returns true if "nf_flow" is due for an active-timeout report, i.e. more
+ * than "active_timeout" msec have elapsed since it last expired.  Always
+ * returns false when active timeouts are disabled (timeout of 0). */
+bool
+netflow_active_timeout_expired(struct netflow *nf, struct netflow_flow *nf_flow)
+{
+ if (nf->active_timeout) {
+ return time_msec() > nf_flow->last_expired + nf->active_timeout;
+ }
+
+ return false;
+}