From: Ben Pfaff
Date: Wed, 1 Sep 2010 19:43:11 +0000 (-0700)
Subject: netflow: Do 64-bit division less often.
X-Git-Tag: v1.0.2~5
X-Git-Url: http://git.onelab.eu/?p=sliver-openvswitch.git;a=commitdiff_plain;h=92428253d585ccdc9eeb628d4c2c6aac30ad9336

netflow: Do 64-bit division less often.

64-bit division is expensive.  Usually we can avoid it entirely, as done
by this patch.
---

diff --git a/ofproto/netflow.c b/ofproto/netflow.c
index 015208ac9..a70b2fce8 100644
--- a/ofproto/netflow.c
+++ b/ofproto/netflow.c
@@ -184,21 +184,24 @@ netflow_expire(struct netflow *nf, struct netflow_flow *nf_flow,
         return;
     }
 
-    /* NetFlow v5 records are limited to 32-bit counters.  If we've
-     * wrapped a counter, send as multiple records so we don't lose
-     * track of any traffic.  We try to evenly distribute the packet and
-     * byte counters, so that the bytes-per-packet lengths don't look
-     * wonky across the records. */
-    while (byte_delta) {
-        int n_recs = (byte_delta + UINT32_MAX - 1) / UINT32_MAX;
+    /* NetFlow v5 records are limited to 32-bit counters.  If we've wrapped
+     * a counter, send as multiple records so we don't lose track of any
+     * traffic.  We try to evenly distribute the packet and byte counters,
+     * so that the bytes-per-packet lengths don't look wonky across the
+     * records. */
+    while (byte_delta > UINT32_MAX) {
+        uint32_t n_recs = byte_delta >> 32;
         uint32_t pkt_count = pkt_delta / n_recs;
         uint32_t byte_count = byte_delta / n_recs;
-
+
         gen_netflow_rec(nf, nf_flow, expired, pkt_count, byte_count);
 
         pkt_delta -= pkt_count;
         byte_delta -= byte_count;
     }
+    if (byte_delta > 0) {
+        gen_netflow_rec(nf, nf_flow, expired, pkt_delta, byte_delta);
+    }
 
     /* Update flow tracking data. */
     nf_flow->created = 0;
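
For context, here is a minimal, self-contained sketch (not part of the patch or
the tree) of the idea the commit message describes: when the accumulated byte
delta fits in 32 bits, a single record is emitted and no 64-bit division runs
at all; only in the rare case of a wrapped counter are the totals split across
several records.  The names emit_rec() and flush_deltas() are hypothetical
stand-ins for gen_netflow_rec() and the surrounding netflow_expire() logic, and
the splitting path keeps a ceiling division by UINT32_MAX for simplicity rather
than the shift used in the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for gen_netflow_rec(): just print the record. */
static void
emit_rec(uint32_t pkt_count, uint32_t byte_count)
{
    printf("record: %" PRIu32 " packets, %" PRIu32 " bytes\n",
           pkt_count, byte_count);
}

/* Flush 64-bit deltas as one or more records whose counters fit in 32 bits. */
static void
flush_deltas(uint64_t pkt_delta, uint64_t byte_delta)
{
    while (byte_delta > UINT32_MAX) {
        /* Rare path: a counter wrapped, so spread the packet and byte
         * totals evenly over enough records that each record's byte
         * count fits in 32 bits. */
        uint32_t n_recs = (byte_delta + UINT32_MAX - 1) / UINT32_MAX;
        uint32_t pkt_count = pkt_delta / n_recs;
        uint32_t byte_count = byte_delta / n_recs;

        emit_rec(pkt_count, byte_count);

        pkt_delta -= pkt_count;
        byte_delta -= byte_count;
    }
    if (byte_delta > 0) {
        /* Common path: everything fits in a single record, with no
         * 64-bit division at all. */
        emit_rec(pkt_delta, byte_delta);
    }
}

int
main(void)
{
    flush_deltas(42, 60000);                       /* common case */
    flush_deltas(8000000, UINT64_C(10000000000));  /* wrapped byte counter */
    return 0;
}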