+static inline uint32_t
+ipfix_hash_flow_key(const struct ipfix_flow_key *flow_key, uint32_t basis)
+{
+    /* Folds the observation domain ID, the template ID and the raw flow key
+     * bytes into a single 32-bit hash value, seeded with 'basis'. */
+    uint32_t h = hash_int(flow_key->obs_domain_id, basis);
+
+    h = hash_int(flow_key->template_id, h);
+    return hash_bytes(flow_key->flow_key_msg_part,
+                      flow_key->flow_key_msg_part_size, h);
+}
+
+static bool
+ipfix_flow_key_equal(const struct ipfix_flow_key *a,
+                     const struct ipfix_flow_key *b)
+{
+    /* The template ID determines the flow key size, so there is no need to
+     * compare flow_key_msg_part_size once the template IDs match. */
+    if (a->obs_domain_id != b->obs_domain_id
+        || a->template_id != b->template_id) {
+        return false;
+    }
+    return !memcmp(a->flow_key_msg_part, b->flow_key_msg_part,
+                   a->flow_key_msg_part_size);
+}
+
+static struct ipfix_flow_cache_entry*
+ipfix_cache_find_entry(const struct dpif_ipfix_exporter *exporter,
+                       const struct ipfix_flow_key *flow_key)
+{
+    /* Returns the cache entry whose flow key equals 'flow_key', or NULL if
+     * the cache contains no such entry. */
+    uint32_t hash = ipfix_hash_flow_key(flow_key, 0);
+    struct ipfix_flow_cache_entry *e;
+
+    HMAP_FOR_EACH_WITH_HASH (e, flow_key_map_node, hash,
+                             &exporter->cache_flow_key_map) {
+        if (ipfix_flow_key_equal(&e->flow_key, flow_key)) {
+            return e;
+        }
+    }
+    return NULL;
+}
+
+/* If the cache is non-empty, stores into '*next_timeout_msec' the time (in
+ * milliseconds) at which the oldest cached flow reaches the exporter's
+ * active timeout, and returns true.  Returns false without modifying
+ * '*next_timeout_msec' if the cache is empty. */
+static bool
+ipfix_cache_next_timeout_msec(const struct dpif_ipfix_exporter *exporter,
+ long long int *next_timeout_msec)
+{
+ struct ipfix_flow_cache_entry *entry;
+
+ /* The list is kept ordered by flow_start_timestamp_usec (new entries are
+ * appended at the tail), so the loop body runs at most once: it only
+ * inspects the head, i.e. the oldest, entry before returning.
+ * flow_start_timestamp_usec is converted from usec to msec, and
+ * cache_active_timeout (seconds) to msec. */
+ LIST_FOR_EACH (entry, cache_flow_start_timestamp_list_node,
+ &exporter->cache_flow_start_timestamp_list) {
+ *next_timeout_msec = entry->flow_start_timestamp_usec / 1000LL
+ + 1000LL * exporter->cache_active_timeout;
+ return true;
+ }
+
+ return false;
+}
+
+static void
+ipfix_cache_aggregate_entries(struct ipfix_flow_cache_entry *from_entry,
+                              struct ipfix_flow_cache_entry *to_entry)
+{
+    /* Merges the statistics of 'from_entry' into 'to_entry': widens the
+     * [start, end] timestamp interval, sums the delta counters, and combines
+     * the minimum/maximum IP total lengths (a zero minimum is treated as
+     * "unset" and never wins the comparison). */
+    uint64_t from_start = from_entry->flow_start_timestamp_usec;
+    uint64_t from_end = from_entry->flow_end_timestamp_usec;
+    uint16_t from_min = from_entry->minimum_ip_total_length;
+    uint16_t from_max = from_entry->maximum_ip_total_length;
+
+    if (to_entry->flow_start_timestamp_usec > from_start) {
+        to_entry->flow_start_timestamp_usec = from_start;
+    }
+    if (to_entry->flow_end_timestamp_usec < from_end) {
+        to_entry->flow_end_timestamp_usec = from_end;
+    }
+
+    to_entry->packet_delta_count += from_entry->packet_delta_count;
+    to_entry->layer2_octet_delta_count += from_entry->layer2_octet_delta_count;
+    to_entry->octet_delta_count += from_entry->octet_delta_count;
+    to_entry->octet_delta_sum_of_squares +=
+        from_entry->octet_delta_sum_of_squares;
+
+    if (!to_entry->minimum_ip_total_length
+        || (from_min && to_entry->minimum_ip_total_length > from_min)) {
+        to_entry->minimum_ip_total_length = from_min;
+    }
+    if (to_entry->maximum_ip_total_length < from_max) {
+        to_entry->maximum_ip_total_length = from_max;
+    }
+}
+
+/* Adds 'entry' to 'exporter''s flow cache.  If an entry with the same flow
+ * key already exists, 'entry' is aggregated into it and then free()d;
+ * otherwise 'entry' is inserted into the cache, which takes ownership. */
+static void
+ipfix_cache_update(struct dpif_ipfix_exporter *exporter,
+                   struct ipfix_flow_cache_entry *entry)
+{
+    struct ipfix_flow_cache_entry *existing;
+
+    existing = ipfix_cache_find_entry(exporter, &entry->flow_key);
+    if (existing) {
+        ipfix_cache_aggregate_entries(entry, existing);
+        free(entry);
+        return;
+    }
+
+    hmap_insert(&exporter->cache_flow_key_map, &entry->flow_key_map_node,
+                ipfix_hash_flow_key(&entry->flow_key, 0));
+
+    /* As the latest entry added into the cache, it should logically have
+     * the highest flow_start_timestamp_usec, so appending at the tail keeps
+     * the list ordered. */
+    list_push_back(&exporter->cache_flow_start_timestamp_list,
+                   &entry->cache_flow_start_timestamp_list_node);
+
+    /* Enforce exporter->cache_max_flows by expiring immediately when the
+     * insertion pushed the cache over the limit. */
+    if (hmap_count(&exporter->cache_flow_key_map)
+        > exporter->cache_max_flows) {
+        dpif_ipfix_cache_expire_now(exporter, false);
+    }
+}
+