/*
 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
25 /* Initializes 'hmap' as an empty hash table. */
27 hmap_init(struct hmap *hmap)
29 hmap->buckets = &hmap->one;
35 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
36 * the nodes themselves, if necessary. */
38 hmap_destroy(struct hmap *hmap)
40 if (hmap && hmap->buckets != &hmap->one) {
45 /* Exchanges hash maps 'a' and 'b'. */
47 hmap_swap(struct hmap *a, struct hmap *b)
56 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
59 hmap_moved(struct hmap *hmap)
62 hmap->buckets = &hmap->one;
67 resize(struct hmap *hmap, size_t new_mask)
72 assert(!(new_mask & (new_mask + 1)));
73 assert(new_mask != SIZE_MAX);
77 tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
79 for (i = 0; i <= tmp.mask; i++) {
80 tmp.buckets[i] = NULL;
83 for (i = 0; i <= hmap->mask; i++) {
84 struct hmap_node *node, *next;
86 for (node = hmap->buckets[i]; node; node = next) {
88 hmap_insert_fast(&tmp, node, node->hash);
92 COVERAGE_INC(hmap_pathological);
95 hmap_swap(hmap, &tmp);
100 calc_mask(size_t capacity)
102 size_t mask = capacity / 2;
108 #if SIZE_MAX > UINT32_MAX
112 /* If we need to dynamically allocate buckets we might as well allocate at
113 * least 4 of them. */
114 mask |= (mask & 1) << 1;
119 /* Expands 'hmap', if necessary, to optimize the performance of searches. */
121 hmap_expand(struct hmap *hmap)
123 size_t new_mask = calc_mask(hmap->n);
124 if (new_mask > hmap->mask) {
125 COVERAGE_INC(hmap_expand);
126 resize(hmap, new_mask);
130 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
132 hmap_shrink(struct hmap *hmap)
134 size_t new_mask = calc_mask(hmap->n);
135 if (new_mask < hmap->mask) {
136 COVERAGE_INC(hmap_shrink);
137 resize(hmap, new_mask);
141 /* Expands 'hmap', if necessary, to optimize the performance of searches when
142 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
143 * allocated capacity is much higher than its current number of nodes.) */
145 hmap_reserve(struct hmap *hmap, size_t n)
147 size_t new_mask = calc_mask(n);
148 if (new_mask > hmap->mask) {
149 COVERAGE_INC(hmap_reserve);
150 resize(hmap, new_mask);
154 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
155 * to 'node' (e.g. due to realloc()). */
157 hmap_node_moved(struct hmap *hmap,
158 struct hmap_node *old_node, struct hmap_node *node)
160 struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
161 while (*bucket != old_node) {
162 bucket = &(*bucket)->next;
167 /* Chooses and returns a randomly selected node from 'hmap', which must not be
170 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
171 * But it does at least ensure that any node in 'hmap' can be chosen. */
173 hmap_random_node(const struct hmap *hmap)
175 struct hmap_node *bucket, *node;
178 /* Choose a random non-empty bucket. */
179 for (i = random_uint32(); ; i++) {
180 bucket = hmap->buckets[i & hmap->mask];
186 /* Count nodes in bucket. */
188 for (node = bucket; node; node = node->next) {
192 /* Choose random node from bucket. */
194 for (node = bucket; i-- > 0; node = node->next) {