/*
 * Copyright (c) 2008, 2009, 2010, 2012 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Per-event coverage counters instrumenting hash-map behavior: pathological
 * bucket chains, table expansion, table shrinking, and capacity reservation.
 * NOTE(review): the leading numerals on these and all following lines appear
 * to be original-file line numbers baked in by an extraction tool -- confirm
 * against the upstream source. */
25 COVERAGE_DEFINE(hmap_pathological);
26 COVERAGE_DEFINE(hmap_expand);
27 COVERAGE_DEFINE(hmap_shrink);
28 COVERAGE_DEFINE(hmap_reserve);
30 /* Initializes 'hmap' as an empty hash table. */
/* NOTE(review): this extract is incomplete -- the return type, braces, and
 * the initialization of the remaining 'hmap' members (presumably 'one',
 * 'mask', and 'n') are not visible here.  The visible line points 'buckets'
 * at the struct's own inline single-bucket storage, so an empty map needs
 * no heap allocation.  Confirm against the full source. */
32 hmap_init(struct hmap *hmap)
34 hmap->buckets = &hmap->one;
40 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
41 * the nodes themselves, if necessary. */
/* NOTE(review): only the guard is visible below; the call that actually
 * frees the dynamically allocated bucket array (the 'then' branch) is
 * missing from this extract.  The guard correctly skips freeing when
 * 'buckets' still points at the inline storage '&hmap->one', which was
 * never heap-allocated. */
43 hmap_destroy(struct hmap *hmap)
45 if (hmap && hmap->buckets != &hmap->one) {
50 /* Removes all nodes from 'hmap', leaving it ready to accept more nodes. Does
51 * not free memory allocated for 'hmap'.
53 * This function is appropriate when 'hmap' will soon have about as many
54 * elements as it did before. If 'hmap' will likely have fewer elements than
55 * before, use hmap_destroy() followed by hmap_init() to save memory and
/* NOTE(review): the line above originally read "hmap_destroy() followed by
 * hmap_clear()", which would be circular (clearing a destroyed map); the
 * usual pairing with hmap_destroy() is hmap_init() -- confirm against the
 * full source.  The body zeroes every bucket pointer ('mask + 1' buckets);
 * resetting 'hmap->n' is presumably done on a line elided here. */
58 hmap_clear(struct hmap *hmap)
62 memset(hmap->buckets, 0, (hmap->mask + 1) * sizeof *hmap->buckets);
66 /* Exchanges hash maps 'a' and 'b'. */
/* NOTE(review): the body is entirely elided in this extract.  A complete
 * swap presumably also has to re-point each map's 'buckets' member at its
 * own inline 'one' storage where applicable (cf. hmap_moved() below) --
 * confirm against the full source. */
68 hmap_swap(struct hmap *a, struct hmap *b)
77 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
/* NOTE(review): the tail of the comment above (presumably "...to realloc()")
 * is cut off in this extract, as is any guard around the assignment below.
 * The visible line re-points 'buckets' at the struct's own inline storage,
 * which is only correct when the map was using inline storage before the
 * move -- confirm the elided guard against the full source. */
80 hmap_moved(struct hmap *hmap)
83 hmap->buckets = &hmap->one;
/* Resizes 'hmap' so that its bucket array has 'new_mask' + 1 entries,
 * rehashing every node into a temporary map 'tmp' and then swapping 'tmp'
 * into place.  The asserts require 'new_mask' to be of the form 2^k - 1
 * (one less than a power of two) and to leave room for the '+ 1' below.
 * NOTE(review): several lines are elided from this extract -- the setup of
 * 'tmp' (hmap_init and tmp.mask = new_mask, presumably), the
 * 'next = node->next' step that makes the chain walk safe across
 * reinsertion, the per-bucket chain-length bookkeeping that feeds the
 * pathological counter, and the destruction of 'tmp' after the swap.
 * Confirm against the full source. */
88 resize(struct hmap *hmap, size_t new_mask)
93 ovs_assert(!(new_mask & (new_mask + 1)));
94 ovs_assert(new_mask != SIZE_MAX);
98 tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
100 for (i = 0; i <= tmp.mask; i++) {
101 tmp.buckets[i] = NULL;
104 for (i = 0; i <= hmap->mask; i++) {
105 struct hmap_node *node, *next;
107 for (node = hmap->buckets[i]; node; node = next) {
109 hmap_insert_fast(&tmp, node, node->hash);
113 COVERAGE_INC(hmap_pathological);
116 hmap_swap(hmap, &tmp);
/* Returns the bucket mask appropriate for a map expected to hold 'capacity'
 * elements: starts from capacity / 2 and -- on lines elided from this
 * extract -- presumably smears the high bit downward to yield a value of
 * the form 2^k - 1.  The '#if SIZE_MAX > UINT32_MAX' looks like it guards
 * an extra smearing step needed on 64-bit size_t; confirm against the full
 * source. */
121 calc_mask(size_t capacity)
123 size_t mask = capacity / 2;
129 #if SIZE_MAX > UINT32_MAX
133 /* If we need to dynamically allocate buckets we might as well allocate at
134 * least 4 of them. */
135 mask |= (mask & 1) << 1;
140 /* Expands 'hmap', if necessary, to optimize the performance of searches. */
/* Grows the bucket array only when the mask implied by the current element
 * count 'hmap->n' exceeds the present mask; bumps the 'hmap_expand'
 * coverage counter on each actual resize.  NOTE(review): closing braces
 * are elided from this extract. */
142 hmap_expand(struct hmap *hmap)
144 size_t new_mask = calc_mask(hmap->n);
145 if (new_mask > hmap->mask) {
146 COVERAGE_INC(hmap_expand);
147 resize(hmap, new_mask);
151 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
/* Mirror image of hmap_expand(): shrinks the bucket array only when the
 * mask implied by the current element count is strictly smaller than the
 * present mask; bumps the 'hmap_shrink' coverage counter on each actual
 * resize.  NOTE(review): closing braces are elided from this extract. */
153 hmap_shrink(struct hmap *hmap)
155 size_t new_mask = calc_mask(hmap->n);
156 if (new_mask < hmap->mask) {
157 COVERAGE_INC(hmap_shrink);
158 resize(hmap, new_mask);
162 /* Expands 'hmap', if necessary, to optimize the performance of searches when
163 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
164 * allocated capacity is much higher than its current number of nodes.) */
/* Like hmap_expand() but sizes for a caller-supplied element count 'n'
 * rather than the current population; only ever grows the table, never
 * shrinks it.  NOTE(review): closing braces are elided from this extract. */
166 hmap_reserve(struct hmap *hmap, size_t n)
168 size_t new_mask = calc_mask(n);
169 if (new_mask > hmap->mask) {
170 COVERAGE_INC(hmap_reserve);
171 resize(hmap, new_mask);
175 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
176 * to 'node' (e.g. due to realloc()). */
/* Walks the chain of the bucket that 'node' hashes into until it finds the
 * link currently pointing at 'old_node'.  NOTE(review): the assignment that
 * actually redirects that link to 'node' (presumably '*bucket = node;'
 * after the loop) is elided from this extract -- confirm against the full
 * source.  The loop assumes 'old_node' is present in the chain; it does
 * not terminate otherwise. */
178 hmap_node_moved(struct hmap *hmap,
179 struct hmap_node *old_node, struct hmap_node *node)
181 struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
182 while (*bucket != old_node) {
183 bucket = &(*bucket)->next;
188 /* Chooses and returns a randomly selected node from 'hmap', which must not be
/* NOTE(review): the first sentence above is cut off -- presumably
 * "...must not be empty".  An empty map would make the bucket-search loop
 * below spin forever, which is consistent with that precondition.  Also
 * elided from this extract: the non-empty-bucket break, the chain-length
 * counter increment, the reduction of 'i' into the bucket's node count,
 * and the final return.  Confirm against the full source. */
191 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
192 * But it does at least ensure that any node in 'hmap' can be chosen. */
194 hmap_random_node(const struct hmap *hmap)
196 struct hmap_node *bucket, *node;
199 /* Choose a random non-empty bucket. */
200 for (i = random_uint32(); ; i++) {
201 bucket = hmap->buckets[i & hmap->mask];
207 /* Count nodes in bucket. */
209 for (node = bucket; node; node = node->next) {
213 /* Choose random node from bucket. */
215 for (node = bucket; i-- > 0; node = node->next) {
221 /* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
222 * 'hmap'. Uses '*bucketp' and '*offsetp' to determine where to begin
223 * iteration, and stores new values to pass on the next iteration into them
/* NOTE(review): the tail of the sentence above (presumably "...before
 * returning") is cut off in this extract. */
226 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
227 * faster and better at dealing with hmaps that change during iteration.
229 * Before beginning iteration, store 0 into '*bucketp' and '*offsetp'.
/* Resumable cursor: scans buckets from '*bucketp' and, within the matching
 * bucket, skips 'offset' nodes to find the next one to return.  The two
 * '*bucketp' stores visible below look like the two resume cases -- stay in
 * this bucket at offset + 1, or advance past it.  NOTE(review): the actual
 * 'return node' statements, the branch selecting between the two stores
 * (presumably on node->next), the reset of '*offsetp' when advancing, the
 * 'offset' initialization from '*offsetp', and the final 'return NULL' are
 * all elided from this extract -- confirm against the full source. */
232 hmap_at_position(const struct hmap *hmap,
233 uint32_t *bucketp, uint32_t *offsetp)
239 for (b_idx = *bucketp; b_idx <= hmap->mask; b_idx++) {
240 struct hmap_node *node;
243 for (n_idx = 0, node = hmap->buckets[b_idx]; node != NULL;
244 n_idx++, node = node->next) {
245 if (n_idx == offset) {
247 *bucketp = node->hash & hmap->mask;
248 *offsetp = offset + 1;
250 *bucketp = (node->hash & hmap->mask) + 1;
264 /* Returns true if 'node' is in 'hmap', false otherwise. */
266 hmap_contains(const struct hmap *hmap, const struct hmap_node *node)
270 for (p = hmap_first_in_bucket(hmap, node->hash); p; p = p->next) {