/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Coverage counters that let operators observe how often the hash table
 * resizes and how often a pathologically long bucket chain is seen. */
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);
30 /* Initializes 'hmap' as an empty hash table. */
32 hmap_init(struct hmap *hmap)
34 hmap->buckets = &hmap->one;
40 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
41 * the nodes themselves, if necessary. */
43 hmap_destroy(struct hmap *hmap)
45 if (hmap && hmap->buckets != &hmap->one) {
50 /* Removes all node from 'hmap', leaving it ready to accept more nodes. Does
51 * not free memory allocated for 'hmap'.
53 * This function is appropriate when 'hmap' will soon have about as many
54 * elements as it before. If 'hmap' will likely have fewer elements than
55 * before, use hmap_destroy() followed by hmap_clear() to save memory and
58 hmap_clear(struct hmap *hmap)
62 memset(hmap->buckets, 0, (hmap->mask + 1) * sizeof *hmap->buckets);
66 /* Exchanges hash maps 'a' and 'b'. */
68 hmap_swap(struct hmap *a, struct hmap *b)
77 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
80 hmap_moved(struct hmap *hmap)
83 hmap->buckets = &hmap->one;
88 resize(struct hmap *hmap, size_t new_mask)
93 ovs_assert(is_pow2(new_mask + 1));
97 tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
99 for (i = 0; i <= tmp.mask; i++) {
100 tmp.buckets[i] = NULL;
103 for (i = 0; i <= hmap->mask; i++) {
104 struct hmap_node *node, *next;
106 for (node = hmap->buckets[i]; node; node = next) {
108 hmap_insert_fast(&tmp, node, node->hash);
112 COVERAGE_INC(hmap_pathological);
115 hmap_swap(hmap, &tmp);
120 calc_mask(size_t capacity)
122 size_t mask = capacity / 2;
128 #if SIZE_MAX > UINT32_MAX
132 /* If we need to dynamically allocate buckets we might as well allocate at
133 * least 4 of them. */
134 mask |= (mask & 1) << 1;
139 /* Expands 'hmap', if necessary, to optimize the performance of searches. */
141 hmap_expand(struct hmap *hmap)
143 size_t new_mask = calc_mask(hmap->n);
144 if (new_mask > hmap->mask) {
145 COVERAGE_INC(hmap_expand);
146 resize(hmap, new_mask);
150 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
152 hmap_shrink(struct hmap *hmap)
154 size_t new_mask = calc_mask(hmap->n);
155 if (new_mask < hmap->mask) {
156 COVERAGE_INC(hmap_shrink);
157 resize(hmap, new_mask);
161 /* Expands 'hmap', if necessary, to optimize the performance of searches when
162 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
163 * allocated capacity is much higher than its current number of nodes.) */
165 hmap_reserve(struct hmap *hmap, size_t n)
167 size_t new_mask = calc_mask(n);
168 if (new_mask > hmap->mask) {
169 COVERAGE_INC(hmap_reserve);
170 resize(hmap, new_mask);
174 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
175 * to 'node' (e.g. due to realloc()). */
177 hmap_node_moved(struct hmap *hmap,
178 struct hmap_node *old_node, struct hmap_node *node)
180 struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
181 while (*bucket != old_node) {
182 bucket = &(*bucket)->next;
187 /* Chooses and returns a randomly selected node from 'hmap', which must not be
190 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
191 * But it does at least ensure that any node in 'hmap' can be chosen. */
193 hmap_random_node(const struct hmap *hmap)
195 struct hmap_node *bucket, *node;
198 /* Choose a random non-empty bucket. */
199 for (i = random_uint32(); ; i++) {
200 bucket = hmap->buckets[i & hmap->mask];
206 /* Count nodes in bucket. */
208 for (node = bucket; node; node = node->next) {
212 /* Choose random node from bucket. */
214 for (node = bucket; i-- > 0; node = node->next) {
220 /* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
221 * 'hmap'. Uses '*bucketp' and '*offsetp' to determine where to begin
222 * iteration, and stores new values to pass on the next iteration into them
225 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
226 * faster and better at dealing with hmaps that change during iteration.
228 * Before beginning iteration, store 0 into '*bucketp' and '*offsetp'.
231 hmap_at_position(const struct hmap *hmap,
232 uint32_t *bucketp, uint32_t *offsetp)
238 for (b_idx = *bucketp; b_idx <= hmap->mask; b_idx++) {
239 struct hmap_node *node;
242 for (n_idx = 0, node = hmap->buckets[b_idx]; node != NULL;
243 n_idx++, node = node->next) {
244 if (n_idx == offset) {
246 *bucketp = node->hash & hmap->mask;
247 *offsetp = offset + 1;
249 *bucketp = (node->hash & hmap->mask) + 1;
263 /* Returns true if 'node' is in 'hmap', false otherwise. */
265 hmap_contains(const struct hmap *hmap, const struct hmap_node *node)
269 for (p = hmap_first_in_bucket(hmap, node->hash); p; p = p->next) {