4 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
5 * Copyright (C) 2002 by Concurrent Computer Corporation
6 * Distributed under the GNU GPL license version 2.
8 * Small id to pointer translation service.
10 * It uses a radix tree like structure as a sparse array indexed
11 * by the id to obtain the pointer. The bitmap makes allocating
14 * Modified by George Anzinger to reuse immediately and to use
15 * find bit instructions. Also removed _irq on spinlocks.
17 * So here is what this bit of code does:
19 * You call it to allocate an id (an int) and associate with that id a
20 * pointer or whatever; we treat it as a (void *). You can pass this
21 * id to a user for him to pass back at a later time. You then pass
22 * that id to this code and it returns your pointer.
24 * You can release ids at any time. When all ids are released, most of
25 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
26 * don't need to go to the memory "store" during an id allocate, just
27 * so you don't need to be too concerned about locking and conflicts
28 * with the slab allocator.
30 * What you need to do is, since we don't keep the counter as part of
31 * id / ptr pair, to keep a copy of it in the pointed-to structure
32 * (or elsewhere) so that when you ask for a ptr you can verify that
33 * the returned ptr is correct by comparing the id it contains with the one
34 * you asked for. In other words, we only did half the reuse protection.
35 * Since the code depends on your code doing this check, we ignore high
36 * order bits in the id, not just the count, but bits that would, if used,
37 * index outside of the allocated ids. In other words, if the largest id
38 * currently allocated is 32 a look up will only look at the low 5 bits of
39 * the id. Since you will want to keep this id in the structure anyway
40 * (if for no other reason than to be able to eliminate the id when the
41 * structure is found in some other way) this seems reasonable. If you
42 * really think otherwise, the code to check these bits is here; it is
43 * just disabled with a #if 0.
46 * So here are the complete details:
48 * include <linux/idr.h>
50 * void idr_init(struct idr *idp)
52 * This function is used to set up the handle (idp) that you will pass
53 * to the rest of the functions. The structure is defined in the
56 * int idr_pre_get(struct idr *idp, unsigned gfp_mask)
58 * This function should be called prior to locking and calling the
59 * following function. It pre allocates enough memory to satisfy the
60 * worst possible allocation. Unless gfp_mask is GFP_ATOMIC, it can
61 * sleep, so must not be called with any spinlocks held. If the system is
62 * REALLY out of memory this function returns 0, otherwise 1.
64 * int idr_get_new(struct idr *idp, void *ptr, int *id);
66 * This is the allocate id function. It should be called with any
67 * required locks. In fact, in the SMP case, you MUST lock prior to
68 * calling this function to avoid possible out of memory problems.
69 * If memory is required, it will return -EAGAIN, you should unlock
70 * and go back to the idr_pre_get() call. If the idr is full, it
71 * will return a -ENOSPC. ptr is the pointer you want associated
72 * with the id. The value is returned in the "id" field. idr_get_new()
73 * returns a value in the range 0 ... 0x7fffffff
75 * int idr_get_new_above(struct idr *idp, void *ptr, int start_id, int *id);
77 * Like idr_get_new(), but the returned id is guaranteed to be at or
80 * void *idr_find(struct idr *idp, int id);
82 * returns the "ptr", given the id. A NULL return indicates that the
83 * id is not valid (or you passed NULL in the idr_get_new(), shame on
84 * you). This function must be called with a spinlock that prevents
85 * calling either idr_get_new() or idr_remove() or idr_find() while it
88 * void idr_remove(struct idr *idp, int id);
90 * removes the given id, freeing that slot and any memory that may
91 * now be unused. See idr_find() for locking restrictions.
93 * int idr_full(struct idr *idp);
95 * Returns true if the idr is full and false if not.
101 #ifndef TEST // to test in user space...
102 #include <linux/slab.h>
103 #include <linux/init.h>
104 #include <linux/module.h>
106 #include <linux/string.h>
107 #include <linux/idr.h>
110 static kmem_cache_t *idr_layer_cache;
114 static struct idr_layer *alloc_layer(struct idr *idp)
/*
 * Pop one pre-allocated idr_layer off the idr's private free list
 * (idp->id_free), under idp->lock.
 * NOTE(review): interior lines are missing from this extract (the
 * declaration of p, the empty-list path, the free-count bookkeeping);
 * code is left byte-identical and only annotated.
 */
118 spin_lock(&idp->lock);
119 if (!(p = idp->id_free))
/* the free list is threaded through ary[0] of each spare layer */
121 idp->id_free = p->ary[0];
124 spin_unlock(&idp->lock);
128 static void free_layer(struct idr *idp, struct idr_layer *p)
/*
 * Push layer p back onto the idr's private free list, under idp->lock.
 * Counterpart of alloc_layer(); the list is threaded through ary[0].
 * NOTE(review): some lines of the original body are missing here.
 */
131 * Depends on the return element being zeroed.
133 spin_lock(&idp->lock);
/* link p at the head of the free list, as alloc_layer() expects */
134 p->ary[0] = idp->id_free;
137 spin_unlock(&idp->lock);
140 int idr_pre_get(struct idr *idp, unsigned gfp_mask)
/*
 * Top up the idr's private free list to IDR_FREE_MAX layers so that a
 * subsequent idr_get_new*() call cannot fail for lack of memory.  Per
 * the file header: may sleep unless gfp_mask is GFP_ATOMIC, so do not
 * call with spinlocks held; returns 0 only if the system is really out
 * of memory, otherwise 1.
 * NOTE(review): the allocation-failure early return is not visible in
 * this extract.
 */
142 while (idp->id_free_cnt < IDR_FREE_MAX) {
143 struct idr_layer *new;
144 new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
/* stash the freshly allocated layer on the free list */
147 free_layer(idp, new);
151 EXPORT_SYMBOL(idr_pre_get);
153 static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
/*
 * Walk the radix tree from the top toward a leaf looking for a free
 * slot at or above *starting_id; on success plant ptr in that slot,
 * mark it allocated, and propagate "full" bits up toward the root.
 * NOTE(review): many interior lines (loop heads, backtracking via the
 * pa[] path stack, return statements) are missing from this extract;
 * code left byte-identical, comments only.
 */
156 struct idr_layer *p, *new;
/* pa[] records the path of layers visited, for walking back up */
157 struct idr_layer *pa[MAX_LEVEL];
167 * We run around this while until we reach the leaf node...
/* digit of id selecting the slot within the level-l layer */
169 n = (id >> (IDR_BITS*l)) & IDR_MASK;
/* first free (zero) bit at or after n in this layer's bitmap */
171 m = find_next_bit(&bm, IDR_SIZE, n);
173 /* no space available go back to previous layer. */
/* advance id to the first slot of the next subtree at this level */
175 id = (id | ((1 << (IDR_BITS*l))-1)) + 1;
/* substitute the free slot m for digit n (sh presumably IDR_BITS*l
 * -- the shift computation is not visible here; confirm) */
184 id = ((id >> sh) ^ n ^ m) << sh;
/* id walked past the largest representable value: tree is full */
186 if ((id >= MAX_ID_BIT) || (id < 0))
191 * Create the layer below if it is missing.
/* no spare layer on the free list -> caller must idr_pre_get() */
194 if (!(new = alloc_layer(idp)))
203 * We have reached the leaf node, plant the
204 * users pointer and return the raw id.
206 p->ary[m] = (struct idr_layer *)ptr;
207 __set_bit(m, &p->bitmap);
210 * If this layer is full mark the bit in the layer above
211 * to show that this part of the radix tree is full.
212 * This may complete the layer above and require walking
216 while (p->bitmap == IDR_FULL) {
220 __set_bit((n & IDR_MASK), &p->bitmap);
225 static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
/*
 * Common worker for idr_get_new() / idr_get_new_above(): grow the tree
 * upward until it can hold starting_id, then delegate the actual slot
 * search to sub_alloc().  Returns the raw id or a negative internal
 * code (translated to errno by the public wrappers).
 * NOTE(review): several interior lines (initial empty-tree setup, the
 * loop body wiring new layers in, the return) are missing from this
 * extract; code left byte-identical.
 */
227 struct idr_layer *p, *new;
233 layers = idp->layers;
/* tree is empty: pull a first layer off the free list */
235 if (!(p = alloc_layer(idp)))
240 * Add a new layer to the top of the tree if the requested
241 * id is larger than the currently allocated space.
/* each extra layer multiplies the id space by 2^IDR_BITS */
243 while ((layers < MAX_LEVEL) && (id >= (1 << (layers*IDR_BITS)))) {
247 if (!(new = alloc_layer(idp))) {
249 * The allocation failed. If we built part of
250 * the structure tear it down.
/* unwind the partially-built chain of new top layers */
252 for (new = p; p && p != idp->top; new = p) {
/* free_layer() depends on the returned element being zeroed */
255 new->bitmap = new->count = 0;
256 free_layer(idp, new);
/* if the old top was full, reflect that in the new top's bitmap */
262 if (p->bitmap == IDR_FULL)
263 __set_bit(0, &new->bitmap);
267 idp->layers = layers;
268 v = sub_alloc(idp, ptr, &id);
274 int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
/*
 * Public wrapper: allocate an id at or above starting_id, associate ptr
 * with it, and store the result through *id.  Translates the worker's
 * internal negative codes into proper errno values (-EAGAIN when
 * idr_pre_get() must be called again, -ENOSPC when the idr is full,
 * per the file header).
 * NOTE(review): the error-translation branches are only partially
 * visible in this extract.
 */
277 rv = idr_get_new_above_int(idp, ptr, starting_id);
279 * This is a cheap hack until the IDR code can be fixed to
280 * return proper error values.
285 else /* Will be -3 */
291 EXPORT_SYMBOL(idr_get_new_above);
293 int idr_get_new(struct idr *idp, void *ptr, int *id)
/*
 * Public wrapper: allocate the lowest free id (starting from 0),
 * associate ptr with it, and store the result through *id.  Same
 * error-code translation as idr_get_new_above().
 * NOTE(review): the error-translation branches are only partially
 * visible in this extract.
 */
296 rv = idr_get_new_above_int(idp, ptr, 0);
298 * This is a cheap hack until the IDR code can be fixed to
299 * return proper error values.
304 else /* Will be -3 */
310 EXPORT_SYMBOL(idr_get_new);
312 static void sub_remove(struct idr *idp, int shift, int id)
/*
 * Walk from the top layer down to the leaf holding id, clearing the
 * "full" bitmap bits along the path, then free any layers whose count
 * drops to zero on the way back up.
 * NOTE(review): the path-recording statements inside the descent loop
 * and the leaf ary[] clearing are missing from this extract; code left
 * byte-identical.
 */
314 struct idr_layer *p = idp->top;
/* pa[]/paa record the addresses of the ary[] slots visited */
315 struct idr_layer **pa[MAX_LEVEL];
316 struct idr_layer ***paa = &pa[0];
321 while ((shift > 0) && p) {
322 int n = (id >> shift) & IDR_MASK;
/* this subtree is no longer full once id is removed */
323 __clear_bit(n, &p->bitmap);
328 if (likely(p != NULL)){
329 int n = id & IDR_MASK;
330 __clear_bit(n, &p->bitmap);
/* walk back up, releasing layers that became empty */
332 while(*paa && ! --((**paa)->count)){
333 free_layer(idp, **paa);
341 void idr_remove(struct idr *idp, int id)
/*
 * Remove the given id, freeing the slot and any tree layers that become
 * unused; then trim the private free list back down to IDR_FREE_MAX.
 * See idr_find() in the file header for locking restrictions.
 * NOTE(review): the declaration of p, the id-masking statement and some
 * closing braces are missing from this extract.
 */
345 /* Mask off upper bits we don't use for the search. */
348 sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
/* a top layer with a single child in slot 0 is redundant */
349 if ( idp->top && idp->top->count == 1 &&
351 idp->top->ary[0]){ // We can drop a layer
353 p = idp->top->ary[0];
/* free_layer() depends on the returned element being zeroed */
354 idp->top->bitmap = idp->top->count = 0;
355 free_layer(idp, idp->top);
/* return surplus cached layers to the slab allocator */
359 while (idp->id_free_cnt >= IDR_FREE_MAX) {
361 p = alloc_layer(idp);
362 kmem_cache_free(idr_layer_cache, p);
366 EXPORT_SYMBOL(idr_remove);
368 void *idr_find(struct idr *idp, int id)
/*
 * Look up the pointer associated with id by walking the radix tree from
 * the top; returns NULL if the id is not present.  Per the file header,
 * only the low bits actually covered by the current tree are consulted.
 * NOTE(review): the descent loop around the ary[] step and the return
 * statement are missing from this extract; code left byte-identical.
 */
373 n = idp->layers * IDR_BITS;
377 * This tests to see if bits outside the current tree are
378 * present. If so, tain't one of ours!
380 if ( unlikely( (id & ~(~0 << MAX_ID_SHIFT)) >> (n + IDR_BITS)))
383 /* Mask off upper bits we don't use for the search. */
/* one step down: pick the child indexed by this level's id digit */
388 p = p->ary[(id >> n) & IDR_MASK];
392 EXPORT_SYMBOL(idr_find);
394 static void idr_cache_ctor(void * idr_layer,
395 kmem_cache_t *idr_layer_cache, unsigned long flags)
/*
 * Slab constructor: zero each idr_layer as it is created, which is what
 * free_layer()'s "depends on the return element being zeroed" contract
 * relies on.
 */
397 memset(idr_layer, 0, sizeof(struct idr_layer));
400 static int init_id_cache(void)
/*
 * Create the idr_layer slab cache on first use (idempotent: skipped if
 * the cache already exists).
 * NOTE(review): the return statement is missing from this extract.
 */
402 if (!idr_layer_cache)
403 idr_layer_cache = kmem_cache_create("idr_layer_cache",
404 sizeof(struct idr_layer), 0, 0, idr_cache_ctor, 0);
408 void idr_init(struct idr *idp)
/*
 * Initialize an idr handle: zero the whole structure (empty tree, empty
 * free list) and set up its spinlock.  Must be called before any other
 * idr_* function on idp.
 */
411 memset(idp, 0, sizeof(struct idr));
412 spin_lock_init(&idp->lock);
414 EXPORT_SYMBOL(idr_init);