/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for them to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool), so we don't need to go to the memory "store" during an id
 * allocation and you don't need to be concerned about locking conflicts
 * with the slab allocator.
 */

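/*
 * A minimal lifecycle sketch (hypothetical caller code, not part of
 * this file; "my_idr" and "obj" are made-up names):
 *
 *	static struct idr my_idr;
 *
 *	idr_init(&my_idr);
 *	... allocate an id for obj; see idr_pre_get()/idr_get_new() ...
 *	obj = idr_find(&my_idr, id);	(translate id back to pointer)
 *	idr_remove(&my_idr, id);	(release the id)
 *	idr_destroy(&my_idr);		(drop the cached free layers)
 */
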
#ifndef TEST                        /* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}
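
/*
 * The free pool is a singly linked list threaded through ary[0] of
 * each cached layer (safe because a free layer carries no live
 * children, so its ary[0] slot can double as the list link):
 *
 *	idp->id_free -> layer A -> layer B -> NULL
 *	                (A->ary[0] == B, B->ary[0] == NULL)
 */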

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the layer being handed back already being zeroed,
	 * since alloc_layer() gives it out again without clearing it.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp:	idr handle
 * @gfp_mask:	memory allocation flags
 *
 * This function should be called prior to locking and calling
 * idr_get_new() or idr_get_new_above().  It preallocates enough
 * memory to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
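
/*
 * The canonical calling pattern is a retry loop along these lines
 * (a sketch; "my_lock", "my_idr" and "obj" are hypothetical):
 *
 *	int id, ret;
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = idr_get_new(&my_idr, obj, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *	if (ret)
 *		return ret;	(ret is then -ENOSPC)
 *
 * idr_pre_get() may sleep (depending on @gfp_mask), which is why it
 * must run before the caller's lock is taken.
 */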

static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while loop until we reach the leaf
		 * node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available; go back to the previous layer */
			l++;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full, mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return id;
}

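/*
 * A worked example of how an id decomposes into per-layer indices,
 * assuming IDR_BITS == 5 (32-bit longs; it is 6 with 64-bit longs):
 *
 *	id = 1000 in a two-layer tree:
 *	  layer 1 index = (1000 >> 5) & IDR_MASK = 31
 *	  layer 0 index =  1000       & IDR_MASK =  8
 *
 * So one layer covers ids 0..31, two layers cover ids 0..1023, and
 * each additional layer multiplies the range by IDR_SIZE
 * (1 << IDR_BITS).
 */
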
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return v;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock,
 * go back to the idr_pre_get() call and then retry.  If the idr is
 * full, it will return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
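
/*
 * For example, a caller that wants to keep id 0 reserved could, under
 * its own lock and after idr_pre_get(), do something along these lines
 * ("my_idr" and "obj" are hypothetical):
 *
 *	int id, ret;
 *
 *	ret = idr_get_new_above(&my_idr, obj, 1, &id);
 *	(on success, id >= 1)
 */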

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock,
 * go back to the idr_pre_get() call and then retry.  If the idr is
 * full, it will return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * The top layer has a single child in its leftmost
		 * slot, so we can drop a layer.
		 */
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	/* Trim the free-layer pool back down to IDR_FREE_MAX entries. */
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer registered with the given @id.  A %NULL return
 * indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
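
/*
 * Because %NULL means "not found", a lookup under the caller's own
 * lock is a short sketch ("my_lock", "my_idr" and "obj" hypothetical):
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!obj)
 *		(id was never allocated, or %NULL was stored for it)
 */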

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * An ERR_PTR(%-ENOENT) return indicates that @id was not found.
 * An ERR_PTR(%-EINVAL) return indicates that @id was not within valid
 * constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
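
/*
 * Since failures come back as ERR_PTR() values rather than %NULL,
 * callers should check with IS_ERR(), e.g. (a sketch; "my_idr" and
 * "new_obj" are hypothetical):
 *
 *	old_obj = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old_obj))
 *		return PTR_ERR(old_obj);
 */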

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
		unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp:	idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);