Linux v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2#include <linux/bitmap.h>
  3#include <linux/bug.h>
  4#include <linux/export.h>
  5#include <linux/idr.h>
  6#include <linux/slab.h>
  7#include <linux/spinlock.h>
  8#include <linux/xarray.h>
  9
 10/**
 11 * idr_alloc_u32() - Allocate an ID.
 12 * @idr: IDR handle.
 13 * @ptr: Pointer to be associated with the new ID.
 14 * @nextid: Pointer to an ID.
 15 * @max: The maximum ID to allocate (inclusive).
 16 * @gfp: Memory allocation flags.
 17 *
 18 * Allocates an unused ID in the range specified by @nextid and @max.
 19 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 20 * is exclusive.  The new ID is assigned to @nextid before the pointer
 21 * is inserted into the IDR, so if @nextid points into the object pointed
 22 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 23 *
 24 * The caller should provide their own locking to ensure that two
 25 * concurrent modifications to the IDR are not possible.  Read-only
 26 * accesses to the IDR may be done under the RCU read lock or may
 27 * exclude simultaneous writers.
 28 *
 29 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 30 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 31 * @nextid is unchanged.
 32 */
 33int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 34			unsigned long max, gfp_t gfp)
 35{
 36	struct radix_tree_iter iter;
 37	void __rcu **slot;
 38	unsigned int base = idr->idr_base;
 39	unsigned int id = *nextid;
 40
 41	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
 42		idr->idr_rt.xa_flags |= IDR_RT_MARKER;
 43
 44	id = (id < base) ? 0 : id - base;
 45	radix_tree_iter_init(&iter, id);
 46	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
 47	if (IS_ERR(slot))
 48		return PTR_ERR(slot);
 49
 50	*nextid = iter.index + base;
 51	/* there is a memory barrier inside radix_tree_iter_replace() */
 52	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
 53	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
 54
 55	return 0;
 56}
 57EXPORT_SYMBOL_GPL(idr_alloc_u32);
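
/*
 * Illustrative sketch, not part of lib/idr.c: because idr_alloc_u32() stores
 * the new ID through @nextid before publishing @ptr, an object can safely
 * carry its own ID field.  "struct foo", foo_lock and foo_idr below are
 * hypothetical names used only for this example.
 */
struct foo {
	u32 id;
	/* ... */
};

static DEFINE_SPINLOCK(foo_lock);
static DEFINE_IDR(foo_idr);

static int foo_install(struct foo *foo)
{
	int err;

	idr_preload(GFP_KERNEL);
	spin_lock(&foo_lock);
	foo->id = 0;		/* search from the lowest free ID */
	err = idr_alloc_u32(&foo_idr, foo, &foo->id, UINT_MAX, GFP_NOWAIT);
	spin_unlock(&foo_lock);
	idr_preload_end();

	return err;		/* 0 on success, -ENOMEM or -ENOSPC */
}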
 58
 59/**
 60 * idr_alloc() - Allocate an ID.
 61 * @idr: IDR handle.
 62 * @ptr: Pointer to be associated with the new ID.
 63 * @start: The minimum ID (inclusive).
 64 * @end: The maximum ID (exclusive).
 65 * @gfp: Memory allocation flags.
 66 *
 67 * Allocates an unused ID in the range specified by @start and @end.  If
 68 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 69 * callers to use @start + N as @end as long as N is within integer range.
 70 *
 71 * The caller should provide their own locking to ensure that two
 72 * concurrent modifications to the IDR are not possible.  Read-only
 73 * accesses to the IDR may be done under the RCU read lock or may
 74 * exclude simultaneous writers.
 75 *
 76 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 77 * or -ENOSPC if no free IDs could be found.
 78 */
 79int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
 80{
 81	u32 id = start;
 82	int ret;
 83
 84	if (WARN_ON_ONCE(start < 0))
 85		return -EINVAL;
 86
 87	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
 88	if (ret)
 89		return ret;
 90
 91	return id;
 92}
 93EXPORT_SYMBOL_GPL(idr_alloc);
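
/*
 * Illustrative sketch, not part of lib/idr.c: idr_alloc() takes an exclusive
 * @end, and @end <= 0 means "no upper bound below INT_MAX".  foo_idr and
 * foo_lock are the hypothetical objects from the example above.
 */
static int foo_get_id(struct foo *foo)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&foo_lock);
	/* any ID in [1, INT_MAX]; 0 is reserved by this (hypothetical) caller */
	id = idr_alloc(&foo_idr, foo, 1, 0, GFP_NOWAIT);
	spin_unlock(&foo_lock);
	idr_preload_end();

	return id;		/* the new ID, or -ENOMEM/-ENOSPC */
}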
 94
 95/**
 96 * idr_alloc_cyclic() - Allocate an ID cyclically.
 97 * @idr: IDR handle.
 98 * @ptr: Pointer to be associated with the new ID.
 99 * @start: The minimum ID (inclusive).
100 * @end: The maximum ID (exclusive).
101 * @gfp: Memory allocation flags.
102 *
103 * Allocates an unused ID in the range specified by @start and @end.  If
104 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
105 * callers to use @start + N as @end as long as N is within integer range.
106 * The search for an unused ID will start at the last ID allocated and will
107 * wrap around to @start if no free IDs are found before reaching @end.
108 *
109 * The caller should provide their own locking to ensure that two
110 * concurrent modifications to the IDR are not possible.  Read-only
111 * accesses to the IDR may be done under the RCU read lock or may
112 * exclude simultaneous writers.
113 *
114 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
115 * or -ENOSPC if no free IDs could be found.
116 */
117int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
118{
119	u32 id = idr->idr_next;
120	int err, max = end > 0 ? end - 1 : INT_MAX;
121
122	if ((int)id < start)
123		id = start;
124
125	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
126	if ((err == -ENOSPC) && (id > start)) {
127		id = start;
128		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
129	}
130	if (err)
131		return err;
132
133	idr->idr_next = id + 1;
134	return id;
135}
136EXPORT_SYMBOL(idr_alloc_cyclic);
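
/*
 * Illustrative sketch, not part of lib/idr.c: cyclic allocation is typically
 * used where quick reuse of a just-freed ID would be confusing (for example,
 * handles visible to userspace).  foo_idr and foo_lock are hypothetical.
 */
static int foo_new_handle(struct foo *foo)
{
	int id;

	spin_lock(&foo_lock);
	id = idr_alloc_cyclic(&foo_idr, foo, 1, 0, GFP_NOWAIT);
	spin_unlock(&foo_lock);

	return id;	/* continues after the last allocated ID, wrapping to 1 */
}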
137
138/**
139 * idr_remove() - Remove an ID from the IDR.
140 * @idr: IDR handle.
141 * @id: Pointer ID.
142 *
143 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
144 * this function returns %NULL.
145 *
146 * Since this function modifies the IDR, the caller should provide their
147 * own locking to ensure that concurrent modification of the same IDR is
148 * not possible.
149 *
150 * Return: The pointer formerly associated with this ID.
151 */
152void *idr_remove(struct idr *idr, unsigned long id)
153{
154	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
155}
156EXPORT_SYMBOL_GPL(idr_remove);
157
158/**
159 * idr_find() - Return pointer for given ID.
160 * @idr: IDR handle.
161 * @id: Pointer ID.
162 *
163 * Looks up the pointer associated with this ID.  A %NULL pointer may
164 * indicate that @id is not allocated or that the %NULL pointer was
165 * associated with this ID.
166 *
167 * This function can be called under rcu_read_lock(), given that the leaf
168 * pointers' lifetimes are correctly managed.
169 *
170 * Return: The pointer associated with this ID.
171 */
172void *idr_find(const struct idr *idr, unsigned long id)
173{
174	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
175}
176EXPORT_SYMBOL_GPL(idr_find);
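
/*
 * Illustrative sketch, not part of lib/idr.c: a lookup under the RCU read
 * lock.  The returned pointer is only safe to use inside the read-side
 * critical section unless the caller takes its own reference; foo_idr and
 * foo_use() are hypothetical.
 */
static void foo_lookup_and_use(unsigned long id)
{
	struct foo *foo;

	rcu_read_lock();
	foo = idr_find(&foo_idr, id);
	if (foo)
		foo_use(foo);	/* must not sleep or outlive the RCU section */
	rcu_read_unlock();
}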
177
178/**
179 * idr_for_each() - Iterate through all stored pointers.
180 * @idr: IDR handle.
181 * @fn: Function to be called for each pointer.
182 * @data: Data passed to callback function.
183 *
184 * The callback function will be called for each entry in @idr, passing
185 * the ID, the entry and @data.
186 *
187 * If @fn returns anything other than %0, the iteration stops and that
188 * value is returned from this function.
189 *
190 * idr_for_each() can be called concurrently with idr_alloc() and
191 * idr_remove() if protected by RCU.  Newly added entries may not be
192 * seen and deleted entries may be seen, but adding and removing entries
193 * will not cause other entries to be skipped, nor spurious ones to be seen.
194 */
195int idr_for_each(const struct idr *idr,
196		int (*fn)(int id, void *p, void *data), void *data)
197{
198	struct radix_tree_iter iter;
199	void __rcu **slot;
200	int base = idr->idr_base;
201
202	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
203		int ret;
204		unsigned long id = iter.index + base;
205
206		if (WARN_ON_ONCE(id > INT_MAX))
207			break;
208		ret = fn(id, rcu_dereference_raw(*slot), data);
209		if (ret)
210			return ret;
211	}
212
213	return 0;
214}
215EXPORT_SYMBOL(idr_for_each);
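
/*
 * Illustrative sketch, not part of lib/idr.c: a typical idr_for_each()
 * callback.  Returning non-zero from the callback stops the walk and that
 * value is propagated to the caller.  foo_idr is hypothetical.
 */
static int foo_count_one(int id, void *p, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;
}

static unsigned int foo_count(void)
{
	unsigned int count = 0;

	idr_for_each(&foo_idr, foo_count_one, &count);
	return count;
}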
216
217/**
218 * idr_get_next_ul() - Find next populated entry.
219 * @idr: IDR handle.
220 * @nextid: Pointer to an ID.
221 *
222 * Returns the next populated entry in the tree with an ID greater than
223 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
224 * to the ID of the found value.  To use in a loop, the value pointed to by
225 * nextid must be incremented by the user.
226 */
227void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
228{
229	struct radix_tree_iter iter;
230	void __rcu **slot;
231	void *entry = NULL;
232	unsigned long base = idr->idr_base;
233	unsigned long id = *nextid;
234
235	id = (id < base) ? 0 : id - base;
236	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, id) {
237		entry = rcu_dereference_raw(*slot);
238		if (!entry)
239			continue;
240		if (!xa_is_internal(entry))
241			break;
242		if (slot != &idr->idr_rt.xa_head && !xa_is_retry(entry))
243			break;
244		slot = radix_tree_iter_retry(&iter);
245	}
246	if (!slot)
247		return NULL;
248
249	*nextid = iter.index + base;
250	return entry;
251}
252EXPORT_SYMBOL(idr_get_next_ul);
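
/*
 * Illustrative sketch, not part of lib/idr.c: iterating with
 * idr_get_next_ul().  As the kerneldoc notes, the helper does not advance
 * @nextid past the entry it returns, so the loop must increment it.
 * foo_idr and struct foo are hypothetical.
 */
static void foo_walk(void)
{
	struct foo *foo;
	unsigned long id = 0;

	while ((foo = idr_get_next_ul(&foo_idr, &id)) != NULL) {
		/* handle (id, foo) */
		id++;
	}
}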
253
254/**
255 * idr_get_next() - Find next populated entry.
256 * @idr: IDR handle.
257 * @nextid: Pointer to an ID.
258 *
259 * Returns the next populated entry in the tree with an ID greater than
260 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
261 * to the ID of the found value.  To use in a loop, the value pointed to by
262 * nextid must be incremented by the user.
263 */
264void *idr_get_next(struct idr *idr, int *nextid)
265{
266	unsigned long id = *nextid;
267	void *entry = idr_get_next_ul(idr, &id);
268
269	if (WARN_ON_ONCE(id > INT_MAX))
270		return NULL;
271	*nextid = id;
272	return entry;
273}
274EXPORT_SYMBOL(idr_get_next);
275
276/**
277 * idr_replace() - replace pointer for given ID.
278 * @idr: IDR handle.
279 * @ptr: New pointer to associate with the ID.
280 * @id: ID to change.
281 *
282 * Replace the pointer registered with an ID and return the old value.
283 * This function can be called under the RCU read lock concurrently with
284 * idr_alloc() and idr_remove() (as long as the ID being removed is not
285 * the one being replaced!).
286 *
287 * Returns: the old value on success.  %-ENOENT indicates that @id was not
288 * found.  %-EINVAL indicates that @ptr was not valid.
289 */
290void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
291{
292	struct radix_tree_node *node;
293	void __rcu **slot = NULL;
294	void *entry;
295
296	id -= idr->idr_base;
297
298	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
299	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
300		return ERR_PTR(-ENOENT);
301
302	__radix_tree_replace(&idr->idr_rt, node, slot, ptr);
303
304	return entry;
305}
306EXPORT_SYMBOL(idr_replace);
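
/*
 * Illustrative sketch, not part of lib/idr.c: idr_replace() returns the old
 * pointer on success and an ERR_PTR() on failure, so the result must be
 * checked with IS_ERR() rather than against NULL.  foo_put() is hypothetical.
 */
static int foo_swap(struct foo *new, unsigned long id)
{
	struct foo *old;

	old = idr_replace(&foo_idr, new, id);
	if (IS_ERR(old))
		return PTR_ERR(old);	/* -ENOENT or -EINVAL */

	foo_put(old);			/* release the old object */
	return 0;
}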
307
308/**
309 * DOC: IDA description
310 *
311 * The IDA is an ID allocator which does not provide the ability to
312 * associate an ID with a pointer.  As such, it only needs to store one
313 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
314 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
315 * then initialise it using ida_init()).  To allocate a new ID, call
316 * ida_alloc(), ida_alloc_min(), ida_alloc_max() or ida_alloc_range().
317 * To free an ID, call ida_free().
318 *
319 * ida_destroy() can be used to dispose of an IDA without needing to
320 * free the individual IDs in it.  You can use ida_is_empty() to find
321 * out whether the IDA has any IDs currently allocated.
322 *
323 * The IDA handles its own locking.  It is safe to call any of the IDA
324 * functions without synchronisation in your code.
325 *
326 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
327 * limitation, it should be quite straightforward to raise the maximum.
328 */
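
/*
 * Illustrative sketch, not part of lib/idr.c: the basic IDA life cycle
 * described above.  ida_alloc() and ida_free() are the wrappers declared in
 * <linux/idr.h>; bar_ida is a hypothetical name.
 */
static DEFINE_IDA(bar_ida);

static int bar_example(void)
{
	int id;

	id = ida_alloc(&bar_ida, GFP_KERNEL);	/* any free ID >= 0 */
	if (id < 0)
		return id;

	/* ... use id ... */

	ida_free(&bar_ida, id);
	return 0;
}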
329
330/*
331 * Developer's notes:
332 *
333 * The IDA uses the functionality provided by the XArray to store bitmaps in
334 * each entry.  The XA_FREE_MARK is only cleared when all bits in the bitmap
335 * have been set.
336 *
337 * I considered telling the XArray that each slot is an order-10 node
338 * and indexing by bit number, but the XArray can't allow a single multi-index
339 * entry in the head, which would significantly increase memory consumption
340 * for the IDA.  So instead we divide the index by the number of bits in the
341 * leaf bitmap before doing a radix tree lookup.
342 *
343 * As an optimisation, if there are only a few low bits set in any given
344 * leaf, instead of allocating a 128-byte bitmap, we store the bits
345 * as a value entry.  Value entries never have the XA_FREE_MARK cleared
346 * because we can always convert them into a bitmap entry.
347 *
348 * It would be possible to optimise further; once we've run out of a
349 * single 128-byte bitmap, we currently switch to a 576-byte node, put
350 * the 128-byte bitmap in the first entry and then start allocating extra
351 * 128-byte entries.  We could instead use the 512 bytes of the node's
352 * data as a bitmap before moving to that scheme.  I do not believe this
353 * is a worthwhile optimisation; Rasmus Villemoes surveyed the current
354 * users of the IDA and almost none of them use more than 1024 entries.
355 * Those that do use more than 1024 also need more than the 8192 IDs
356 * that the 512 bytes would provide.
357 *
358 * The IDA always uses a lock to alloc/free.  If we add a 'test_bit'
359 * equivalent, it will still need locking.  Going to RCU lookup would require
360 * using RCU to free bitmaps, and that's not trivial without embedding an
361 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
362 * bitmap, which is excessive.
363 */
364
365/**
366 * ida_alloc_range() - Allocate an unused ID.
367 * @ida: IDA handle.
368 * @min: Lowest ID to allocate.
369 * @max: Highest ID to allocate.
370 * @gfp: Memory allocation flags.
371 *
372 * Allocate an ID between @min and @max, inclusive.  The allocated ID will
373 * not exceed %INT_MAX, even if @max is larger.
374 *
375 * Context: Any context.
376 * Return: The allocated ID, or %-ENOMEM if memory could not be allocated,
377 * or %-ENOSPC if there are no free IDs.
378 */
379int ida_alloc_range(struct ida *ida, unsigned int min, unsigned int max,
380			gfp_t gfp)
381{
382	XA_STATE(xas, &ida->xa, min / IDA_BITMAP_BITS);
383	unsigned bit = min % IDA_BITMAP_BITS;
384	unsigned long flags;
385	struct ida_bitmap *bitmap, *alloc = NULL;
386
387	if ((int)min < 0)
388		return -ENOSPC;
389
390	if ((int)max < 0)
391		max = INT_MAX;
392
393retry:
394	xas_lock_irqsave(&xas, flags);
395next:
396	bitmap = xas_find_marked(&xas, max / IDA_BITMAP_BITS, XA_FREE_MARK);
397	if (xas.xa_index > min / IDA_BITMAP_BITS)
398		bit = 0;
399	if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
400		goto nospc;
401
402	if (xa_is_value(bitmap)) {
403		unsigned long tmp = xa_to_value(bitmap);
404
405		if (bit < BITS_PER_XA_VALUE) {
406			bit = find_next_zero_bit(&tmp, BITS_PER_XA_VALUE, bit);
407			if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
408				goto nospc;
409			if (bit < BITS_PER_XA_VALUE) {
410				tmp |= 1UL << bit;
411				xas_store(&xas, xa_mk_value(tmp));
412				goto out;
413			}
414		}
415		bitmap = alloc;
416		if (!bitmap)
417			bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
418		if (!bitmap)
419			goto alloc;
420		bitmap->bitmap[0] = tmp;
421		xas_store(&xas, bitmap);
422		if (xas_error(&xas)) {
423			bitmap->bitmap[0] = 0;
424			goto out;
425		}
426	}
427
428	if (bitmap) {
429		bit = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, bit);
430		if (xas.xa_index * IDA_BITMAP_BITS + bit > max)
431			goto nospc;
432		if (bit == IDA_BITMAP_BITS)
433			goto next;
434
435		__set_bit(bit, bitmap->bitmap);
436		if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
437			xas_clear_mark(&xas, XA_FREE_MARK);
438	} else {
439		if (bit < BITS_PER_XA_VALUE) {
440			bitmap = xa_mk_value(1UL << bit);
441		} else {
442			bitmap = alloc;
443			if (!bitmap)
444				bitmap = kzalloc(sizeof(*bitmap), GFP_NOWAIT);
445			if (!bitmap)
446				goto alloc;
447			__set_bit(bit, bitmap->bitmap);
448		}
449		xas_store(&xas, bitmap);
450	}
451out:
452	xas_unlock_irqrestore(&xas, flags);
453	if (xas_nomem(&xas, gfp)) {
454		xas.xa_index = min / IDA_BITMAP_BITS;
455		bit = min % IDA_BITMAP_BITS;
456		goto retry;
457	}
458	if (bitmap != alloc)
459		kfree(alloc);
460	if (xas_error(&xas))
461		return xas_error(&xas);
462	return xas.xa_index * IDA_BITMAP_BITS + bit;
463alloc:
464	xas_unlock_irqrestore(&xas, flags);
465	alloc = kzalloc(sizeof(*bitmap), gfp);
466	if (!alloc)
467		return -ENOMEM;
468	xas_set(&xas, min / IDA_BITMAP_BITS);
469	bit = min % IDA_BITMAP_BITS;
470	goto retry;
471nospc:
472	xas_unlock_irqrestore(&xas, flags);
473	return -ENOSPC;
474}
475EXPORT_SYMBOL(ida_alloc_range);
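
/*
 * Illustrative sketch, not part of lib/idr.c: ida_alloc_range() with an
 * inclusive upper bound, e.g. handing out a bounded instance number.
 * bar_ida is the hypothetical IDA from the example above.
 */
static int bar_get_instance(void)
{
	/* returns an ID in [0, 15], or -ENOSPC once all 16 are in use */
	return ida_alloc_range(&bar_ida, 0, 15, GFP_KERNEL);
}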
476
477/**
478 * ida_free() - Release an allocated ID.
479 * @ida: IDA handle.
480 * @id: Previously allocated ID.
481 *
482 * Context: Any context.
483 */
484void ida_free(struct ida *ida, unsigned int id)
485{
486	XA_STATE(xas, &ida->xa, id / IDA_BITMAP_BITS);
487	unsigned bit = id % IDA_BITMAP_BITS;
488	struct ida_bitmap *bitmap;
489	unsigned long flags;
490
491	BUG_ON((int)id < 0);
492
493	xas_lock_irqsave(&xas, flags);
494	bitmap = xas_load(&xas);
495
496	if (xa_is_value(bitmap)) {
497		unsigned long v = xa_to_value(bitmap);
498		if (bit >= BITS_PER_XA_VALUE)
499			goto err;
500		if (!(v & (1UL << bit)))
501			goto err;
502		v &= ~(1UL << bit);
503		if (!v)
504			goto delete;
505		xas_store(&xas, xa_mk_value(v));
506	} else {
507		if (!test_bit(bit, bitmap->bitmap))
508			goto err;
509		__clear_bit(bit, bitmap->bitmap);
510		xas_set_mark(&xas, XA_FREE_MARK);
511		if (bitmap_empty(bitmap->bitmap, IDA_BITMAP_BITS)) {
512			kfree(bitmap);
513delete:
514			xas_store(&xas, NULL);
515		}
516	}
517	xas_unlock_irqrestore(&xas, flags);
518	return;
519 err:
520	xas_unlock_irqrestore(&xas, flags);
521	WARN(1, "ida_free called for id=%d which is not allocated.\n", id);
522}
523EXPORT_SYMBOL(ida_free);
524
525/**
526 * ida_destroy() - Free all IDs.
527 * @ida: IDA handle.
528 *
529 * Calling this function frees all IDs and releases all resources used
530 * by an IDA.  When this call returns, the IDA is empty and can be reused
531 * or freed.  If the IDA is already empty, there is no need to call this
532 * function.
533 *
534 * Context: Any context.
535 */
536void ida_destroy(struct ida *ida)
537{
538	XA_STATE(xas, &ida->xa, 0);
539	struct ida_bitmap *bitmap;
540	unsigned long flags;
541
542	xas_lock_irqsave(&xas, flags);
543	xas_for_each(&xas, bitmap, ULONG_MAX) {
544		if (!xa_is_value(bitmap))
545			kfree(bitmap);
546		xas_store(&xas, NULL);
547	}
548	xas_unlock_irqrestore(&xas, flags);
549}
550EXPORT_SYMBOL(ida_destroy);
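
/*
 * Illustrative sketch, not part of lib/idr.c: tearing down a hypothetical
 * module's IDA in one call instead of freeing each ID individually.
 */
static void __exit bar_exit(void)
{
	ida_destroy(&bar_ida);		/* any remaining IDs are released */
}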
551
552#ifndef __KERNEL__
553extern void xa_dump_index(unsigned long index, unsigned int shift);
554#define IDA_CHUNK_SHIFT		ilog2(IDA_BITMAP_BITS)
555
556static void ida_dump_entry(void *entry, unsigned long index)
557{
558	unsigned long i;
559
560	if (!entry)
561		return;
562
563	if (xa_is_node(entry)) {
564		struct xa_node *node = xa_to_node(entry);
565		unsigned int shift = node->shift + IDA_CHUNK_SHIFT +
566			XA_CHUNK_SHIFT;
567
568		xa_dump_index(index * IDA_BITMAP_BITS, shift);
569		xa_dump_node(node);
570		for (i = 0; i < XA_CHUNK_SIZE; i++)
571			ida_dump_entry(node->slots[i],
572					index | (i << node->shift));
573	} else if (xa_is_value(entry)) {
574		xa_dump_index(index * IDA_BITMAP_BITS, ilog2(BITS_PER_LONG));
575		pr_cont("value: data %lx [%px]\n", xa_to_value(entry), entry);
576	} else {
577		struct ida_bitmap *bitmap = entry;
578
579		xa_dump_index(index * IDA_BITMAP_BITS, IDA_CHUNK_SHIFT);
580		pr_cont("bitmap: %p data", bitmap);
581		for (i = 0; i < IDA_BITMAP_LONGS; i++)
582			pr_cont(" %lx", bitmap->bitmap[i]);
583		pr_cont("\n");
584	}
585}
586
587static void ida_dump(struct ida *ida)
588{
589	struct xarray *xa = &ida->xa;
590	pr_debug("ida: %p node %p free %d\n", ida, xa->xa_head,
591				xa->xa_flags >> ROOT_TAG_SHIFT);
592	ida_dump_entry(xa->xa_head, 0);
593}
594#endif
Linux v4.17
  1#include <linux/bitmap.h>
  2#include <linux/bug.h>
  3#include <linux/export.h>
  4#include <linux/idr.h>
  5#include <linux/slab.h>
  6#include <linux/spinlock.h>
  7
  8DEFINE_PER_CPU(struct ida_bitmap *, ida_bitmap);
  9static DEFINE_SPINLOCK(simple_ida_lock);
 10
 11/**
 12 * idr_alloc_u32() - Allocate an ID.
 13 * @idr: IDR handle.
 14 * @ptr: Pointer to be associated with the new ID.
 15 * @nextid: Pointer to an ID.
 16 * @max: The maximum ID to allocate (inclusive).
 17 * @gfp: Memory allocation flags.
 18 *
 19 * Allocates an unused ID in the range specified by @nextid and @max.
 20 * Note that @max is inclusive whereas the @end parameter to idr_alloc()
 21 * is exclusive.  The new ID is assigned to @nextid before the pointer
 22 * is inserted into the IDR, so if @nextid points into the object pointed
 23 * to by @ptr, a concurrent lookup will not find an uninitialised ID.
 24 *
 25 * The caller should provide their own locking to ensure that two
 26 * concurrent modifications to the IDR are not possible.  Read-only
 27 * accesses to the IDR may be done under the RCU read lock or may
 28 * exclude simultaneous writers.
 29 *
 30 * Return: 0 if an ID was allocated, -ENOMEM if memory allocation failed,
 31 * or -ENOSPC if no free IDs could be found.  If an error occurred,
 32 * @nextid is unchanged.
 33 */
 34int idr_alloc_u32(struct idr *idr, void *ptr, u32 *nextid,
 35			unsigned long max, gfp_t gfp)
 36{
 37	struct radix_tree_iter iter;
 38	void __rcu **slot;
 39	unsigned int base = idr->idr_base;
 40	unsigned int id = *nextid;
 41
 42	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
 43		return -EINVAL;
 44	if (WARN_ON_ONCE(!(idr->idr_rt.gfp_mask & ROOT_IS_IDR)))
 45		idr->idr_rt.gfp_mask |= IDR_RT_MARKER;
 46
 47	id = (id < base) ? 0 : id - base;
 48	radix_tree_iter_init(&iter, id);
 49	slot = idr_get_free(&idr->idr_rt, &iter, gfp, max - base);
 50	if (IS_ERR(slot))
 51		return PTR_ERR(slot);
 52
 53	*nextid = iter.index + base;
 54	/* there is a memory barrier inside radix_tree_iter_replace() */
 55	radix_tree_iter_replace(&idr->idr_rt, &iter, slot, ptr);
 56	radix_tree_iter_tag_clear(&idr->idr_rt, &iter, IDR_FREE);
 57
 58	return 0;
 59}
 60EXPORT_SYMBOL_GPL(idr_alloc_u32);
 61
 62/**
 63 * idr_alloc() - Allocate an ID.
 64 * @idr: IDR handle.
 65 * @ptr: Pointer to be associated with the new ID.
 66 * @start: The minimum ID (inclusive).
 67 * @end: The maximum ID (exclusive).
 68 * @gfp: Memory allocation flags.
 69 *
 70 * Allocates an unused ID in the range specified by @start and @end.  If
 71 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
 72 * callers to use @start + N as @end as long as N is within integer range.
 73 *
 74 * The caller should provide their own locking to ensure that two
 75 * concurrent modifications to the IDR are not possible.  Read-only
 76 * accesses to the IDR may be done under the RCU read lock or may
 77 * exclude simultaneous writers.
 78 *
 79 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
 80 * or -ENOSPC if no free IDs could be found.
 81 */
 82int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
 83{
 84	u32 id = start;
 85	int ret;
 86
 87	if (WARN_ON_ONCE(start < 0))
 88		return -EINVAL;
 89
 90	ret = idr_alloc_u32(idr, ptr, &id, end > 0 ? end - 1 : INT_MAX, gfp);
 91	if (ret)
 92		return ret;
 93
 94	return id;
 95}
 96EXPORT_SYMBOL_GPL(idr_alloc);
 97
 98/**
 99 * idr_alloc_cyclic() - Allocate an ID cyclically.
100 * @idr: IDR handle.
101 * @ptr: Pointer to be associated with the new ID.
102 * @start: The minimum ID (inclusive).
103 * @end: The maximum ID (exclusive).
104 * @gfp: Memory allocation flags.
105 *
106 * Allocates an unused ID in the range specified by @start and @end.  If
107 * @end is <= 0, it is treated as one larger than %INT_MAX.  This allows
108 * callers to use @start + N as @end as long as N is within integer range.
109 * The search for an unused ID will start at the last ID allocated and will
110 * wrap around to @start if no free IDs are found before reaching @end.
111 *
112 * The caller should provide their own locking to ensure that two
113 * concurrent modifications to the IDR are not possible.  Read-only
114 * accesses to the IDR may be done under the RCU read lock or may
115 * exclude simultaneous writers.
116 *
117 * Return: The newly allocated ID, -ENOMEM if memory allocation failed,
118 * or -ENOSPC if no free IDs could be found.
119 */
120int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp)
121{
122	u32 id = idr->idr_next;
123	int err, max = end > 0 ? end - 1 : INT_MAX;
124
125	if ((int)id < start)
126		id = start;
127
128	err = idr_alloc_u32(idr, ptr, &id, max, gfp);
129	if ((err == -ENOSPC) && (id > start)) {
130		id = start;
131		err = idr_alloc_u32(idr, ptr, &id, max, gfp);
132	}
133	if (err)
134		return err;
135
136	idr->idr_next = id + 1;
137	return id;
138}
139EXPORT_SYMBOL(idr_alloc_cyclic);
140
141/**
142 * idr_remove() - Remove an ID from the IDR.
143 * @idr: IDR handle.
144 * @id: Pointer ID.
145 *
146 * Removes this ID from the IDR.  If the ID was not previously in the IDR,
147 * this function returns %NULL.
148 *
149 * Since this function modifies the IDR, the caller should provide their
150 * own locking to ensure that concurrent modification of the same IDR is
151 * not possible.
152 *
153 * Return: The pointer formerly associated with this ID.
154 */
155void *idr_remove(struct idr *idr, unsigned long id)
156{
157	return radix_tree_delete_item(&idr->idr_rt, id - idr->idr_base, NULL);
158}
159EXPORT_SYMBOL_GPL(idr_remove);
160
161/**
162 * idr_find() - Return pointer for given ID.
163 * @idr: IDR handle.
164 * @id: Pointer ID.
165 *
166 * Looks up the pointer associated with this ID.  A %NULL pointer may
167 * indicate that @id is not allocated or that the %NULL pointer was
168 * associated with this ID.
169 *
170 * This function can be called under rcu_read_lock(), given that the leaf
171 * pointers' lifetimes are correctly managed.
172 *
173 * Return: The pointer associated with this ID.
174 */
175void *idr_find(const struct idr *idr, unsigned long id)
176{
177	return radix_tree_lookup(&idr->idr_rt, id - idr->idr_base);
178}
179EXPORT_SYMBOL_GPL(idr_find);
180
181/**
182 * idr_for_each() - Iterate through all stored pointers.
183 * @idr: IDR handle.
184 * @fn: Function to be called for each pointer.
185 * @data: Data passed to callback function.
186 *
187 * The callback function will be called for each entry in @idr, passing
188 * the ID, the entry and @data.
189 *
190 * If @fn returns anything other than %0, the iteration stops and that
191 * value is returned from this function.
192 *
193 * idr_for_each() can be called concurrently with idr_alloc() and
194 * idr_remove() if protected by RCU.  Newly added entries may not be
195 * seen and deleted entries may be seen, but adding and removing entries
196 * will not cause other entries to be skipped, nor spurious ones to be seen.
197 */
198int idr_for_each(const struct idr *idr,
199		int (*fn)(int id, void *p, void *data), void *data)
200{
201	struct radix_tree_iter iter;
202	void __rcu **slot;
203	int base = idr->idr_base;
204
205	radix_tree_for_each_slot(slot, &idr->idr_rt, &iter, 0) {
206		int ret;
207		unsigned long id = iter.index + base;
208
209		if (WARN_ON_ONCE(id > INT_MAX))
210			break;
211		ret = fn(id, rcu_dereference_raw(*slot), data);
212		if (ret)
213			return ret;
214	}
215
216	return 0;
217}
218EXPORT_SYMBOL(idr_for_each);
219
220/**
221 * idr_get_next() - Find next populated entry.
222 * @idr: IDR handle.
223 * @nextid: Pointer to an ID.
224 *
225 * Returns the next populated entry in the tree with an ID greater than
226 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
227 * to the ID of the found value.  To use in a loop, the value pointed to by
228 * nextid must be incremented by the user.
229 */
230void *idr_get_next(struct idr *idr, int *nextid)
231{
232	struct radix_tree_iter iter;
233	void __rcu **slot;
234	unsigned long base = idr->idr_base;
235	unsigned long id = *nextid;
236
237	id = (id < base) ? 0 : id - base;
238	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
239	if (!slot)
240		return NULL;
241	id = iter.index + base;
242
243	if (WARN_ON_ONCE(id > INT_MAX))
244		return NULL;
245
246	*nextid = id;
247	return rcu_dereference_raw(*slot);
248}
249EXPORT_SYMBOL(idr_get_next);
250
251/**
252 * idr_get_next_ul() - Find next populated entry.
253 * @idr: IDR handle.
254 * @nextid: Pointer to an ID.
255 *
256 * Returns the next populated entry in the tree with an ID greater than
257 * or equal to the value pointed to by @nextid.  On exit, @nextid is updated
258 * to the ID of the found value.  To use in a loop, the value pointed to by
259 * nextid must be incremented by the user.
260 */
261void *idr_get_next_ul(struct idr *idr, unsigned long *nextid)
262{
263	struct radix_tree_iter iter;
264	void __rcu **slot;
265	unsigned long base = idr->idr_base;
266	unsigned long id = *nextid;
267
268	id = (id < base) ? 0 : id - base;
269	slot = radix_tree_iter_find(&idr->idr_rt, &iter, id);
270	if (!slot)
271		return NULL;
272
273	*nextid = iter.index + base;
274	return rcu_dereference_raw(*slot);
275}
276EXPORT_SYMBOL(idr_get_next_ul);
277
278/**
279 * idr_replace() - replace pointer for given ID.
280 * @idr: IDR handle.
281 * @ptr: New pointer to associate with the ID.
282 * @id: ID to change.
283 *
284 * Replace the pointer registered with an ID and return the old value.
285 * This function can be called under the RCU read lock concurrently with
286 * idr_alloc() and idr_remove() (as long as the ID being removed is not
287 * the one being replaced!).
288 *
289 * Returns: the old value on success.  %-ENOENT indicates that @id was not
290 * found.  %-EINVAL indicates that @ptr was not valid.
291 */
292void *idr_replace(struct idr *idr, void *ptr, unsigned long id)
293{
294	struct radix_tree_node *node;
295	void __rcu **slot = NULL;
296	void *entry;
297
298	if (WARN_ON_ONCE(radix_tree_is_internal_node(ptr)))
299		return ERR_PTR(-EINVAL);
300	id -= idr->idr_base;
301
302	entry = __radix_tree_lookup(&idr->idr_rt, id, &node, &slot);
303	if (!slot || radix_tree_tag_get(&idr->idr_rt, id, IDR_FREE))
304		return ERR_PTR(-ENOENT);
305
306	__radix_tree_replace(&idr->idr_rt, node, slot, ptr, NULL);
307
308	return entry;
309}
310EXPORT_SYMBOL(idr_replace);
311
312/**
313 * DOC: IDA description
314 *
315 * The IDA is an ID allocator which does not provide the ability to
316 * associate an ID with a pointer.  As such, it only needs to store one
317 * bit per ID, and so is more space efficient than an IDR.  To use an IDA,
318 * define it using DEFINE_IDA() (or embed a &struct ida in a data structure,
319 * then initialise it using ida_init()).  To allocate a new ID, call
320 * ida_simple_get().  To free an ID, call ida_simple_remove().
321 *
322 * If you have more complex locking requirements, use a loop around
323 * ida_pre_get() and ida_get_new() to allocate a new ID.  Then use
324 * ida_remove() to free an ID.  You must make sure that ida_get_new() and
325 * ida_remove() cannot be called at the same time as each other for the
326 * same IDA.
327 *
328 * You can also use ida_get_new_above() if you need an ID to be allocated
329 * above a particular number.  ida_destroy() can be used to dispose of an
330 * IDA without needing to free the individual IDs in it.  You can use
331 * ida_is_empty() to find out whether the IDA has any IDs currently allocated.
332 *
333 * IDs are currently limited to the range [0-INT_MAX].  If this is an awkward
334 * limitation, it should be quite straightforward to raise the maximum.
335 */
336
337/*
338 * Developer's notes:
339 *
340 * The IDA uses the functionality provided by the IDR & radix tree to store
341 * bitmaps in each entry.  The IDR_FREE tag means there is at least one bit
342 * free, unlike the IDR where it means at least one entry is free.
343 *
344 * I considered telling the radix tree that each slot is an order-10 node
345 * and storing the bit numbers in the radix tree, but the radix tree can't
346 * allow a single multiorder entry at index 0, which would significantly
347 * increase memory consumption for the IDA.  So instead we divide the index
348 * by the number of bits in the leaf bitmap before doing a radix tree lookup.
349 *
350 * As an optimisation, if there are only a few low bits set in any given
351 * leaf, instead of allocating a 128-byte bitmap, we use the 'exceptional
352 * entry' functionality of the radix tree to store BITS_PER_LONG - 2 bits
353 * directly in the entry.  By being really tricksy, we could store
354 * BITS_PER_LONG - 1 bits, but there're diminishing returns after optimising
355 * for 0-3 allocated IDs.
356 *
357 * We allow the radix tree 'exceptional' count to get out of date.  Nothing
358 * in the IDA nor the radix tree code checks it.  If it becomes important
359 * to maintain an accurate exceptional count, switch the rcu_assign_pointer()
360 * calls to radix_tree_iter_replace() which will correct the exceptional
361 * count.
362 *
363 * The IDA always requires a lock to alloc/free.  If we add a 'test_bit'
364 * equivalent, it will still need locking.  Going to RCU lookup would require
365 * using RCU to free bitmaps, and that's not trivial without embedding an
366 * RCU head in the bitmap, which adds a 2-pointer overhead to each 128-byte
367 * bitmap, which is excessive.
368 */
369
370#define IDA_MAX (0x80000000U / IDA_BITMAP_BITS - 1)
371
372/**
373 * ida_get_new_above - allocate new ID above or equal to a start id
374 * @ida: ida handle
375 * @start: id to start search at
376 * @id: pointer to the allocated handle
377 *
378 * Allocate new ID above or equal to @start.  It should be called
379 * with any required locks to ensure that concurrent calls to
380 * ida_get_new_above() / ida_get_new() / ida_remove() are not allowed.
381 * Consider using ida_simple_get() if you do not have complex locking
382 * requirements.
383 *
384 * If memory is required, it will return %-EAGAIN, you should unlock
385 * and go back to the ida_pre_get() call.  If the ida is full, it will
386 * return %-ENOSPC.  On success, it will return 0.
387 *
388 * @id returns a value in the range @start ... %0x7fffffff.
389 */
390int ida_get_new_above(struct ida *ida, int start, int *id)
391{
392	struct radix_tree_root *root = &ida->ida_rt;
393	void __rcu **slot;
394	struct radix_tree_iter iter;
395	struct ida_bitmap *bitmap;
396	unsigned long index;
397	unsigned bit, ebit;
398	int new;
399
400	index = start / IDA_BITMAP_BITS;
401	bit = start % IDA_BITMAP_BITS;
402	ebit = bit + RADIX_TREE_EXCEPTIONAL_SHIFT;
403
404	slot = radix_tree_iter_init(&iter, index);
405	for (;;) {
406		if (slot)
407			slot = radix_tree_next_slot(slot, &iter,
408						RADIX_TREE_ITER_TAGGED);
409		if (!slot) {
410			slot = idr_get_free(root, &iter, GFP_NOWAIT, IDA_MAX);
411			if (IS_ERR(slot)) {
412				if (slot == ERR_PTR(-ENOMEM))
413					return -EAGAIN;
414				return PTR_ERR(slot);
415			}
416		}
417		if (iter.index > index) {
418			bit = 0;
419			ebit = RADIX_TREE_EXCEPTIONAL_SHIFT;
420		}
421		new = iter.index * IDA_BITMAP_BITS;
422		bitmap = rcu_dereference_raw(*slot);
423		if (radix_tree_exception(bitmap)) {
424			unsigned long tmp = (unsigned long)bitmap;
425			ebit = find_next_zero_bit(&tmp, BITS_PER_LONG, ebit);
426			if (ebit < BITS_PER_LONG) {
427				tmp |= 1UL << ebit;
428				rcu_assign_pointer(*slot, (void *)tmp);
429				*id = new + ebit - RADIX_TREE_EXCEPTIONAL_SHIFT;
430				return 0;
431			}
432			bitmap = this_cpu_xchg(ida_bitmap, NULL);
433			if (!bitmap)
434				return -EAGAIN;
435			bitmap->bitmap[0] = tmp >> RADIX_TREE_EXCEPTIONAL_SHIFT;
436			rcu_assign_pointer(*slot, bitmap);
437		}
438
439		if (bitmap) {
440			bit = find_next_zero_bit(bitmap->bitmap,
441							IDA_BITMAP_BITS, bit);
442			new += bit;
443			if (new < 0)
444				return -ENOSPC;
445			if (bit == IDA_BITMAP_BITS)
446				continue;
447
448			__set_bit(bit, bitmap->bitmap);
449			if (bitmap_full(bitmap->bitmap, IDA_BITMAP_BITS))
450				radix_tree_iter_tag_clear(root, &iter,
451								IDR_FREE);
452		} else {
453			new += bit;
454			if (new < 0)
455				return -ENOSPC;
456			if (ebit < BITS_PER_LONG) {
457				bitmap = (void *)((1UL << ebit) |
458						RADIX_TREE_EXCEPTIONAL_ENTRY);
459				radix_tree_iter_replace(root, &iter, slot,
460						bitmap);
461				*id = new;
462				return 0;
463			}
464			bitmap = this_cpu_xchg(ida_bitmap, NULL);
465			if (!bitmap)
466				return -EAGAIN;
467			__set_bit(bit, bitmap->bitmap);
468			radix_tree_iter_replace(root, &iter, slot, bitmap);
469		}
470
471		*id = new;
472		return 0;
473	}
474}
475EXPORT_SYMBOL(ida_get_new_above);
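
/*
 * Illustrative sketch, not part of lib/idr.c: the pre-get/retry loop that the
 * kerneldoc above describes, essentially what ida_simple_get() does below.
 * baz_ida and baz_lock are hypothetical names.
 */
static DEFINE_IDA(baz_ida);
static DEFINE_SPINLOCK(baz_lock);

static int baz_alloc_id(void)
{
	int id, err;

again:
	if (!ida_pre_get(&baz_ida, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&baz_lock);
	err = ida_get_new_above(&baz_ida, 0, &id);
	spin_unlock(&baz_lock);

	if (err == -EAGAIN)
		goto again;	/* the preallocated bitmap was consumed */
	return err ? err : id;
}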
476
477/**
478 * ida_remove - Free the given ID
479 * @ida: ida handle
480 * @id: ID to free
481 *
482 * This function should not be called at the same time as ida_get_new_above().
483 */
484void ida_remove(struct ida *ida, int id)
485{
486	unsigned long index = id / IDA_BITMAP_BITS;
487	unsigned offset = id % IDA_BITMAP_BITS;
488	struct ida_bitmap *bitmap;
489	unsigned long *btmp;
490	struct radix_tree_iter iter;
491	void __rcu **slot;
492
493	slot = radix_tree_iter_lookup(&ida->ida_rt, &iter, index);
494	if (!slot)
495		goto err;
496
497	bitmap = rcu_dereference_raw(*slot);
498	if (radix_tree_exception(bitmap)) {
499		btmp = (unsigned long *)slot;
500		offset += RADIX_TREE_EXCEPTIONAL_SHIFT;
501		if (offset >= BITS_PER_LONG)
502			goto err;
503	} else {
504		btmp = bitmap->bitmap;
505	}
506	if (!test_bit(offset, btmp))
507		goto err;
508
509	__clear_bit(offset, btmp);
510	radix_tree_iter_tag_set(&ida->ida_rt, &iter, IDR_FREE);
511	if (radix_tree_exception(bitmap)) {
512		if (rcu_dereference_raw(*slot) ==
513					(void *)RADIX_TREE_EXCEPTIONAL_ENTRY)
514			radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
515	} else if (bitmap_empty(btmp, IDA_BITMAP_BITS)) {
516		kfree(bitmap);
517		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
518	}
519	return;
520 err:
521	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
522}
523EXPORT_SYMBOL(ida_remove);
524
525/**
526 * ida_destroy - Free the contents of an ida
527 * @ida: ida handle
528 *
529 * Calling this function releases all resources associated with an IDA.  When
530 * this call returns, the IDA is empty and can be reused or freed.  The caller
531 * should not allow ida_remove() or ida_get_new_above() to be called at the
532 * same time.
533 */
534void ida_destroy(struct ida *ida)
535{
536	struct radix_tree_iter iter;
537	void __rcu **slot;
538
539	radix_tree_for_each_slot(slot, &ida->ida_rt, &iter, 0) {
540		struct ida_bitmap *bitmap = rcu_dereference_raw(*slot);
541		if (!radix_tree_exception(bitmap))
542			kfree(bitmap);
543		radix_tree_iter_delete(&ida->ida_rt, &iter, slot);
544	}
545}
546EXPORT_SYMBOL(ida_destroy);
547
548/**
549 * ida_simple_get - get a new id.
550 * @ida: the (initialized) ida.
551 * @start: the minimum id (inclusive, < 0x80000000)
552 * @end: the maximum id (exclusive, < 0x80000000 or 0)
553 * @gfp_mask: memory allocation flags
554 *
555 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
556 * On memory allocation failure, returns -ENOMEM.
557 *
558 * Compared to ida_get_new_above() this function does its own locking, and
559 * should be used unless there are special requirements.
560 *
561 * Use ida_simple_remove() to get rid of an id.
562 */
563int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
564		   gfp_t gfp_mask)
565{
566	int ret, id;
567	unsigned int max;
568	unsigned long flags;
569
570	BUG_ON((int)start < 0);
571	BUG_ON((int)end < 0);
572
573	if (end == 0)
574		max = 0x80000000;
575	else {
576		BUG_ON(end < start);
577		max = end - 1;
578	}
579
580again:
581	if (!ida_pre_get(ida, gfp_mask))
582		return -ENOMEM;
583
584	spin_lock_irqsave(&simple_ida_lock, flags);
585	ret = ida_get_new_above(ida, start, &id);
586	if (!ret) {
587		if (id > max) {
588			ida_remove(ida, id);
589			ret = -ENOSPC;
590		} else {
591			ret = id;
592		}
593	}
594	spin_unlock_irqrestore(&simple_ida_lock, flags);
595
596	if (unlikely(ret == -EAGAIN))
597		goto again;
598
599	return ret;
600}
601EXPORT_SYMBOL(ida_simple_get);
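
/*
 * Illustrative sketch, not part of lib/idr.c: ida_simple_get() with an
 * @end of 0 (meaning IDs up to INT_MAX), paired with ida_simple_remove().
 * baz_ida is the hypothetical IDA from the example above.
 */
static int baz_simple_example(void)
{
	int id;

	id = ida_simple_get(&baz_ida, 0, 0, GFP_KERNEL);
	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* ... use id ... */

	ida_simple_remove(&baz_ida, id);
	return 0;
}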
602
603/**
604 * ida_simple_remove - remove an allocated id.
605 * @ida: the (initialized) ida.
606 * @id: the id returned by ida_simple_get.
607 *
608 * Use to release an id allocated with ida_simple_get().
609 *
610 * Compared to ida_remove() this function does its own locking, and should be
611 * used unless there are special requirements.
612 */
613void ida_simple_remove(struct ida *ida, unsigned int id)
614{
615	unsigned long flags;
616
617	BUG_ON((int)id < 0);
618	spin_lock_irqsave(&simple_ida_lock, flags);
619	ida_remove(ida, id);
620	spin_unlock_irqrestore(&simple_ida_lock, flags);
621}
622EXPORT_SYMBOL(ida_simple_remove);