   1/*
   2 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
   3 *	Copyright (C) 2002 by Concurrent Computer Corporation
   4 *	Distributed under the GNU GPL license version 2.
   5 *
   6 * Modified by George Anzinger to reuse immediately and to use
   7 * find bit instructions.  Also removed _irq on spinlocks.
   8 *
   9 * Modified by Nadia Derbey to make it RCU safe.
  10 *
  11 * Small id to pointer translation service.
  12 *
  13 * It uses a radix tree like structure as a sparse array indexed
  14 * by the id to obtain the pointer.  The bitmap makes allocating
  15 * a new id quick.
  16 *
  17 * You call it to allocate an id (an int) and associate with that id a
  18 * pointer or whatever; we treat it as a (void *).  You can pass this
  19 * id to a user for them to pass back at a later time.  You then pass
  20 * that id to this code and it returns your pointer.
  21 *
  22 * You can release ids at any time.  When all ids are released, most of
  23 * the memory is returned (we keep MAX_IDR_FREE idr_layers in a local
  24 * pool) so we don't need to go to the memory "store" during an id
  25 * allocate, and you don't need to be too concerned about locking and
  26 * conflicts with the slab allocator.
  27 */
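
/*
 * A minimal usage sketch of this service (not part of this file; my_idr,
 * my_lock and struct my_obj are illustrative names only):
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	int register_obj(struct my_obj *obj)
 *	{
 *		int id;
 *
 *		idr_preload(GFP_KERNEL);
 *		spin_lock(&my_lock);
 *		id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *		spin_unlock(&my_lock);
 *		idr_preload_end();
 *
 *		return id < 0 ? id : 0;
 *	}
 *
 * Lookups may then use idr_find(&my_idr, id) under rcu_read_lock() or
 * my_lock, and idr_remove(&my_idr, id) releases the id again.
 */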
  28
  29#ifndef TEST                        // to test in user space...
  30#include <linux/slab.h>
  31#include <linux/init.h>
  32#include <linux/export.h>
  33#endif
  34#include <linux/err.h>
  35#include <linux/string.h>
  36#include <linux/idr.h>
  37#include <linux/spinlock.h>
  38#include <linux/percpu.h>
  39#include <linux/hardirq.h>
  40
  41#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
  42#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
  43
  44/* Leave the possibility of an incomplete final layer */
  45#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
  46
  47/* Number of idr_layer structs to leave in free list */
  48#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
  49
  50static struct kmem_cache *idr_layer_cache;
  51static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
  52static DEFINE_PER_CPU(int, idr_preload_cnt);
  53static DEFINE_SPINLOCK(simple_ida_lock);
  54
  55/* the maximum ID which can be allocated given idr->layers */
  56static int idr_max(int layers)
  57{
  58	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
  59
  60	return (1 << bits) - 1;
  61}
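
/*
 * For example, with IDR_BITS == 8 (the value defined in <linux/idr.h> for
 * kernels of this vintage), idr_max(1) == 255 and idr_max(2) == 65535.
 */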
  62
  63/*
  64 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
  65 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
  66 * so on.
  67 */
  68static int idr_layer_prefix_mask(int layer)
  69{
  70	return ~idr_max(layer + 1);
  71}
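
/*
 * Continuing the IDR_BITS == 8 example above: the prefix mask is ~0xff for
 * a layer 0 (leaf) node and ~0xffff for a layer 1 node, i.e. the id bits
 * shared by every slot reachable from that node.
 */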
  72
  73static struct idr_layer *get_from_free_list(struct idr *idp)
  74{
  75	struct idr_layer *p;
  76	unsigned long flags;
  77
  78	spin_lock_irqsave(&idp->lock, flags);
  79	if ((p = idp->id_free)) {
  80		idp->id_free = p->ary[0];
  81		idp->id_free_cnt--;
  82		p->ary[0] = NULL;
  83	}
  84	spin_unlock_irqrestore(&idp->lock, flags);
  85	return(p);
  86}
  87
  88/**
  89 * idr_layer_alloc - allocate a new idr_layer
  90 * @gfp_mask: allocation mask
  91 * @layer_idr: optional idr to allocate from
  92 *
  93 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
  94 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
  95 * an idr_layer from @idr->id_free.
  96 *
  97 * @layer_idr is to maintain backward compatibility with the old alloc
  98 * interface - idr_pre_get() and idr_get_new*() - and will be removed
  99 * together with per-pool preload buffer.
 100 */
 101static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 102{
 103	struct idr_layer *new;
 104
 105	/* this is the old path, bypass to get_from_free_list() */
 106	if (layer_idr)
 107		return get_from_free_list(layer_idr);
 108
 109	/*
 110	 * Try to allocate directly from kmem_cache.  We want to try this
 111	 * before preload buffer; otherwise, non-preloading idr_alloc()
 112	 * users will end up taking advantage of preloading ones.  As the
 113	 * following is allowed to fail for preloaded cases, suppress
 114	 * warning this time.
 115	 */
 116	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 117	if (new)
 118		return new;
 119
 120	/*
 121	 * Try to fetch one from the per-cpu preload buffer if in process
 122	 * context.  See idr_preload() for details.
 123	 */
 124	if (!in_interrupt()) {
 125		preempt_disable();
 126		new = __this_cpu_read(idr_preload_head);
 127		if (new) {
 128			__this_cpu_write(idr_preload_head, new->ary[0]);
 129			__this_cpu_dec(idr_preload_cnt);
 130			new->ary[0] = NULL;
 131		}
 132		preempt_enable();
 133		if (new)
 134			return new;
 135	}
 136
 137	/*
 138	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
 139	 * that memory allocation failure warning is printed as intended.
 140	 */
 141	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 142}
 143
 144static void idr_layer_rcu_free(struct rcu_head *head)
 145{
 146	struct idr_layer *layer;
 147
 148	layer = container_of(head, struct idr_layer, rcu_head);
 149	kmem_cache_free(idr_layer_cache, layer);
 150}
 151
 152static inline void free_layer(struct idr *idr, struct idr_layer *p)
 153{
 154	if (idr->hint && idr->hint == p)
 155		RCU_INIT_POINTER(idr->hint, NULL);
 156	call_rcu(&p->rcu_head, idr_layer_rcu_free);
 157}
 158
 159/* only called when idp->lock is held */
 160static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 161{
 162	p->ary[0] = idp->id_free;
 163	idp->id_free = p;
 164	idp->id_free_cnt++;
 165}
 166
 167static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 168{
 169	unsigned long flags;
 170
 171	/*
 172	 * Depends on the return element being zeroed.
 173	 */
 174	spin_lock_irqsave(&idp->lock, flags);
 175	__move_to_free_list(idp, p);
 176	spin_unlock_irqrestore(&idp->lock, flags);
 177}
 178
 179static void idr_mark_full(struct idr_layer **pa, int id)
 180{
 181	struct idr_layer *p = pa[0];
 182	int l = 0;
 183
 184	__set_bit(id & IDR_MASK, p->bitmap);
 185	/*
 186	 * If this layer is full mark the bit in the layer above to
 187	 * show that this part of the radix tree is full.  This may
 188	 * complete the layer above and require walking up the radix
 189	 * tree.
 190	 */
 191	while (bitmap_full(p->bitmap, IDR_SIZE)) {
 192		if (!(p = pa[++l]))
 193			break;
 194		id = id >> IDR_BITS;
 195		__set_bit((id & IDR_MASK), p->bitmap);
 196	}
 197}
 198
 199static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 200{
 201	while (idp->id_free_cnt < MAX_IDR_FREE) {
 202		struct idr_layer *new;
 203		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 204		if (new == NULL)
 205			return (0);
 206		move_to_free_list(idp, new);
 207	}
 208	return 1;
 209}
 210
 211/**
 212 * sub_alloc - try to allocate an id without growing the tree depth
 213 * @idp: idr handle
 214 * @starting_id: id to start search at
 215 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 216 * @gfp_mask: allocation mask for idr_layer_alloc()
 217 * @layer_idr: optional idr passed to idr_layer_alloc()
 218 *
 219 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 220 * growing its depth.  Returns
 221 *
 222 *  the allocated id >= 0 if successful,
 223 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 224 *  -ENOSPC if the id space is exhausted,
 225 *  -ENOMEM if more idr_layers need to be allocated.
 226 */
 227static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
 228		     gfp_t gfp_mask, struct idr *layer_idr)
 229{
 230	int n, m, sh;
 231	struct idr_layer *p, *new;
 232	int l, id, oid;
 233
 234	id = *starting_id;
 235 restart:
 236	p = idp->top;
 237	l = idp->layers;
 238	pa[l--] = NULL;
 239	while (1) {
 240		/*
 241		 * We run around this while loop until we reach the leaf node...
 242		 */
 243		n = (id >> (IDR_BITS*l)) & IDR_MASK;
 244		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
 245		if (m == IDR_SIZE) {
 246			/* no space available, go back to the previous layer. */
 247			l++;
 248			oid = id;
 249			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 250
 251			/* if already at the top layer, we need to grow */
 252			if (id >= 1 << (idp->layers * IDR_BITS)) {
 253				*starting_id = id;
 254				return -EAGAIN;
 255			}
 256			p = pa[l];
 257			BUG_ON(!p);
 258
 259			/* If we need to go up one layer, continue the
 260			 * loop; otherwise, restart from the top.
 261			 */
 262			sh = IDR_BITS * (l + 1);
 263			if (oid >> sh == id >> sh)
 264				continue;
 265			else
 266				goto restart;
 267		}
 268		if (m != n) {
 269			sh = IDR_BITS*l;
 270			id = ((id >> sh) ^ n ^ m) << sh;
 271		}
 272		if ((id >= MAX_IDR_BIT) || (id < 0))
 273			return -ENOSPC;
 274		if (l == 0)
 275			break;
 276		/*
 277		 * Create the layer below if it is missing.
 278		 */
 279		if (!p->ary[m]) {
 280			new = idr_layer_alloc(gfp_mask, layer_idr);
 281			if (!new)
 282				return -ENOMEM;
 283			new->layer = l-1;
 284			new->prefix = id & idr_layer_prefix_mask(new->layer);
 285			rcu_assign_pointer(p->ary[m], new);
 286			p->count++;
 287		}
 288		pa[l--] = p;
 289		p = p->ary[m];
 290	}
 291
 292	pa[l] = p;
 293	return id;
 294}
 295
 296static int idr_get_empty_slot(struct idr *idp, int starting_id,
 297			      struct idr_layer **pa, gfp_t gfp_mask,
 298			      struct idr *layer_idr)
 299{
 300	struct idr_layer *p, *new;
 301	int layers, v, id;
 302	unsigned long flags;
 303
 304	id = starting_id;
 305build_up:
 306	p = idp->top;
 307	layers = idp->layers;
 308	if (unlikely(!p)) {
 309		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
 310			return -ENOMEM;
 311		p->layer = 0;
 312		layers = 1;
 313	}
 314	/*
 315	 * Add a new layer to the top of the tree if the requested
 316	 * id is larger than the currently allocated space.
 317	 */
 318	while (id > idr_max(layers)) {
 319		layers++;
 320		if (!p->count) {
 321			/* special case: if the tree is currently empty,
 322			 * then we grow the tree by moving the top node
 323			 * upwards.
 324			 */
 325			p->layer++;
 326			WARN_ON_ONCE(p->prefix);
 327			continue;
 328		}
 329		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
 330			/*
 331			 * The allocation failed.  If we built part of
 332			 * the structure tear it down.
 333			 */
 334			spin_lock_irqsave(&idp->lock, flags);
 335			for (new = p; p && p != idp->top; new = p) {
 336				p = p->ary[0];
 337				new->ary[0] = NULL;
 338				new->count = 0;
 339				bitmap_clear(new->bitmap, 0, IDR_SIZE);
 340				__move_to_free_list(idp, new);
 341			}
 342			spin_unlock_irqrestore(&idp->lock, flags);
 343			return -ENOMEM;
 344		}
 345		new->ary[0] = p;
 346		new->count = 1;
 347		new->layer = layers-1;
 348		new->prefix = id & idr_layer_prefix_mask(new->layer);
 349		if (bitmap_full(p->bitmap, IDR_SIZE))
 350			__set_bit(0, new->bitmap);
 351		p = new;
 352	}
 353	rcu_assign_pointer(idp->top, p);
 354	idp->layers = layers;
 355	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
 356	if (v == -EAGAIN)
 357		goto build_up;
 358	return(v);
 359}
 360
 361/*
 362 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 363 * Install the user pointer @ptr and mark the slot full.
 364 */
 365static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 366			  struct idr_layer **pa)
 367{
 368	/* update hint used for lookup, cleared from free_layer() */
 369	rcu_assign_pointer(idr->hint, pa[0]);
 370
 371	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
 372	pa[0]->count++;
 373	idr_mark_full(pa, id);
 374}
 375
 376
 377/**
 378 * idr_preload - preload for idr_alloc()
 379 * @gfp_mask: allocation mask to use for preloading
 380 *
 381 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 382 * process context and each idr_preload() invocation should be matched with
 383 * idr_preload_end().  Note that preemption is disabled while preloaded.
 384 *
 385 * The first idr_alloc() in the preloaded section can be treated as if it
 386 * were invoked with @gfp_mask used for preloading.  This allows using more
 387 * permissive allocation masks for idrs protected by spinlocks.
 388 *
 389 * For example, if idr_alloc() below fails, the failure can be treated as
 390 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 391 *
 392 *	idr_preload(GFP_KERNEL);
 393 *	spin_lock(lock);
 394 *
 395 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 396 *
 397 *	spin_unlock(lock);
 398 *	idr_preload_end();
 399 *	if (id < 0)
 400 *		error;
 401 */
 402void idr_preload(gfp_t gfp_mask)
 403{
 404	/*
 405	 * Consuming preload buffer from non-process context breaks preload
 406	 * allocation guarantee.  Disallow usage from those contexts.
 407	 */
 408	WARN_ON_ONCE(in_interrupt());
 409	might_sleep_if(gfp_mask & __GFP_WAIT);
 410
 411	preempt_disable();
 412
 413	/*
 414	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
 415	 * return value from idr_alloc() needs to be checked for failure
 416	 * anyway.  Silently give up if allocation fails.  The caller can
 417	 * treat failures from idr_alloc() as if idr_alloc() were called
 418	 * with @gfp_mask which should be enough.
 419	 */
 420	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 421		struct idr_layer *new;
 422
 423		preempt_enable();
 424		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 425		preempt_disable();
 426		if (!new)
 427			break;
 428
 429		/* link the new one to per-cpu preload list */
 430		new->ary[0] = __this_cpu_read(idr_preload_head);
 431		__this_cpu_write(idr_preload_head, new);
 432		__this_cpu_inc(idr_preload_cnt);
 433	}
 434}
 435EXPORT_SYMBOL(idr_preload);
 436
 437/**
 438 * idr_alloc - allocate new idr entry
 439 * @idr: the (initialized) idr
 440 * @ptr: pointer to be associated with the new id
 441 * @start: the minimum id (inclusive)
 442 * @end: the maximum id (exclusive, <= 0 for max)
 443 * @gfp_mask: memory allocation flags
 444 *
 445 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 446 * available in the specified range, returns -ENOSPC.  On memory allocation
 447 * failure, returns -ENOMEM.
 448 *
 449 * Note that @end is treated as max when <= 0.  This is to always allow
 450 * using @start + N as @end as long as N is inside integer range.
 451 *
 452 * The user is responsible for exclusively synchronizing all operations
 453 * which may modify @idr.  However, read-only accesses such as idr_find()
 454 * or iteration can be performed under RCU read lock provided the user
 455 * destroys @ptr in RCU-safe way after removal from idr.
 456 */
 457int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 458{
 459	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
 460	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 461	int id;
 462
 463	might_sleep_if(gfp_mask & __GFP_WAIT);
 464
 465	/* sanity checks */
 466	if (WARN_ON_ONCE(start < 0))
 467		return -EINVAL;
 468	if (unlikely(max < start))
 469		return -ENOSPC;
 470
 471	/* allocate id */
 472	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
 473	if (unlikely(id < 0))
 474		return id;
 475	if (unlikely(id > max))
 476		return -ENOSPC;
 477
 478	idr_fill_slot(idr, ptr, id, pa);
 479	return id;
 480}
 481EXPORT_SYMBOL_GPL(idr_alloc);
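
/*
 * Example sketch of the range semantics described above (foo_idr and ptr
 * are illustrative):
 *
 *	id = idr_alloc(&foo_idr, ptr, 1, 100, GFP_KERNEL);
 *
 * returns an id in [1, 100), -ENOSPC if that range is exhausted, or
 * -ENOMEM if layer allocation fails, while
 *
 *	id = idr_alloc(&foo_idr, ptr, 1, 0, GFP_KERNEL);
 *
 * accepts any id >= 1 because @end <= 0 means "no upper limit".
 */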
 482
 483/**
 484 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 485 * @idr: the (initialized) idr
 486 * @ptr: pointer to be associated with the new id
 487 * @start: the minimum id (inclusive)
 488 * @end: the maximum id (exclusive, <= 0 for max)
 489 * @gfp_mask: memory allocation flags
 490 *
 491 * Essentially the same as idr_alloc, but prefers to allocate progressively
 492 * higher ids if it can.  If the "cur" counter wraps, it will start again
 493 * at the "start" end of the range and may allocate ids that were used before.
 494 */
 495int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
 496			gfp_t gfp_mask)
 497{
 498	int id;
 499
 500	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
 501	if (id == -ENOSPC)
 502		id = idr_alloc(idr, ptr, start, end, gfp_mask);
 503
 504	if (likely(id >= 0))
 505		idr->cur = id + 1;
 506	return id;
 507}
 508EXPORT_SYMBOL(idr_alloc_cyclic);
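
/*
 * Sketch: cyclic allocation suits ids that should not be reused right
 * away, e.g. handles handed out to user space (foo_idr and foo_lock are
 * illustrative; the caller still serializes writers):
 *
 *	spin_lock(&foo_lock);
 *	id = idr_alloc_cyclic(&foo_idr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&foo_lock);
 */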
 509
 510static void idr_remove_warning(int id)
 511{
 512	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
 513}
 514
 515static void sub_remove(struct idr *idp, int shift, int id)
 516{
 517	struct idr_layer *p = idp->top;
 518	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
 519	struct idr_layer ***paa = &pa[0];
 520	struct idr_layer *to_free;
 521	int n;
 522
 523	*paa = NULL;
 524	*++paa = &idp->top;
 525
 526	while ((shift > 0) && p) {
 527		n = (id >> shift) & IDR_MASK;
 528		__clear_bit(n, p->bitmap);
 529		*++paa = &p->ary[n];
 530		p = p->ary[n];
 531		shift -= IDR_BITS;
 532	}
 533	n = id & IDR_MASK;
 534	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 535		__clear_bit(n, p->bitmap);
 536		RCU_INIT_POINTER(p->ary[n], NULL);
 537		to_free = NULL;
 538		while(*paa && ! --((**paa)->count)){
 539			if (to_free)
 540				free_layer(idp, to_free);
 541			to_free = **paa;
 542			**paa-- = NULL;
 543		}
 544		if (!*paa)
 545			idp->layers = 0;
 546		if (to_free)
 547			free_layer(idp, to_free);
 548	} else
 549		idr_remove_warning(id);
 550}
 551
 552/**
 553 * idr_remove - remove the given id and free its slot
 554 * @idp: idr handle
 555 * @id: unique key
 556 */
 557void idr_remove(struct idr *idp, int id)
 558{
 559	struct idr_layer *p;
 560	struct idr_layer *to_free;
 561
 562	if (id < 0)
 563		return;
 564
 565	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 566	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
 567	    idp->top->ary[0]) {
 568		/*
 569		 * Single child at leftmost slot: we can shrink the tree.
 570		 * This level is not needed anymore since when layers are
 571		 * inserted, they are inserted at the top of the existing
 572		 * tree.
 573		 */
 574		to_free = idp->top;
 575		p = idp->top->ary[0];
 576		rcu_assign_pointer(idp->top, p);
 577		--idp->layers;
 578		to_free->count = 0;
 579		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
 580		free_layer(idp, to_free);
 581	}
 582	while (idp->id_free_cnt >= MAX_IDR_FREE) {
 583		p = get_from_free_list(idp);
 584		/*
 585		 * Note: we don't call the rcu callback here, since the only
 586		 * layers that fall into the freelist are those that have been
 587		 * preallocated.
 588		 */
 589		kmem_cache_free(idr_layer_cache, p);
 590	}
 591	return;
 592}
 593EXPORT_SYMBOL(idr_remove);
 594
 595static void __idr_remove_all(struct idr *idp)
 596{
 597	int n, id, max;
 598	int bt_mask;
 599	struct idr_layer *p;
 600	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 601	struct idr_layer **paa = &pa[0];
 602
 603	n = idp->layers * IDR_BITS;
 604	p = idp->top;
 605	RCU_INIT_POINTER(idp->top, NULL);
 606	max = idr_max(idp->layers);
 607
 608	id = 0;
 609	while (id >= 0 && id <= max) {
 610		while (n > IDR_BITS && p) {
 611			n -= IDR_BITS;
 612			*paa++ = p;
 613			p = p->ary[(id >> n) & IDR_MASK];
 614		}
 615
 616		bt_mask = id;
 617		id += 1 << n;
 618		/* Get the highest bit that the above add changed from 0->1. */
 619		while (n < fls(id ^ bt_mask)) {
 620			if (p)
 621				free_layer(idp, p);
 622			n += IDR_BITS;
 623			p = *--paa;
 624		}
 625	}
 626	idp->layers = 0;
 627}
 628
 629/**
 630 * idr_destroy - release all cached layers within an idr tree
 631 * @idp: idr handle
 632 *
 633 * Free all id mappings and all idp_layers.  After this function, @idp is
 634 * completely unused and can be freed / recycled.  The caller is
 635 * responsible for ensuring that no one else accesses @idp during or after
 636 * idr_destroy().
 637 *
 638 * A typical clean-up sequence for objects stored in an idr tree will use
 639 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 640 * free up the id mappings and cached idr_layers.
 641 */
 642void idr_destroy(struct idr *idp)
 643{
 644	__idr_remove_all(idp);
 645
 646	while (idp->id_free_cnt) {
 647		struct idr_layer *p = get_from_free_list(idp);
 648		kmem_cache_free(idr_layer_cache, p);
 649	}
 650}
 651EXPORT_SYMBOL(idr_destroy);
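
/*
 * Sketch of the clean-up sequence mentioned in the comment above (free_fn
 * and foo_idr are illustrative):
 *
 *	static int free_fn(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&foo_idr, free_fn, NULL);
 *	idr_destroy(&foo_idr);
 */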
 652
 653void *idr_find_slowpath(struct idr *idp, int id)
 654{
 655	int n;
 656	struct idr_layer *p;
 657
 658	if (id < 0)
 659		return NULL;
 660
 661	p = rcu_dereference_raw(idp->top);
 662	if (!p)
 663		return NULL;
 664	n = (p->layer+1) * IDR_BITS;
 665
 666	if (id > idr_max(p->layer + 1))
 667		return NULL;
 668	BUG_ON(n == 0);
 669
 670	while (n > 0 && p) {
 671		n -= IDR_BITS;
 672		BUG_ON(n != p->layer*IDR_BITS);
 673		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 674	}
 675	return((void *)p);
 676}
 677EXPORT_SYMBOL(idr_find_slowpath);
 678
 679/**
 680 * idr_for_each - iterate through all stored pointers
 681 * @idp: idr handle
 682 * @fn: function to be called for each pointer
 683 * @data: data passed back to callback function
 684 *
 685 * Iterate over the pointers registered with the given idr.  The
 686 * callback function will be called for each pointer currently
 687 * registered, passing the id, the pointer and the data pointer passed
 688 * to this function.  It is not safe to modify the idr tree while in
 689 * the callback, so functions such as idr_get_new and idr_remove are
 690 * not allowed.
 691 *
 692 * We check the return of @fn each time. If it returns anything other
 693 * than %0, we break out and return that value.
 694 *
 695 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 696 */
 697int idr_for_each(struct idr *idp,
 698		 int (*fn)(int id, void *p, void *data), void *data)
 699{
 700	int n, id, max, error = 0;
 701	struct idr_layer *p;
 702	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 703	struct idr_layer **paa = &pa[0];
 704
 705	n = idp->layers * IDR_BITS;
 706	p = rcu_dereference_raw(idp->top);
 707	max = idr_max(idp->layers);
 708
 709	id = 0;
 710	while (id >= 0 && id <= max) {
 711		while (n > 0 && p) {
 712			n -= IDR_BITS;
 713			*paa++ = p;
 714			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 715		}
 716
 717		if (p) {
 718			error = fn(id, (void *)p, data);
 719			if (error)
 720				break;
 721		}
 722
 723		id += 1 << n;
 724		while (n < fls(id)) {
 725			n += IDR_BITS;
 726			p = *--paa;
 727		}
 728	}
 729
 730	return error;
 731}
 732EXPORT_SYMBOL(idr_for_each);
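
/*
 * A read-only walk sketch (count_fn and foo_idr are illustrative): count
 * the pointers currently registered.  Returning non-zero from the callback
 * would stop the iteration early.
 *
 *	static int count_fn(int id, void *p, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int nr = 0;
 *	idr_for_each(&foo_idr, count_fn, &nr);
 */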
 733
 734/**
 735 * idr_get_next - look up the next object starting from the given id.
 736 * @idp: idr handle
 737 * @nextidp:  pointer to lookup key
 738 *
 739 * Returns a pointer to the registered object with the lowest id greater
 740 * than or equal to *@nextidp.  After lookup, *@nextidp is updated to that
 741 * id for the next iteration.
 742 *
 743 * This function can be called under rcu_read_lock(), given that the leaf
 744 * pointers lifetimes are correctly managed.
 745 */
 746void *idr_get_next(struct idr *idp, int *nextidp)
 747{
 748	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
 749	struct idr_layer **paa = &pa[0];
 750	int id = *nextidp;
 751	int n, max;
 752
 753	/* find first ent */
 754	p = rcu_dereference_raw(idp->top);
 755	if (!p)
 756		return NULL;
 757	n = (p->layer + 1) * IDR_BITS;
 758	max = idr_max(p->layer + 1);
 759
 760	while (id >= 0 && id <= max) {
 761		while (n > 0 && p) {
 762			n -= IDR_BITS;
 763			*paa++ = p;
 764			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 765		}
 766
 767		if (p) {
 768			*nextidp = id;
 769			return p;
 770		}
 771
 772		/*
 773		 * Proceed to the next layer at the current level.  Unlike
 774		 * idr_for_each(), @id isn't guaranteed to be aligned to
 775		 * layer boundary at this point and adding 1 << n may
 776		 * incorrectly skip IDs.  Make sure we jump to the
 777		 * beginning of the next layer using round_up().
 778		 */
 779		id = round_up(id + 1, 1 << n);
 780		while (n < fls(id)) {
 781			n += IDR_BITS;
 782			p = *--paa;
 783		}
 784	}
 785	return NULL;
 786}
 787EXPORT_SYMBOL(idr_get_next);
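
/*
 * Iteration sketch built on idr_get_next() (foo_idr and do_something are
 * illustrative); this is roughly the loop that the idr_for_each_entry()
 * helper in <linux/idr.h> expands to:
 *
 *	void *p;
 *	int id = 0;
 *
 *	while ((p = idr_get_next(&foo_idr, &id)) != NULL) {
 *		do_something(p, id);
 *		id++;
 *	}
 */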
 788
 789
 790/**
 791 * idr_replace - replace pointer for given id
 792 * @idp: idr handle
 793 * @ptr: pointer you want associated with the id
 794 * @id: lookup key
 795 *
 796 * Replace the pointer registered with an id and return the old value.
 797 * A %-ENOENT return indicates that @id was not found.
 798 * A %-EINVAL return indicates that @id was not within valid constraints.
 799 *
 800 * The caller must serialize with writers.
 801 */
 802void *idr_replace(struct idr *idp, void *ptr, int id)
 803{
 804	int n;
 805	struct idr_layer *p, *old_p;
 806
 807	if (id < 0)
 808		return ERR_PTR(-EINVAL);
 809
 810	p = idp->top;
 811	if (!p)
 812		return ERR_PTR(-EINVAL);
 813
 814	n = (p->layer+1) * IDR_BITS;
 815
 816	if (id >= (1 << n))
 817		return ERR_PTR(-EINVAL);
 818
 819	n -= IDR_BITS;
 820	while ((n > 0) && p) {
 821		p = p->ary[(id >> n) & IDR_MASK];
 822		n -= IDR_BITS;
 823	}
 824
 825	n = id & IDR_MASK;
 826	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
 827		return ERR_PTR(-ENOENT);
 828
 829	old_p = p->ary[n];
 830	rcu_assign_pointer(p->ary[n], ptr);
 831
 832	return old_p;
 833}
 834EXPORT_SYMBOL(idr_replace);
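
/*
 * Sketch (foo_idr is illustrative): errors come back as ERR_PTR() values,
 * so check the result with IS_ERR() before using the old pointer:
 *
 *	old = idr_replace(&foo_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *
 * and free the old object in an RCU-safe way if lookups run under RCU.
 */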
 835
 836void __init idr_init_cache(void)
 837{
 838	idr_layer_cache = kmem_cache_create("idr_layer_cache",
 839				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
 840}
 841
 842/**
 843 * idr_init - initialize idr handle
 844 * @idp:	idr handle
 845 *
 846 * This function is used to set up the handle (@idp) that you will pass
 847 * to the rest of the functions.
 848 */
 849void idr_init(struct idr *idp)
 850{
 851	memset(idp, 0, sizeof(struct idr));
 852	spin_lock_init(&idp->lock);
 853}
 854EXPORT_SYMBOL(idr_init);
 855
 856static int idr_has_entry(int id, void *p, void *data)
 857{
 858	return 1;
 859}
 860
 861bool idr_is_empty(struct idr *idp)
 862{
 863	return !idr_for_each(idp, idr_has_entry, NULL);
 864}
 865EXPORT_SYMBOL(idr_is_empty);
 866
 867/**
 868 * DOC: IDA description
 869 * IDA - IDR based ID allocator
 870 *
 871 * This is an id allocator without id -> pointer translation.  Memory
 872 * usage is much lower than a full-blown idr because each id only
 873 * occupies a bit.  ida uses a custom leaf node which contains
 874 * IDA_BITMAP_BITS slots.
 875 *
 876 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 877 */
 878
 879static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 880{
 881	unsigned long flags;
 882
 883	if (!ida->free_bitmap) {
 884		spin_lock_irqsave(&ida->idr.lock, flags);
 885		if (!ida->free_bitmap) {
 886			ida->free_bitmap = bitmap;
 887			bitmap = NULL;
 888		}
 889		spin_unlock_irqrestore(&ida->idr.lock, flags);
 890	}
 891
 892	kfree(bitmap);
 893}
 894
 895/**
 896 * ida_pre_get - reserve resources for ida allocation
 897 * @ida:	ida handle
 898 * @gfp_mask:	memory allocation flag
 899 *
 900 * This function should be called prior to taking the lock and calling
 901 * ida_get_new_above().  It preallocates enough memory to satisfy the
 902 * worst possible allocation.
 903 *
 904 * If the system is REALLY out of memory this function returns %0,
 905 * otherwise %1.
 906 */
 907int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 908{
 909	/* allocate idr_layers */
 910	if (!__idr_pre_get(&ida->idr, gfp_mask))
 911		return 0;
 912
 913	/* allocate free_bitmap */
 914	if (!ida->free_bitmap) {
 915		struct ida_bitmap *bitmap;
 916
 917		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
 918		if (!bitmap)
 919			return 0;
 920
 921		free_bitmap(ida, bitmap);
 922	}
 923
 924	return 1;
 925}
 926EXPORT_SYMBOL(ida_pre_get);
 927
 928/**
 929 * ida_get_new_above - allocate new ID above or equal to a start id
 930 * @ida:	ida handle
 931 * @starting_id: id to start search at
 932 * @p_id:	pointer to the allocated handle
 933 *
 934 * Allocate new ID above or equal to @starting_id.  It should be called
 935 * with any required locks.
 936 *
 937 * If memory is required, it will return %-EAGAIN; you should unlock
 938 * and go back to the ida_pre_get() call.  If the ida is full, it will
 939 * return %-ENOSPC.
 940 *
 941 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 942 */
 943int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 944{
 945	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 946	struct ida_bitmap *bitmap;
 947	unsigned long flags;
 948	int idr_id = starting_id / IDA_BITMAP_BITS;
 949	int offset = starting_id % IDA_BITMAP_BITS;
 950	int t, id;
 951
 952 restart:
 953	/* get vacant slot */
 954	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
 955	if (t < 0)
 956		return t == -ENOMEM ? -EAGAIN : t;
 957
 958	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 959		return -ENOSPC;
 960
 961	if (t != idr_id)
 962		offset = 0;
 963	idr_id = t;
 964
 965	/* if bitmap isn't there, create a new one */
 966	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
 967	if (!bitmap) {
 968		spin_lock_irqsave(&ida->idr.lock, flags);
 969		bitmap = ida->free_bitmap;
 970		ida->free_bitmap = NULL;
 971		spin_unlock_irqrestore(&ida->idr.lock, flags);
 972
 973		if (!bitmap)
 974			return -EAGAIN;
 975
 976		memset(bitmap, 0, sizeof(struct ida_bitmap));
 977		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
 978				(void *)bitmap);
 979		pa[0]->count++;
 980	}
 981
 982	/* lookup for empty slot */
 983	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
 984	if (t == IDA_BITMAP_BITS) {
 985		/* no empty slot after offset, continue to the next chunk */
 986		idr_id++;
 987		offset = 0;
 988		goto restart;
 989	}
 990
 991	id = idr_id * IDA_BITMAP_BITS + t;
 992	if (id >= MAX_IDR_BIT)
 993		return -ENOSPC;
 994
 995	__set_bit(t, bitmap->bitmap);
 996	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
 997		idr_mark_full(pa, idr_id);
 998
 999	*p_id = id;
1000
1001	/* Each leaf node can handle nearly a thousand slots and the
1002	 * whole idea of ida is to have a small memory footprint.
1003	 * Throw away extra resources one by one after each successful
1004	 * allocation.
1005	 */
1006	if (ida->idr.id_free_cnt || ida->free_bitmap) {
1007		struct idr_layer *p = get_from_free_list(&ida->idr);
1008		if (p)
1009			kmem_cache_free(idr_layer_cache, p);
1010	}
1011
1012	return 0;
1013}
1014EXPORT_SYMBOL(ida_get_new_above);
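
/*
 * Classic pre_get/retry sketch for the -EAGAIN case described above
 * (my_ida and my_lock are illustrative); ida_simple_get() below wraps the
 * same pattern:
 *
 *	int ret, id;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;
 */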
1015
1016/**
1017 * ida_remove - remove the given ID
1018 * @ida:	ida handle
1019 * @id:		ID to free
1020 */
1021void ida_remove(struct ida *ida, int id)
1022{
1023	struct idr_layer *p = ida->idr.top;
1024	int shift = (ida->idr.layers - 1) * IDR_BITS;
1025	int idr_id = id / IDA_BITMAP_BITS;
1026	int offset = id % IDA_BITMAP_BITS;
1027	int n;
1028	struct ida_bitmap *bitmap;
1029
1030	/* clear full bits while looking up the leaf idr_layer */
1031	while ((shift > 0) && p) {
1032		n = (idr_id >> shift) & IDR_MASK;
1033		__clear_bit(n, p->bitmap);
1034		p = p->ary[n];
1035		shift -= IDR_BITS;
1036	}
1037
1038	if (p == NULL)
1039		goto err;
1040
1041	n = idr_id & IDR_MASK;
1042	__clear_bit(n, p->bitmap);
1043
1044	bitmap = (void *)p->ary[n];
1045	if (!test_bit(offset, bitmap->bitmap))
1046		goto err;
1047
1048	/* update bitmap and remove it if empty */
1049	__clear_bit(offset, bitmap->bitmap);
1050	if (--bitmap->nr_busy == 0) {
1051		__set_bit(n, p->bitmap);	/* to please idr_remove() */
1052		idr_remove(&ida->idr, idr_id);
1053		free_bitmap(ida, bitmap);
1054	}
1055
1056	return;
1057
1058 err:
1059	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1060}
1061EXPORT_SYMBOL(ida_remove);
1062
1063/**
1064 * ida_destroy - release all cached layers within an ida tree
1065 * @ida:		ida handle
1066 */
1067void ida_destroy(struct ida *ida)
1068{
1069	idr_destroy(&ida->idr);
1070	kfree(ida->free_bitmap);
1071}
1072EXPORT_SYMBOL(ida_destroy);
1073
1074/**
1075 * ida_simple_get - get a new id.
1076 * @ida: the (initialized) ida.
1077 * @start: the minimum id (inclusive, < 0x80000000)
1078 * @end: the maximum id (exclusive, < 0x80000000 or 0)
1079 * @gfp_mask: memory allocation flags
1080 *
1081 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
1082 * On memory allocation failure, returns -ENOMEM.
1083 *
1084 * Use ida_simple_remove() to get rid of an id.
1085 */
1086int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
1087		   gfp_t gfp_mask)
1088{
1089	int ret, id;
1090	unsigned int max;
1091	unsigned long flags;
1092
1093	BUG_ON((int)start < 0);
1094	BUG_ON((int)end < 0);
1095
1096	if (end == 0)
1097		max = 0x80000000;
1098	else {
1099		BUG_ON(end < start);
1100		max = end - 1;
1101	}
1102
1103again:
1104	if (!ida_pre_get(ida, gfp_mask))
1105		return -ENOMEM;
1106
1107	spin_lock_irqsave(&simple_ida_lock, flags);
1108	ret = ida_get_new_above(ida, start, &id);
1109	if (!ret) {
1110		if (id > max) {
1111			ida_remove(ida, id);
1112			ret = -ENOSPC;
1113		} else {
1114			ret = id;
1115		}
1116	}
1117	spin_unlock_irqrestore(&simple_ida_lock, flags);
1118
1119	if (unlikely(ret == -EAGAIN))
1120		goto again;
1121
1122	return ret;
1123}
1124EXPORT_SYMBOL(ida_simple_get);
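
/*
 * Sketch (my_ida is illustrative): ida_simple_get() and ida_simple_remove()
 * handle the pre_get retry and locking internally, so a typical user only
 * needs:
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	(use the id)
 *	ida_simple_remove(&my_ida, id);
 */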
1125
1126/**
1127 * ida_simple_remove - remove an allocated id.
1128 * @ida: the (initialized) ida.
1129 * @id: the id returned by ida_simple_get.
1130 */
1131void ida_simple_remove(struct ida *ida, unsigned int id)
1132{
1133	unsigned long flags;
1134
1135	BUG_ON((int)id < 0);
1136	spin_lock_irqsave(&simple_ida_lock, flags);
1137	ida_remove(ida, id);
1138	spin_unlock_irqrestore(&simple_ida_lock, flags);
1139}
1140EXPORT_SYMBOL(ida_simple_remove);
1141
1142/**
1143 * ida_init - initialize ida handle
1144 * @ida:	ida handle
1145 *
1146 * This function is used to set up the handle (@ida) that you will pass
1147 * to the rest of the functions.
1148 */
1149void ida_init(struct ida *ida)
1150{
1151	memset(ida, 0, sizeof(struct ida));
1152	idr_init(&ida->idr);
1153
1154}
1155EXPORT_SYMBOL(ida_init);
   1/*
   2 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
   3 *	Copyright (C) 2002 by Concurrent Computer Corporation
   4 *	Distributed under the GNU GPL license version 2.
   5 *
   6 * Modified by George Anzinger to reuse immediately and to use
   7 * find bit instructions.  Also removed _irq on spinlocks.
   8 *
   9 * Modified by Nadia Derbey to make it RCU safe.
  10 *
  11 * Small id to pointer translation service.
  12 *
  13 * It uses a radix tree like structure as a sparse array indexed
  14 * by the id to obtain the pointer.  The bitmap makes allocating
  15 * a new id quick.
  16 *
  17 * You call it to allocate an id (an int) an associate with that id a
  18 * pointer or what ever, we treat it as a (void *).  You can pass this
  19 * id to a user for him to pass back at a later time.  You then pass
  20 * that id to this code and it returns your pointer.
 
 
 
 
 
 
  21 */
  22
  23#ifndef TEST                        // to test in user space...
  24#include <linux/slab.h>
  25#include <linux/init.h>
  26#include <linux/export.h>
  27#endif
  28#include <linux/err.h>
  29#include <linux/string.h>
  30#include <linux/idr.h>
  31#include <linux/spinlock.h>
  32#include <linux/percpu.h>
 
  33
  34#define MAX_IDR_SHIFT		(sizeof(int) * 8 - 1)
  35#define MAX_IDR_BIT		(1U << MAX_IDR_SHIFT)
  36
  37/* Leave the possibility of an incomplete final layer */
  38#define MAX_IDR_LEVEL ((MAX_IDR_SHIFT + IDR_BITS - 1) / IDR_BITS)
  39
  40/* Number of id_layer structs to leave in free list */
  41#define MAX_IDR_FREE (MAX_IDR_LEVEL * 2)
  42
  43static struct kmem_cache *idr_layer_cache;
  44static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head);
  45static DEFINE_PER_CPU(int, idr_preload_cnt);
  46static DEFINE_SPINLOCK(simple_ida_lock);
  47
  48/* the maximum ID which can be allocated given idr->layers */
  49static int idr_max(int layers)
  50{
  51	int bits = min_t(int, layers * IDR_BITS, MAX_IDR_SHIFT);
  52
  53	return (1 << bits) - 1;
  54}
  55
  56/*
  57 * Prefix mask for an idr_layer at @layer.  For layer 0, the prefix mask is
  58 * all bits except for the lower IDR_BITS.  For layer 1, 2 * IDR_BITS, and
  59 * so on.
  60 */
  61static int idr_layer_prefix_mask(int layer)
  62{
  63	return ~idr_max(layer + 1);
  64}
  65
  66static struct idr_layer *get_from_free_list(struct idr *idp)
  67{
  68	struct idr_layer *p;
  69	unsigned long flags;
  70
  71	spin_lock_irqsave(&idp->lock, flags);
  72	if ((p = idp->id_free)) {
  73		idp->id_free = p->ary[0];
  74		idp->id_free_cnt--;
  75		p->ary[0] = NULL;
  76	}
  77	spin_unlock_irqrestore(&idp->lock, flags);
  78	return(p);
  79}
  80
  81/**
  82 * idr_layer_alloc - allocate a new idr_layer
  83 * @gfp_mask: allocation mask
  84 * @layer_idr: optional idr to allocate from
  85 *
  86 * If @layer_idr is %NULL, directly allocate one using @gfp_mask or fetch
  87 * one from the per-cpu preload buffer.  If @layer_idr is not %NULL, fetch
  88 * an idr_layer from @idr->id_free.
  89 *
  90 * @layer_idr is to maintain backward compatibility with the old alloc
  91 * interface - idr_pre_get() and idr_get_new*() - and will be removed
  92 * together with per-pool preload buffer.
  93 */
  94static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
  95{
  96	struct idr_layer *new;
  97
  98	/* this is the old path, bypass to get_from_free_list() */
  99	if (layer_idr)
 100		return get_from_free_list(layer_idr);
 101
 102	/*
 103	 * Try to allocate directly from kmem_cache.  We want to try this
 104	 * before preload buffer; otherwise, non-preloading idr_alloc()
 105	 * users will end up taking advantage of preloading ones.  As the
 106	 * following is allowed to fail for preloaded cases, suppress
 107	 * warning this time.
 108	 */
 109	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 110	if (new)
 111		return new;
 112
 113	/*
 114	 * Try to fetch one from the per-cpu preload buffer if in process
 115	 * context.  See idr_preload() for details.
 116	 */
 117	if (!in_interrupt()) {
 118		preempt_disable();
 119		new = __this_cpu_read(idr_preload_head);
 120		if (new) {
 121			__this_cpu_write(idr_preload_head, new->ary[0]);
 122			__this_cpu_dec(idr_preload_cnt);
 123			new->ary[0] = NULL;
 124		}
 125		preempt_enable();
 126		if (new)
 127			return new;
 128	}
 129
 130	/*
 131	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
 132	 * that memory allocation failure warning is printed as intended.
 133	 */
 134	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 135}
 136
 137static void idr_layer_rcu_free(struct rcu_head *head)
 138{
 139	struct idr_layer *layer;
 140
 141	layer = container_of(head, struct idr_layer, rcu_head);
 142	kmem_cache_free(idr_layer_cache, layer);
 143}
 144
 145static inline void free_layer(struct idr *idr, struct idr_layer *p)
 146{
 147	if (idr->hint == p)
 148		RCU_INIT_POINTER(idr->hint, NULL);
 149	call_rcu(&p->rcu_head, idr_layer_rcu_free);
 150}
 151
 152/* only called when idp->lock is held */
 153static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
 154{
 155	p->ary[0] = idp->id_free;
 156	idp->id_free = p;
 157	idp->id_free_cnt++;
 158}
 159
 160static void move_to_free_list(struct idr *idp, struct idr_layer *p)
 161{
 162	unsigned long flags;
 163
 164	/*
 165	 * Depends on the return element being zeroed.
 166	 */
 167	spin_lock_irqsave(&idp->lock, flags);
 168	__move_to_free_list(idp, p);
 169	spin_unlock_irqrestore(&idp->lock, flags);
 170}
 171
 172static void idr_mark_full(struct idr_layer **pa, int id)
 173{
 174	struct idr_layer *p = pa[0];
 175	int l = 0;
 176
 177	__set_bit(id & IDR_MASK, p->bitmap);
 178	/*
 179	 * If this layer is full mark the bit in the layer above to
 180	 * show that this part of the radix tree is full.  This may
 181	 * complete the layer above and require walking up the radix
 182	 * tree.
 183	 */
 184	while (bitmap_full(p->bitmap, IDR_SIZE)) {
 185		if (!(p = pa[++l]))
 186			break;
 187		id = id >> IDR_BITS;
 188		__set_bit((id & IDR_MASK), p->bitmap);
 189	}
 190}
 191
 192static int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 193{
 194	while (idp->id_free_cnt < MAX_IDR_FREE) {
 195		struct idr_layer *new;
 196		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 197		if (new == NULL)
 198			return (0);
 199		move_to_free_list(idp, new);
 200	}
 201	return 1;
 202}
 203
 204/**
 205 * sub_alloc - try to allocate an id without growing the tree depth
 206 * @idp: idr handle
 207 * @starting_id: id to start search at
 208 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 209 * @gfp_mask: allocation mask for idr_layer_alloc()
 210 * @layer_idr: optional idr passed to idr_layer_alloc()
 211 *
 212 * Allocate an id in range [@starting_id, INT_MAX] from @idp without
 213 * growing its depth.  Returns
 214 *
 215 *  the allocated id >= 0 if successful,
 216 *  -EAGAIN if the tree needs to grow for allocation to succeed,
 217 *  -ENOSPC if the id space is exhausted,
 218 *  -ENOMEM if more idr_layers need to be allocated.
 219 */
 220static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
 221		     gfp_t gfp_mask, struct idr *layer_idr)
 222{
 223	int n, m, sh;
 224	struct idr_layer *p, *new;
 225	int l, id, oid;
 226
 227	id = *starting_id;
 228 restart:
 229	p = idp->top;
 230	l = idp->layers;
 231	pa[l--] = NULL;
 232	while (1) {
 233		/*
 234		 * We run around this while until we reach the leaf node...
 235		 */
 236		n = (id >> (IDR_BITS*l)) & IDR_MASK;
 237		m = find_next_zero_bit(p->bitmap, IDR_SIZE, n);
 238		if (m == IDR_SIZE) {
 239			/* no space available go back to previous layer. */
 240			l++;
 241			oid = id;
 242			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 243
 244			/* if already at the top layer, we need to grow */
 245			if (id > idr_max(idp->layers)) {
 246				*starting_id = id;
 247				return -EAGAIN;
 248			}
 249			p = pa[l];
 250			BUG_ON(!p);
 251
 252			/* If we need to go up one layer, continue the
 253			 * loop; otherwise, restart from the top.
 254			 */
 255			sh = IDR_BITS * (l + 1);
 256			if (oid >> sh == id >> sh)
 257				continue;
 258			else
 259				goto restart;
 260		}
 261		if (m != n) {
 262			sh = IDR_BITS*l;
 263			id = ((id >> sh) ^ n ^ m) << sh;
 264		}
 265		if ((id >= MAX_IDR_BIT) || (id < 0))
 266			return -ENOSPC;
 267		if (l == 0)
 268			break;
 269		/*
 270		 * Create the layer below if it is missing.
 271		 */
 272		if (!p->ary[m]) {
 273			new = idr_layer_alloc(gfp_mask, layer_idr);
 274			if (!new)
 275				return -ENOMEM;
 276			new->layer = l-1;
 277			new->prefix = id & idr_layer_prefix_mask(new->layer);
 278			rcu_assign_pointer(p->ary[m], new);
 279			p->count++;
 280		}
 281		pa[l--] = p;
 282		p = p->ary[m];
 283	}
 284
 285	pa[l] = p;
 286	return id;
 287}
 288
 289static int idr_get_empty_slot(struct idr *idp, int starting_id,
 290			      struct idr_layer **pa, gfp_t gfp_mask,
 291			      struct idr *layer_idr)
 292{
 293	struct idr_layer *p, *new;
 294	int layers, v, id;
 295	unsigned long flags;
 296
 297	id = starting_id;
 298build_up:
 299	p = idp->top;
 300	layers = idp->layers;
 301	if (unlikely(!p)) {
 302		if (!(p = idr_layer_alloc(gfp_mask, layer_idr)))
 303			return -ENOMEM;
 304		p->layer = 0;
 305		layers = 1;
 306	}
 307	/*
 308	 * Add a new layer to the top of the tree if the requested
 309	 * id is larger than the currently allocated space.
 310	 */
 311	while (id > idr_max(layers)) {
 312		layers++;
 313		if (!p->count) {
 314			/* special case: if the tree is currently empty,
 315			 * then we grow the tree by moving the top node
 316			 * upwards.
 317			 */
 318			p->layer++;
 319			WARN_ON_ONCE(p->prefix);
 320			continue;
 321		}
 322		if (!(new = idr_layer_alloc(gfp_mask, layer_idr))) {
 323			/*
 324			 * The allocation failed.  If we built part of
 325			 * the structure tear it down.
 326			 */
 327			spin_lock_irqsave(&idp->lock, flags);
 328			for (new = p; p && p != idp->top; new = p) {
 329				p = p->ary[0];
 330				new->ary[0] = NULL;
 331				new->count = 0;
 332				bitmap_clear(new->bitmap, 0, IDR_SIZE);
 333				__move_to_free_list(idp, new);
 334			}
 335			spin_unlock_irqrestore(&idp->lock, flags);
 336			return -ENOMEM;
 337		}
 338		new->ary[0] = p;
 339		new->count = 1;
 340		new->layer = layers-1;
 341		new->prefix = id & idr_layer_prefix_mask(new->layer);
 342		if (bitmap_full(p->bitmap, IDR_SIZE))
 343			__set_bit(0, new->bitmap);
 344		p = new;
 345	}
 346	rcu_assign_pointer(idp->top, p);
 347	idp->layers = layers;
 348	v = sub_alloc(idp, &id, pa, gfp_mask, layer_idr);
 349	if (v == -EAGAIN)
 350		goto build_up;
 351	return(v);
 352}
 353
 354/*
 355 * @id and @pa are from a successful allocation from idr_get_empty_slot().
 356 * Install the user pointer @ptr and mark the slot full.
 357 */
 358static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 359			  struct idr_layer **pa)
 360{
 361	/* update hint used for lookup, cleared from free_layer() */
 362	rcu_assign_pointer(idr->hint, pa[0]);
 363
 364	rcu_assign_pointer(pa[0]->ary[id & IDR_MASK], (struct idr_layer *)ptr);
 365	pa[0]->count++;
 366	idr_mark_full(pa, id);
 367}
 368
 369
 370/**
 371 * idr_preload - preload for idr_alloc()
 372 * @gfp_mask: allocation mask to use for preloading
 373 *
 374 * Preload per-cpu layer buffer for idr_alloc().  Can only be used from
 375 * process context and each idr_preload() invocation should be matched with
 376 * idr_preload_end().  Note that preemption is disabled while preloaded.
 377 *
 378 * The first idr_alloc() in the preloaded section can be treated as if it
 379 * were invoked with @gfp_mask used for preloading.  This allows using more
 380 * permissive allocation masks for idrs protected by spinlocks.
 381 *
 382 * For example, if idr_alloc() below fails, the failure can be treated as
 383 * if idr_alloc() were called with GFP_KERNEL rather than GFP_NOWAIT.
 384 *
 385 *	idr_preload(GFP_KERNEL);
 386 *	spin_lock(lock);
 387 *
 388 *	id = idr_alloc(idr, ptr, start, end, GFP_NOWAIT);
 389 *
 390 *	spin_unlock(lock);
 391 *	idr_preload_end();
 392 *	if (id < 0)
 393 *		error;
 394 */
 395void idr_preload(gfp_t gfp_mask)
 396{
 397	/*
 398	 * Consuming preload buffer from non-process context breaks preload
 399	 * allocation guarantee.  Disallow usage from those contexts.
 400	 */
 401	WARN_ON_ONCE(in_interrupt());
 402	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 403
 404	preempt_disable();
 405
 406	/*
 407	 * idr_alloc() is likely to succeed w/o full idr_layer buffer and
 408	 * return value from idr_alloc() needs to be checked for failure
 409	 * anyway.  Silently give up if allocation fails.  The caller can
 410	 * treat failures from idr_alloc() as if idr_alloc() were called
 411	 * with @gfp_mask which should be enough.
 412	 */
 413	while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
 414		struct idr_layer *new;
 415
 416		preempt_enable();
 417		new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 418		preempt_disable();
 419		if (!new)
 420			break;
 421
 422		/* link the new one to per-cpu preload list */
 423		new->ary[0] = __this_cpu_read(idr_preload_head);
 424		__this_cpu_write(idr_preload_head, new);
 425		__this_cpu_inc(idr_preload_cnt);
 426	}
 427}
 428EXPORT_SYMBOL(idr_preload);
 429
 430/**
 431 * idr_alloc - allocate new idr entry
 432 * @idr: the (initialized) idr
 433 * @ptr: pointer to be associated with the new id
 434 * @start: the minimum id (inclusive)
 435 * @end: the maximum id (exclusive, <= 0 for max)
 436 * @gfp_mask: memory allocation flags
 437 *
 438 * Allocate an id in [start, end) and associate it with @ptr.  If no ID is
 439 * available in the specified range, returns -ENOSPC.  On memory allocation
 440 * failure, returns -ENOMEM.
 441 *
 442 * Note that @end is treated as max when <= 0.  This is to always allow
 443 * using @start + N as @end as long as N is inside integer range.
 444 *
 445 * The user is responsible for exclusively synchronizing all operations
 446 * which may modify @idr.  However, read-only accesses such as idr_find()
 447 * or iteration can be performed under RCU read lock provided the user
 448 * destroys @ptr in RCU-safe way after removal from idr.
 449 */
 450int idr_alloc(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask)
 451{
 452	int max = end > 0 ? end - 1 : INT_MAX;	/* inclusive upper limit */
 453	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 454	int id;
 455
 456	might_sleep_if(gfpflags_allow_blocking(gfp_mask));
 457
 458	/* sanity checks */
 459	if (WARN_ON_ONCE(start < 0))
 460		return -EINVAL;
 461	if (unlikely(max < start))
 462		return -ENOSPC;
 463
 464	/* allocate id */
 465	id = idr_get_empty_slot(idr, start, pa, gfp_mask, NULL);
 466	if (unlikely(id < 0))
 467		return id;
 468	if (unlikely(id > max))
 469		return -ENOSPC;
 470
 471	idr_fill_slot(idr, ptr, id, pa);
 472	return id;
 473}
 474EXPORT_SYMBOL_GPL(idr_alloc);
 475
 476/**
 477 * idr_alloc_cyclic - allocate new idr entry in a cyclical fashion
 478 * @idr: the (initialized) idr
 479 * @ptr: pointer to be associated with the new id
 480 * @start: the minimum id (inclusive)
 481 * @end: the maximum id (exclusive, <= 0 for max)
 482 * @gfp_mask: memory allocation flags
 483 *
 484 * Essentially the same as idr_alloc, but prefers to allocate progressively
 485 * higher ids if it can. If the "cur" counter wraps, then it will start again
 486 * at the "start" end of the range and allocate one that has already been used.
 487 */
 488int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end,
 489			gfp_t gfp_mask)
 490{
 491	int id;
 492
 493	id = idr_alloc(idr, ptr, max(start, idr->cur), end, gfp_mask);
 494	if (id == -ENOSPC)
 495		id = idr_alloc(idr, ptr, start, end, gfp_mask);
 496
 497	if (likely(id >= 0))
 498		idr->cur = id + 1;
 499	return id;
 500}
 501EXPORT_SYMBOL(idr_alloc_cyclic);
 502
 503static void idr_remove_warning(int id)
 504{
 505	WARN(1, "idr_remove called for id=%d which is not allocated.\n", id);
 506}
 507
 508static void sub_remove(struct idr *idp, int shift, int id)
 509{
 510	struct idr_layer *p = idp->top;
 511	struct idr_layer **pa[MAX_IDR_LEVEL + 1];
 512	struct idr_layer ***paa = &pa[0];
 513	struct idr_layer *to_free;
 514	int n;
 515
 516	*paa = NULL;
 517	*++paa = &idp->top;
 518
 519	while ((shift > 0) && p) {
 520		n = (id >> shift) & IDR_MASK;
 521		__clear_bit(n, p->bitmap);
 522		*++paa = &p->ary[n];
 523		p = p->ary[n];
 524		shift -= IDR_BITS;
 525	}
 526	n = id & IDR_MASK;
 527	if (likely(p != NULL && test_bit(n, p->bitmap))) {
 528		__clear_bit(n, p->bitmap);
 529		RCU_INIT_POINTER(p->ary[n], NULL);
 530		to_free = NULL;
 531		while(*paa && ! --((**paa)->count)){
 532			if (to_free)
 533				free_layer(idp, to_free);
 534			to_free = **paa;
 535			**paa-- = NULL;
 536		}
 537		if (!*paa)
 538			idp->layers = 0;
 539		if (to_free)
 540			free_layer(idp, to_free);
 541	} else
 542		idr_remove_warning(id);
 543}
 544
 545/**
 546 * idr_remove - remove the given id and free its slot
 547 * @idp: idr handle
 548 * @id: unique key
 549 */
 550void idr_remove(struct idr *idp, int id)
 551{
 552	struct idr_layer *p;
 553	struct idr_layer *to_free;
 554
 555	if (id < 0)
 556		return;
 557
 558	if (id > idr_max(idp->layers)) {
 559		idr_remove_warning(id);
 560		return;
 561	}
 562
 563	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
 564	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
 565	    idp->top->ary[0]) {
 566		/*
 567		 * Single child at leftmost slot: we can shrink the tree.
 568		 * This level is not needed anymore since when layers are
 569		 * inserted, they are inserted at the top of the existing
 570		 * tree.
 571		 */
 572		to_free = idp->top;
 573		p = idp->top->ary[0];
 574		rcu_assign_pointer(idp->top, p);
 575		--idp->layers;
 576		to_free->count = 0;
 577		bitmap_clear(to_free->bitmap, 0, IDR_SIZE);
 578		free_layer(idp, to_free);
 579	}
 
 
 
 
 
 
 
 
 
 
 580}
 581EXPORT_SYMBOL(idr_remove);
 582
 583static void __idr_remove_all(struct idr *idp)
 584{
 585	int n, id, max;
 586	int bt_mask;
 587	struct idr_layer *p;
 588	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 589	struct idr_layer **paa = &pa[0];
 590
 591	n = idp->layers * IDR_BITS;
 592	*paa = idp->top;
 593	RCU_INIT_POINTER(idp->top, NULL);
 594	max = idr_max(idp->layers);
 595
 596	id = 0;
 597	while (id >= 0 && id <= max) {
 598		p = *paa;
 599		while (n > IDR_BITS && p) {
 600			n -= IDR_BITS;
 
 601			p = p->ary[(id >> n) & IDR_MASK];
 602			*++paa = p;
 603		}
 604
 605		bt_mask = id;
 606		id += 1 << n;
 607		/* Get the highest bit that the above add changed from 0->1. */
 608		while (n < fls(id ^ bt_mask)) {
 609			if (*paa)
 610				free_layer(idp, *paa);
 611			n += IDR_BITS;
 612			--paa;
 613		}
 614	}
 615	idp->layers = 0;
 616}
 617
 618/**
 619 * idr_destroy - release all cached layers within an idr tree
 620 * @idp: idr handle
 621 *
 622 * Free all id mappings and all idp_layers.  After this function, @idp is
 623 * completely unused and can be freed / recycled.  The caller is
 624 * responsible for ensuring that no one else accesses @idp during or after
 625 * idr_destroy().
 626 *
 627 * A typical clean-up sequence for objects stored in an idr tree will use
 628 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 629 * free up the id mappings and cached idr_layers.
 630 */
 631void idr_destroy(struct idr *idp)
 632{
 633	__idr_remove_all(idp);
 634
 635	while (idp->id_free_cnt) {
 636		struct idr_layer *p = get_from_free_list(idp);
 637		kmem_cache_free(idr_layer_cache, p);
 638	}
 639}
 640EXPORT_SYMBOL(idr_destroy);
 641
 642void *idr_find_slowpath(struct idr *idp, int id)
 643{
 644	int n;
 645	struct idr_layer *p;
 646
 647	if (id < 0)
 648		return NULL;
 649
 650	p = rcu_dereference_raw(idp->top);
 651	if (!p)
 652		return NULL;
 653	n = (p->layer+1) * IDR_BITS;
 654
 655	if (id > idr_max(p->layer + 1))
 656		return NULL;
 657	BUG_ON(n == 0);
 658
 659	while (n > 0 && p) {
 660		n -= IDR_BITS;
 661		BUG_ON(n != p->layer*IDR_BITS);
 662		p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 663	}
 664	return((void *)p);
 665}
 666EXPORT_SYMBOL(idr_find_slowpath);
 667
 668/**
 669 * idr_for_each - iterate through all stored pointers
 670 * @idp: idr handle
 671 * @fn: function to be called for each pointer
 672 * @data: data passed back to callback function
 673 *
 674 * Iterate over the pointers registered with the given idr.  The
 675 * callback function will be called for each pointer currently
 676 * registered, passing the id, the pointer and the data pointer passed
 677 * to this function.  It is not safe to modify the idr tree while in
 678 * the callback, so functions such as idr_get_new and idr_remove are
 679 * not allowed.
 680 *
 681 * We check the return of @fn each time. If it returns anything other
 682 * than %0, we break out and return that value.
 683 *
 684 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 685 */
 686int idr_for_each(struct idr *idp,
 687		 int (*fn)(int id, void *p, void *data), void *data)
 688{
 689	int n, id, max, error = 0;
 690	struct idr_layer *p;
 691	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 692	struct idr_layer **paa = &pa[0];
 693
 694	n = idp->layers * IDR_BITS;
 695	*paa = rcu_dereference_raw(idp->top);
 696	max = idr_max(idp->layers);
 697
 698	id = 0;
 699	while (id >= 0 && id <= max) {
 700		p = *paa;
 701		while (n > 0 && p) {
 702			n -= IDR_BITS;
 
 703			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 704			*++paa = p;
 705		}
 706
 707		if (p) {
 708			error = fn(id, (void *)p, data);
 709			if (error)
 710				break;
 711		}
 712
 713		id += 1 << n;
 714		while (n < fls(id)) {
 715			n += IDR_BITS;
 716			--paa;
 717		}
 718	}
 719
 720	return error;
 721}
 722EXPORT_SYMBOL(idr_for_each);
 723
 724/**
 725 * idr_get_next - lookup next object of id to given id.
 726 * @idp: idr handle
 727 * @nextidp:  pointer to lookup key
 728 *
 729 * Returns pointer to registered object with id, which is next number to
 730 * given id. After being looked up, *@nextidp will be updated for the next
 731 * iteration.
 732 *
 733 * This function can be called under rcu_read_lock(), given that the leaf
 734 * pointers lifetimes are correctly managed.
 735 */
 736void *idr_get_next(struct idr *idp, int *nextidp)
 737{
 738	struct idr_layer *p, *pa[MAX_IDR_LEVEL + 1];
 739	struct idr_layer **paa = &pa[0];
 740	int id = *nextidp;
 741	int n, max;
 742
 743	/* find first ent */
 744	p = *paa = rcu_dereference_raw(idp->top);
 745	if (!p)
 746		return NULL;
 747	n = (p->layer + 1) * IDR_BITS;
 748	max = idr_max(p->layer + 1);
 749
 750	while (id >= 0 && id <= max) {
 751		p = *paa;
 752		while (n > 0 && p) {
 753			n -= IDR_BITS;
 
 754			p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
 755			*++paa = p;
 756		}
 757
 758		if (p) {
 759			*nextidp = id;
 760			return p;
 761		}
 762
 763		/*
 764		 * Proceed to the next layer at the current level.  Unlike
 765		 * idr_for_each(), @id isn't guaranteed to be aligned to
 766		 * layer boundary at this point and adding 1 << n may
 767		 * incorrectly skip IDs.  Make sure we jump to the
 768		 * beginning of the next layer using round_up().
 769		 */
 770		id = round_up(id + 1, 1 << n);
 771		while (n < fls(id)) {
 772			n += IDR_BITS;
 773			--paa;
 774		}
 775	}
 776	return NULL;
 777}
 778EXPORT_SYMBOL(idr_get_next);
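
/*
 * Editor's example (sketch only): iterating with idr_get_next() by hand.
 * This is essentially the pattern wrapped by the idr_for_each_entry()
 * macro in <linux/idr.h>.  my_idr, struct foo and handle() are
 * hypothetical, and the usual RCU or writer-serialization rules apply.
 *
 *	struct foo *foo;
 *	int id;
 *
 *	for (id = 0; (foo = idr_get_next(&my_idr, &id)) != NULL; id++)
 *		handle(id, foo);
 */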
 779
 780
 781/**
 782 * idr_replace - replace pointer for given id
 783 * @idp: idr handle
 784 * @ptr: pointer you want associated with the id
 785 * @id: lookup key
 786 *
 787 * Replace the pointer registered with an id and return the old value.
 788 * A %-ENOENT return indicates that @id was not found.
 789 * A %-EINVAL return indicates that @id was not within valid constraints.
 790 *
 791 * The caller must serialize with writers.
 792 */
 793void *idr_replace(struct idr *idp, void *ptr, int id)
 794{
 795	int n;
 796	struct idr_layer *p, *old_p;
 797
 798	if (id < 0)
 799		return ERR_PTR(-EINVAL);
 800
 801	p = idp->top;
 802	if (!p)
  803		return ERR_PTR(-ENOENT);
 804
 805	if (id > idr_max(p->layer + 1))
 806		return ERR_PTR(-ENOENT);
 807
 808	n = p->layer * IDR_BITS;
 809	while ((n > 0) && p) {
 810		p = p->ary[(id >> n) & IDR_MASK];
 811		n -= IDR_BITS;
 812	}
 813
 814	n = id & IDR_MASK;
 815	if (unlikely(p == NULL || !test_bit(n, p->bitmap)))
 816		return ERR_PTR(-ENOENT);
 817
 818	old_p = p->ary[n];
 819	rcu_assign_pointer(p->ary[n], ptr);
 820
 821	return old_p;
 822}
 823EXPORT_SYMBOL(idr_replace);
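
/*
 * Editor's example (sketch only): swapping the pointer stored at an id
 * and checking the error-pointer returns documented above.  my_idr,
 * new_foo and the assumption that the displaced object can simply be
 * kfree()d are illustrative only.
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_foo, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 *	kfree(old);
 */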
 824
 825void __init idr_init_cache(void)
 826{
 827	idr_layer_cache = kmem_cache_create("idr_layer_cache",
 828				sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
 829}
 830
 831/**
 832 * idr_init - initialize idr handle
 833 * @idp:	idr handle
 834 *
  835 * This function is used to set up the handle (@idp) that you will pass
 836 * to the rest of the functions.
 837 */
 838void idr_init(struct idr *idp)
 839{
 840	memset(idp, 0, sizeof(struct idr));
 841	spin_lock_init(&idp->lock);
 842}
 843EXPORT_SYMBOL(idr_init);
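
/*
 * Editor's note (sketch only): an idr may be set up either at run time
 * with idr_init() or statically with the DEFINE_IDR() macro from
 * <linux/idr.h>.  The names my_idr and dynamic_idr are hypothetical.
 *
 *	static DEFINE_IDR(my_idr);
 *
 *	struct idr dynamic_idr;
 *
 *	idr_init(&dynamic_idr);
 */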
 844
 845static int idr_has_entry(int id, void *p, void *data)
 846{
 847	return 1;
 848}
 849
 850bool idr_is_empty(struct idr *idp)
 851{
 852	return !idr_for_each(idp, idr_has_entry, NULL);
 853}
 854EXPORT_SYMBOL(idr_is_empty);
 855
 856/**
 857 * DOC: IDA description
 858 * IDA - IDR based ID allocator
 859 *
  860 * This is an id allocator without id -> pointer translation.  Memory
  861 * usage is much lower than a full-blown idr because each id only
  862 * occupies a bit.  ida uses a custom leaf node which contains
  863 * IDA_BITMAP_BITS slots.
 864 *
 865 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 866 */
 867
 868static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 869{
 870	unsigned long flags;
 871
 872	if (!ida->free_bitmap) {
 873		spin_lock_irqsave(&ida->idr.lock, flags);
 874		if (!ida->free_bitmap) {
 875			ida->free_bitmap = bitmap;
 876			bitmap = NULL;
 877		}
 878		spin_unlock_irqrestore(&ida->idr.lock, flags);
 879	}
 880
 881	kfree(bitmap);
 882}
 883
 884/**
 885 * ida_pre_get - reserve resources for ida allocation
 886 * @ida:	ida handle
  887 * @gfp_mask:	memory allocation flags
  888 *
  889 * This function should be called prior to taking any locks and calling
  890 * ida_get_new_above().  It preallocates enough memory to satisfy the
 891 * worst possible allocation.
 892 *
 893 * If the system is REALLY out of memory this function returns %0,
 894 * otherwise %1.
 895 */
 896int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 897{
 898	/* allocate idr_layers */
 899	if (!__idr_pre_get(&ida->idr, gfp_mask))
 900		return 0;
 901
 902	/* allocate free_bitmap */
 903	if (!ida->free_bitmap) {
 904		struct ida_bitmap *bitmap;
 905
 906		bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
 907		if (!bitmap)
 908			return 0;
 909
 910		free_bitmap(ida, bitmap);
 911	}
 912
 913	return 1;
 914}
 915EXPORT_SYMBOL(ida_pre_get);
 916
 917/**
 918 * ida_get_new_above - allocate new ID above or equal to a start id
 919 * @ida:	ida handle
 920 * @starting_id: id to start search at
 921 * @p_id:	pointer to the allocated handle
 922 *
  923 * Allocate a new ID greater than or equal to @starting_id.  It should be
  924 * called with any required locks held.
  925 *
  926 * If more memory is required, it will return %-EAGAIN; the caller should
  927 * unlock, go back to the ida_pre_get() call and retry.  If the ida is
  928 * full, it will return %-ENOSPC.
 929 *
 930 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 931 */
 932int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
 933{
 934	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 935	struct ida_bitmap *bitmap;
 936	unsigned long flags;
 937	int idr_id = starting_id / IDA_BITMAP_BITS;
 938	int offset = starting_id % IDA_BITMAP_BITS;
 939	int t, id;
 940
 941 restart:
 942	/* get vacant slot */
 943	t = idr_get_empty_slot(&ida->idr, idr_id, pa, 0, &ida->idr);
 944	if (t < 0)
 945		return t == -ENOMEM ? -EAGAIN : t;
 946
 947	if (t * IDA_BITMAP_BITS >= MAX_IDR_BIT)
 948		return -ENOSPC;
 949
 950	if (t != idr_id)
 951		offset = 0;
 952	idr_id = t;
 953
 954	/* if bitmap isn't there, create a new one */
 955	bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
 956	if (!bitmap) {
 957		spin_lock_irqsave(&ida->idr.lock, flags);
 958		bitmap = ida->free_bitmap;
 959		ida->free_bitmap = NULL;
 960		spin_unlock_irqrestore(&ida->idr.lock, flags);
 961
 962		if (!bitmap)
 963			return -EAGAIN;
 964
 965		memset(bitmap, 0, sizeof(struct ida_bitmap));
 966		rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
 967				(void *)bitmap);
 968		pa[0]->count++;
 969	}
 970
 971	/* lookup for empty slot */
 972	t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
 973	if (t == IDA_BITMAP_BITS) {
 974		/* no empty slot after offset, continue to the next chunk */
 975		idr_id++;
 976		offset = 0;
 977		goto restart;
 978	}
 979
 980	id = idr_id * IDA_BITMAP_BITS + t;
 981	if (id >= MAX_IDR_BIT)
 982		return -ENOSPC;
 983
 984	__set_bit(t, bitmap->bitmap);
 985	if (++bitmap->nr_busy == IDA_BITMAP_BITS)
 986		idr_mark_full(pa, idr_id);
 987
 988	*p_id = id;
 989
  990	/* Each leaf node can handle nearly a thousand slots and the
  991	 * whole idea of ida is to have a small memory footprint.
 992	 * Throw away extra resources one by one after each successful
 993	 * allocation.
 994	 */
 995	if (ida->idr.id_free_cnt || ida->free_bitmap) {
 996		struct idr_layer *p = get_from_free_list(&ida->idr);
 997		if (p)
 998			kmem_cache_free(idr_layer_cache, p);
 999	}
1000
1001	return 0;
1002}
1003EXPORT_SYMBOL(ida_get_new_above);
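
/*
 * Editor's example (sketch only): the preload-and-retry pattern described
 * in the comments above, using a hypothetical spinlock my_lock and ida
 * my_ida.  An %-EAGAIN return sends the caller back to ida_pre_get().
 *
 *	int id, ret;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		ret = ida_get_new_above(&my_ida, 0, &id);
 *		spin_unlock(&my_lock);
 *	} while (ret == -EAGAIN);
 *
 *	if (ret)
 *		return ret;
 */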
1004
1005/**
1006 * ida_remove - remove the given ID
1007 * @ida:	ida handle
1008 * @id:		ID to free
1009 */
1010void ida_remove(struct ida *ida, int id)
1011{
1012	struct idr_layer *p = ida->idr.top;
1013	int shift = (ida->idr.layers - 1) * IDR_BITS;
1014	int idr_id = id / IDA_BITMAP_BITS;
1015	int offset = id % IDA_BITMAP_BITS;
1016	int n;
1017	struct ida_bitmap *bitmap;
1018
1019	if (idr_id > idr_max(ida->idr.layers))
1020		goto err;
1021
1022	/* clear full bits while looking up the leaf idr_layer */
1023	while ((shift > 0) && p) {
1024		n = (idr_id >> shift) & IDR_MASK;
1025		__clear_bit(n, p->bitmap);
1026		p = p->ary[n];
1027		shift -= IDR_BITS;
1028	}
1029
1030	if (p == NULL)
1031		goto err;
1032
1033	n = idr_id & IDR_MASK;
1034	__clear_bit(n, p->bitmap);
1035
1036	bitmap = (void *)p->ary[n];
1037	if (!bitmap || !test_bit(offset, bitmap->bitmap))
1038		goto err;
1039
1040	/* update bitmap and remove it if empty */
1041	__clear_bit(offset, bitmap->bitmap);
1042	if (--bitmap->nr_busy == 0) {
1043		__set_bit(n, p->bitmap);	/* to please idr_remove() */
1044		idr_remove(&ida->idr, idr_id);
1045		free_bitmap(ida, bitmap);
1046	}
1047
1048	return;
1049
1050 err:
1051	WARN(1, "ida_remove called for id=%d which is not allocated.\n", id);
1052}
1053EXPORT_SYMBOL(ida_remove);
1054
1055/**
1056 * ida_destroy - release all cached layers within an ida tree
1057 * @ida:		ida handle
1058 */
1059void ida_destroy(struct ida *ida)
1060{
1061	idr_destroy(&ida->idr);
1062	kfree(ida->free_bitmap);
1063}
1064EXPORT_SYMBOL(ida_destroy);
1065
1066/**
1067 * ida_simple_get - get a new id.
1068 * @ida: the (initialized) ida.
 1069 * @start: the minimum id (inclusive, < 0x80000000)
 1070 * @end: the maximum id (exclusive, < 0x80000000 or 0 for max)
1071 * @gfp_mask: memory allocation flags
1072 *
1073 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
1074 * On memory allocation failure, returns -ENOMEM.
1075 *
1076 * Use ida_simple_remove() to get rid of an id.
1077 */
1078int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
1079		   gfp_t gfp_mask)
1080{
1081	int ret, id;
1082	unsigned int max;
1083	unsigned long flags;
1084
1085	BUG_ON((int)start < 0);
1086	BUG_ON((int)end < 0);
1087
1088	if (end == 0)
1089		max = 0x80000000;
1090	else {
1091		BUG_ON(end < start);
1092		max = end - 1;
1093	}
1094
1095again:
1096	if (!ida_pre_get(ida, gfp_mask))
1097		return -ENOMEM;
1098
1099	spin_lock_irqsave(&simple_ida_lock, flags);
1100	ret = ida_get_new_above(ida, start, &id);
1101	if (!ret) {
1102		if (id > max) {
1103			ida_remove(ida, id);
1104			ret = -ENOSPC;
1105		} else {
1106			ret = id;
1107		}
1108	}
1109	spin_unlock_irqrestore(&simple_ida_lock, flags);
1110
1111	if (unlikely(ret == -EAGAIN))
1112		goto again;
1113
1114	return ret;
1115}
1116EXPORT_SYMBOL(ida_simple_get);
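
/*
 * Editor's example (sketch only): the simple interface takes care of
 * preloading, locking and the -EAGAIN retry internally, so a caller
 * needs only one call per allocation.  my_ida is hypothetical; passing
 * 0 for @end places no upper bound on the returned id.
 *
 *	int id;
 *
 *	id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *	if (id < 0)
 *		return id;
 *	...
 *	ida_simple_remove(&my_ida, id);
 */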
1117
1118/**
1119 * ida_simple_remove - remove an allocated id.
1120 * @ida: the (initialized) ida.
1121 * @id: the id returned by ida_simple_get.
1122 */
1123void ida_simple_remove(struct ida *ida, unsigned int id)
1124{
1125	unsigned long flags;
1126
1127	BUG_ON((int)id < 0);
1128	spin_lock_irqsave(&simple_ida_lock, flags);
1129	ida_remove(ida, id);
1130	spin_unlock_irqrestore(&simple_ida_lock, flags);
1131}
1132EXPORT_SYMBOL(ida_simple_remove);
1133
1134/**
1135 * ida_init - initialize ida handle
1136 * @ida:	ida handle
1137 *
 1138 * This function is used to set up the handle (@ida) that you will pass
1139 * to the rest of the functions.
1140 */
1141void ida_init(struct ida *ida)
1142{
1143	memset(ida, 0, sizeof(struct ida));
1144	idr_init(&ida->idr);
1145
1146}
1147EXPORT_SYMBOL(ida_init);
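
/*
 * Editor's note (sketch only): as with the idr, an ida can also be
 * defined statically with DEFINE_IDA() from <linux/idr.h> instead of
 * calling ida_init() at run time.  The name my_ida is hypothetical.
 *
 *	static DEFINE_IDA(my_ida);
 */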