/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/radix-tree.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/kmemleak.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/rcupdate.h>
#include <linux/preempt.h>		/* in_interrupt() */

/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;

/*
 * Radix tree node cache.
 */
static struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

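/*
 * Worked example (illustrative; assumes a 64-bit build with
 * RADIX_TREE_MAP_SHIFT == 6): RADIX_TREE_MAX_PATH is
 * DIV_ROUND_UP(BITS_PER_LONG, RADIX_TREE_MAP_SHIFT) = DIV_ROUND_UP(64, 6)
 * = 11 levels, so a full preload reserves 11 * 2 - 1 = 21 nodes per CPU.
 */
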
/*
 * Per-cpu pool of preloaded nodes
 */
struct radix_tree_preload {
	unsigned nr;
	/* nodes->private_data points to next preallocated node */
	struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	node_to_entry(NULL)

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/* Sibling slots point directly to another slot in the same node */
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	void **ptr = node;
	return (parent->slots <= ptr) &&
			(ptr < parent->slots + RADIX_TREE_MAP_SIZE);
}
#else
static inline bool is_sibling_entry(struct radix_tree_node *parent, void *node)
{
	return false;
}
#endif

static inline unsigned long get_slot_offset(struct radix_tree_node *parent,
						 void **slot)
{
	return slot - parent->slots;
}

static unsigned int radix_tree_descend(struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void **entry = rcu_dereference_raw(parent->slots[offset]);

#ifdef CONFIG_RADIX_TREE_MULTIORDER
	if (radix_tree_is_internal_node(entry)) {
		if (is_sibling_entry(parent, entry)) {
			void **sibentry = (void **) entry_to_node(entry);
			offset = get_slot_offset(parent, sibentry);
			entry = rcu_dereference_raw(*sibentry);
		}
	}
#endif

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
{
	return root->gfp_mask & __GFP_BITS_MASK;
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned int tag)
{
	root->gfp_mask |= (__force gfp_t)(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->gfp_mask &= (__force gfp_t)~(1 << (tag + __GFP_BITS_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->gfp_mask &= __GFP_BITS_MASK;
}

static inline int root_tag_get(struct radix_tree_root *root, unsigned int tag)
{
	return (__force int)root->gfp_mask & (1 << (tag + __GFP_BITS_SHIFT));
}

static inline unsigned root_tags_get(struct radix_tree_root *root)
{
	return (__force unsigned)root->gfp_mask >> __GFP_BITS_SHIFT;
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	unsigned idx;
	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: the node whose tag bitmap should be searched
 * @tag: the tag index to search
 * @offset: the bit number to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns the next set bit offset, or RADIX_TREE_MAP_SIZE if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return (iter->index >> iter_shift(iter)) & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

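/*
 * Worked example (illustrative; assumes RADIX_TREE_MAP_SHIFT == 6, i.e.
 * RADIX_TREE_MAP_SIZE == 64): shift_maxindex(0) == 63 for a leaf node,
 * shift_maxindex(6) == (64 << 6) - 1 == 4095 one level up, and each
 * further level multiplies the covered index span by another factor of 64.
 */
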
#ifndef __KERNEL__
static void dump_node(struct radix_tree_node *node, unsigned long index)
{
	unsigned long i;

	pr_debug("radix node: %p offset %d indices %lu-%lu parent %p tags %lx %lx %lx shift %d count %d exceptional %d\n",
		node, node->offset, index, index | node_maxindex(node),
		node->parent,
		node->tags[0][0], node->tags[1][0], node->tags[2][0],
		node->shift, node->count, node->exceptional);

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		unsigned long first = index | (i << node->shift);
		unsigned long last = first | ((1UL << node->shift) - 1);
		void *entry = node->slots[i];
		if (!entry)
			continue;
		if (entry == RADIX_TREE_RETRY) {
			pr_debug("radix retry offset %ld indices %lu-%lu parent %p\n",
					i, first, last, node);
		} else if (!radix_tree_is_internal_node(entry)) {
			pr_debug("radix entry %p offset %ld indices %lu-%lu parent %p\n",
					entry, i, first, last, node);
		} else if (is_sibling_entry(node, entry)) {
			pr_debug("radix sblng %p offset %ld indices %lu-%lu parent %p val %p\n",
					entry, i, first, last, node,
					*(void **)entry_to_node(entry));
		} else {
			dump_node(entry_to_node(entry), first);
		}
	}
}

/* For debug */
static void radix_tree_dump(struct radix_tree_root *root)
{
	pr_debug("radix root: %p rnode %p tags %x\n",
			root, root->rnode,
			root->gfp_mask >> __GFP_BITS_SHIFT);
	if (!radix_tree_is_internal_node(root->rnode))
		return;
	dump_node(entry_to_node(root->rnode), 0);
}
#endif

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(struct radix_tree_root *root,
			struct radix_tree_node *parent,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int exceptional)
{
	struct radix_tree_node *ret = NULL;
	gfp_t gfp_mask = root_gfp_mask(root);

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->private_data;
			ret->private_data = NULL;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->parent = parent;
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->exceptional = exceptional;
	}
	return ret;
}

static void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab.  We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	preempt_disable();
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		preempt_enable();
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		preempt_disable();
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->private_data = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail.  On
 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on non-sensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);

/*
 * The same as the function above, except we don't guarantee that preloading
 * happens; we do it if we decide it helps.  On success, return zero with
 * preemption disabled.  On error, return -ENOMEM with preemption not
 * disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	preempt_disable();
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
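
/*
 * A minimal usage sketch of the preload idiom (hypothetical caller; the
 * lock and tree names below are not part of this file).  The tree must be
 * initialised without __GFP_DIRECT_RECLAIM so that the actual insertion
 * cannot block and is satisfied from the per-cpu pool filled above.
 */
static DEFINE_SPINLOCK(example_lock);
static RADIX_TREE(example_tree, GFP_NOWAIT);

static int example_store(unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);	/* may sleep */

	if (err)
		return err;
	spin_lock(&example_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);
	radix_tree_preload_end();	/* re-enables preemption */
	return err;
}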

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Preload with enough objects to ensure that we can split a single entry
 * of order @old_order into many entries of size @new_order
 */
int radix_tree_split_preload(unsigned int old_order, unsigned int new_order,
							gfp_t gfp_mask)
{
	unsigned top = 1 << (old_order % RADIX_TREE_MAP_SHIFT);
	unsigned layers = (old_order / RADIX_TREE_MAP_SHIFT) -
				(new_order / RADIX_TREE_MAP_SHIFT);
	unsigned nr = 0;

	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	BUG_ON(new_order >= old_order);

	while (layers--)
		nr = nr * RADIX_TREE_MAP_SIZE + 1;
	return __radix_tree_preload(gfp_mask, top * nr);
}
#endif

/*
 * The same as radix_tree_maybe_preload(), but preload the number of nodes
 * required to insert (1 << order) contiguous naturally-aligned elements.
 */
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order)
{
	unsigned long nr_subtrees;
	int nr_nodes, subtree_height;

	/* Preloading doesn't help anything with this gfp mask, skip it */
	if (!gfpflags_allow_blocking(gfp_mask)) {
		preempt_disable();
		return 0;
	}

	/*
	 * Calculate the number and height of fully populated subtrees it
	 * takes to store (1 << order) elements.
	 */
	nr_subtrees = 1 << order;
	for (subtree_height = 0; nr_subtrees > RADIX_TREE_MAP_SIZE;
			subtree_height++)
		nr_subtrees >>= RADIX_TREE_MAP_SHIFT;

	/*
	 * The worst case is a zero height tree with a single item at index 0
	 * and then inserting items starting at ULONG_MAX - (1 << order).
	 *
	 * This requires RADIX_TREE_MAX_PATH nodes to build the branch from
	 * the root to the 0-index item.
	 */
	nr_nodes = RADIX_TREE_MAX_PATH;

	/* Plus the branch to the fully populated subtrees. */
	nr_nodes += RADIX_TREE_MAX_PATH - subtree_height;

	/* The root node is shared. */
	nr_nodes--;

	/* Plus the nodes required to build the subtrees. */
	nr_nodes += nr_subtrees * height_to_maxnodes[subtree_height];

	return __radix_tree_preload(gfp_mask, nr_nodes);
}

static unsigned radix_tree_load_root(struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->rnode);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 *	Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root,
				unsigned long index, unsigned int shift)
{
	struct radix_tree_node *slot;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be.  */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	slot = root->rnode;
	if (!slot)
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(root,
							NULL, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		/* Propagate the aggregated tag info into the new root */
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
			if (root_tag_get(root, tag))
				tag_set(node, tag, 0);
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(slot)) {
			entry_to_node(slot)->parent = node;
		} else if (radix_tree_exceptional_entry(slot)) {
			/* Moving an exceptional root->rnode to a node */
			node->exceptional = 1;
		}
		node->slots[0] = slot;
		slot = node_to_entry(node);
		rcu_assign_pointer(root->rnode, slot);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}

/**
 *	radix_tree_shrink    -    shrink radix tree to minimum height
 *	@root:		radix tree root
 *	@update_node:	callback for changing leaf nodes
 *	@private:	private data to pass to @update_node
 */
static inline void radix_tree_shrink(struct radix_tree_root *root,
				     radix_tree_update_node_t update_node,
				     void *private)
{
	for (;;) {
		struct radix_tree_node *node = root->rnode;
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its child
		 * is not at the leftmost slot, or the child is a multiorder
		 * entry, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = node->slots[0];
		if (!child)
			break;
		if (!radix_tree_is_internal_node(child) && node->shift)
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->rnode) as far as dependent read barriers go.
		 */
		root->rnode = child;

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing a direct root node with an indirect
		 * pointer also results in a stale slot). So tag the slot as
		 * indirect to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child)) {
			node->slots[0] = RADIX_TREE_RETRY;
			if (update_node)
				update_node(node, private);
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
	}
}

static void delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node,
			radix_tree_update_node_t update_node, void *private)
{
	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node == entry_to_node(root->rnode))
				radix_tree_shrink(root, update_node, private);
			return;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			root_tag_clear_all(root);
			root->rnode = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);

		node = parent;
	} while (node);
}

/**
 *	__radix_tree_create	-	create a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		index occupies 2^order aligned slots
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Create, if necessary, and return the node and slot for an item
 *	at position @index in the radix tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 *
 *	Returns -ENOMEM, or 0 for success.
 */
int __radix_tree_create(struct radix_tree_root *root, unsigned long index,
			unsigned order, struct radix_tree_node **nodep,
			void ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void **slot = (void **)&root->rnode;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index | ((1UL << order) - 1);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough.  */
	if (order > 0 && max == ((1UL << order) - 1))
		max++;
	if (max > maxindex) {
		int error = radix_tree_extend(root, max, shift);
		if (error < 0)
			return error;
		shift = error;
		child = root->rnode;
	}

	while (shift > order) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node.  */
			child = radix_tree_node_alloc(root, node, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = child->slots[offset];
		if (radix_tree_is_internal_node(entry) &&
					!is_sibling_entry(child, entry)) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;
			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	struct radix_tree_node *child;
	unsigned i, n, tag, offset, tags = 0;

	if (node) {
		if (order > node->shift)
			n = 1 << (order - node->shift);
		else
			n = 1;
		offset = get_slot_offset(node, slot);
	} else {
		n = 1;
		offset = 0;
	}

	if (n > 1) {
		offset = offset & ~(n - 1);
		slot = &node->slots[offset];
	}
	child = node_to_entry(slot);

	for (i = 0; i < n; i++) {
		if (slot[i]) {
			if (replace) {
				node->count--;
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tag_get(node, tag, offset + i))
						tags |= 1 << tag;
			} else
				return -EEXIST;
		}
	}

	for (i = 0; i < n; i++) {
		struct radix_tree_node *old = slot[i];
		if (i) {
			rcu_assign_pointer(slot[i], child);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_clear(node, tag, offset + i);
		} else {
			rcu_assign_pointer(slot[i], item);
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
				if (tags & (1 << tag))
					tag_set(node, tag, offset);
		}
		if (radix_tree_is_internal_node(old) &&
					!is_sibling_entry(node, old) &&
					(old != RADIX_TREE_RETRY))
			radix_tree_free_nodes(old);
		if (radix_tree_exceptional_entry(old))
			node->exceptional--;
	}
	if (node) {
		node->count += n;
		if (radix_tree_exceptional_entry(item))
			node->exceptional += n;
	}
	return n;
}
#else
static inline int insert_entries(struct radix_tree_node *node, void **slot,
				void *item, unsigned order, bool replace)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (radix_tree_exceptional_entry(item))
			node->exceptional++;
	}
	return 1;
}
#endif

/**
 *	__radix_tree_insert    -    insert into a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@order:		key covers the 2^order indices around index
 *	@item:		item to insert
 *
 *	Insert an item into the radix tree at position @index.
 */
int __radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item, order, false);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);
		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(__radix_tree_insert);

/**
 *	__radix_tree_lookup	-	lookup an item in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *	@nodep:		returns node
 *	@slotp:		returns slot
 *
 *	Lookup and return the item at position @index in the radix
 *	tree @root.
 *
 *	Until there is more than one item in the tree, no nodes are
 *	allocated and @root->rnode is used as a direct slot instead of
 *	pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(struct radix_tree_root *root, unsigned long index,
			  struct radix_tree_node **nodep, void ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void **slot;

 restart:
	parent = NULL;
	slot = (void **)&root->rnode;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		if (node == RADIX_TREE_RETRY)
			goto restart;
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Returns:  the slot corresponding to the position @index in the
 *	radix tree @root. This is useful for update-if-exists operations.
 *
 *	This function can be called under rcu_read_lock iff the slot is not
 *	modified by radix_tree_replace_slot, otherwise it must be called
 *	exclusive from other writers. Any dereference of the slot must be done
 *	using radix_tree_deref_slot.
 */
void **radix_tree_lookup_slot(struct radix_tree_root *root, unsigned long index)
{
	void **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

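/*
 * A minimal sketch (hypothetical caller) of the retry idiom the comment
 * above describes: dereference the slot with radix_tree_deref_slot() and
 * restart if a concurrent shrink/grow left a stale indirect pointer.
 */
static void *example_lookup_slot(struct radix_tree_root *tree,
				 unsigned long index)
{
	void **slot;
	void *item;

	rcu_read_lock();
repeat:
	item = NULL;
	slot = radix_tree_lookup_slot(tree, index);
	if (slot) {
		item = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(item))
			goto repeat;
	}
	rcu_read_unlock();
	return item;
}
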
/**
 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 *	@root:		radix tree root
 *	@index:		index key
 *
 *	Lookup the item at the position @index in the radix tree @root.
 *
 *	This function can be called under rcu_read_lock, however the caller
 *	must manage lifetimes of leaf nodes (e.g. RCU may also be used to free
 *	them safely). No RCU barriers are required to access or modify the
 *	returned item, however.
 */
void *radix_tree_lookup(struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);

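/*
 * Minimal sketch (hypothetical caller) of a plain RCU-side lookup: only
 * rcu_read_lock() is needed around the call, plus the caller's own scheme
 * (e.g. a refcount) to keep the returned item alive afterwards.
 */
static void *example_lookup(struct radix_tree_root *tree, unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(tree, index);
	rcu_read_unlock();
	return item;
}
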
static inline int slot_count(struct radix_tree_node *node,
						void **slot)
{
	int n = 1;
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot);
	int i;

	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		n++;
	}
#endif
	return n;
}

static void replace_slot(struct radix_tree_root *root,
			 struct radix_tree_node *node,
			 void **slot, void *item,
			 bool warn_typeswitch)
{
	void *old = rcu_dereference_raw(*slot);
	int count, exceptional;

	WARN_ON_ONCE(radix_tree_is_internal_node(item));

	count = !!item - !!old;
	exceptional = !!radix_tree_exceptional_entry(item) -
		      !!radix_tree_exceptional_entry(old);

	WARN_ON_ONCE(warn_typeswitch && (count || exceptional));

	if (node) {
		node->count += count;
		if (exceptional) {
			exceptional *= slot_count(node, slot);
			node->exceptional += exceptional;
		}
	}

	rcu_assign_pointer(*slot, item);
}

static inline void delete_sibling_entries(struct radix_tree_node *node,
						void **slot)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	bool exceptional = radix_tree_exceptional_entry(*slot);
	void *ptr = node_to_entry(slot);
	unsigned offset = get_slot_offset(node, slot);
	int i;

	for (i = 1; offset + i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[offset + i] != ptr)
			break;
		node->slots[offset + i] = NULL;
		node->count--;
		if (exceptional)
			node->exceptional--;
	}
#endif
}

/**
 * __radix_tree_replace		- replace item in a slot
 * @root:		radix tree root
 * @node:		pointer to tree node
 * @slot:		pointer to slot in @node
 * @item:		new item to store in the slot.
 * @update_node:	callback for changing leaf nodes
 * @private:		private data to pass to @update_node
 *
 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void **slot, void *item,
			  radix_tree_update_node_t update_node, void *private)
{
	if (!item)
		delete_sibling_entries(node, slot);
	/*
	 * This function supports replacing exceptional entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->rnode.
	 */
	replace_slot(root, node, slot, item,
		     !node && slot != (void **)&root->rnode);

	if (!node)
		return;

	if (update_node)
		update_node(node, private);

	delete_node(root, node, update_node, private);
}

/**
 * radix_tree_replace_slot	- replace item in a slot
 * @root:	radix tree root
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot(), radix_tree_gang_lookup_slot(),
 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and exceptional entries, as that requires accounting
 * inside the radix tree node. When switching between entry types, or when
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void **slot, void *item)
{
	replace_slot(root, NULL, slot, item, true);
}

/**
 * radix_tree_iter_replace - replace item in a slot
 * @root:	radix tree root
 * @iter:	iterator state
 * @slot:	pointer to slot
 * @item:	new item to store in the slot.
 *
 * For use with radix_tree_split() and radix_tree_for_each_slot().
 * Caller must hold tree write locked across split and replacement.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
		const struct radix_tree_iter *iter, void **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item, NULL, NULL);
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
/**
 * radix_tree_join - replace multiple entries with one multiorder entry
 * @root: radix tree root
 * @index: an index inside the new entry
 * @order: order of the new entry
 * @item: new entry
 *
 * Call this function to replace several entries with one larger entry.
 * The existing entries are presumed to not need freeing as a result of
 * this call.
 *
 * The replacement entry will have all the tags set on it that were set
 * on any of the entries it is replacing.
 */
int radix_tree_join(struct radix_tree_root *root, unsigned long index,
			unsigned order, void *item)
{
	struct radix_tree_node *node;
	void **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, order, &node, &slot);
	if (!error)
		error = insert_entries(node, slot, item, order, true);
	if (error > 0)
		error = 0;

	return error;
}

/**
 * radix_tree_split - Split an entry into smaller entries
 * @root: radix tree root
 * @index: An index within the large entry
 * @order: Order of new entries
 *
 * Call this function as the first step in replacing a multiorder entry
 * with several entries of lower order.  After this function returns,
 * loop over the relevant portion of the tree using radix_tree_for_each_slot()
 * and call radix_tree_iter_replace() to set up each new entry.
 *
 * The tags from this entry are replicated to all the new entries.
 *
 * The radix tree should be locked against modification during the entire
 * replacement operation.  Lock-free lookups will see RADIX_TREE_RETRY which
 * should prompt RCU walkers to restart the lookup from the root.
 */
int radix_tree_split(struct radix_tree_root *root, unsigned long index,
				unsigned order)
{
	struct radix_tree_node *parent, *node, *child;
	void **slot;
	unsigned int offset, end;
	unsigned n, tag, tags = 0;

	if (!__radix_tree_lookup(root, index, &parent, &slot))
		return -ENOENT;
	if (!parent)
		return -ENOENT;

	offset = get_slot_offset(parent, slot);

	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
		if (tag_get(parent, tag, offset))
			tags |= 1 << tag;

	for (end = offset + 1; end < RADIX_TREE_MAP_SIZE; end++) {
		if (!is_sibling_entry(parent, parent->slots[end]))
			break;
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(parent, tag, end);
		/* rcu_assign_pointer ensures tags are set before RETRY */
		rcu_assign_pointer(parent->slots[end], RADIX_TREE_RETRY);
	}
	rcu_assign_pointer(parent->slots[offset], RADIX_TREE_RETRY);
	parent->exceptional -= (end - offset);

	if (order == parent->shift)
		return 0;
	if (order > parent->shift) {
		while (offset < end)
			offset += insert_entries(parent, &parent->slots[offset],
					RADIX_TREE_RETRY, order, true);
		return 0;
	}

	node = parent;

	for (;;) {
		if (node->shift > order) {
			child = radix_tree_node_alloc(root, node,
					node->shift - RADIX_TREE_MAP_SHIFT,
					offset, 0, 0);
			if (!child)
				goto nomem;
			if (node != parent) {
				node->count++;
				node->slots[offset] = node_to_entry(child);
				for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
					if (tags & (1 << tag))
						tag_set(node, tag, offset);
			}

			node = child;
			offset = 0;
			continue;
		}

		n = insert_entries(node, &node->slots[offset],
					RADIX_TREE_RETRY, order, false);
		BUG_ON(n > RADIX_TREE_MAP_SIZE);

		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			if (tags & (1 << tag))
				tag_set(node, tag, offset);
		offset += n;

		while (offset == RADIX_TREE_MAP_SIZE) {
			if (node == parent)
				break;
			offset = node->offset;
			child = node;
			node = node->parent;
			rcu_assign_pointer(node->slots[offset],
						node_to_entry(child));
			offset++;
		}
		if ((node == parent) && (offset == end))
			return 0;
	}

 nomem:
	/* Shouldn't happen; did user forget to preload? */
	/* TODO: free all the allocated nodes */
	WARN_ON(1);
	return -ENOMEM;
}
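
/*
 * A minimal sketch of the protocol described above (hypothetical caller;
 * a real user would bound the loop to the range covered by the old
 * multiorder entry rather than iterate to the end of the tree, and would
 * typically install a distinct item per slot).
 */
static int example_split(struct radix_tree_root *tree, unsigned long index,
			 unsigned int new_order, void *item)
{
	struct radix_tree_iter iter;
	void **slot;
	int err = radix_tree_split(tree, index, new_order);

	if (err)
		return err;
	radix_tree_for_each_slot(slot, tree, &iter, index) {
		radix_tree_iter_replace(tree, &iter, slot, item);
	}
	return 0;
}
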
#endif

/**
 *	radix_tree_tag_set - set a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree, from the root all
 *	the way down to the leaf node.
 *
 *	Returns the address of the tagged item.  Setting a tag on a not-present
 *	item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

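/*
 * Minimal tagging sketch (hypothetical caller): set tag 0 on a present
 * entry, observe it, then clear it again.  Tag indices must be below
 * RADIX_TREE_MAX_TAGS, and the caller must exclude concurrent modifiers.
 */
static void example_tagging(struct radix_tree_root *tree, unsigned long index)
{
	radix_tree_tag_set(tree, index, 0);
	WARN_ON(!radix_tree_tag_get(tree, index, 0));
	radix_tree_tag_clear(tree, index, 0);
}
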
static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 * radix_tree_iter_tag_set - set a tag on the current iterator entry
 * @root:	radix tree root
 * @iter:	iterator state
 * @tag:	tag to set
 */
void radix_tree_iter_tag_set(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_set(root, iter->node, tag, iter_offset(iter));
}

/**
 *	radix_tree_tag_clear - clear a tag on a radix tree node
 *	@root:		radix tree root
 *	@index:		index key
 *	@tag:		tag index
 *
 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 *	corresponding to @index in the radix tree.  If this causes
 *	the leaf node to have no tags set then clear the tag in the
 *	next-to-leaf node, etc.
 *
 *	Returns the address of the tagged item on success, else NULL; i.e.
 *	it has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int uninitialized_var(offset);

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root:		radix tree root
 * @index:		index key
 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;
	if (node == NULL)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!node)
			return 0;
		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

static inline void __set_iter_shift(struct radix_tree_iter *iter,
					unsigned int shift)
{
#ifdef CONFIG_RADIX_TREE_MULTIORDER
	iter->shift = shift;
#endif
}

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit  = offset % BITS_PER_LONG;

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

#ifdef CONFIG_RADIX_TREE_MULTIORDER
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	void *sib = node_to_entry(slot - 1);

	while (iter->index < iter->next_index) {
		*nodep = rcu_dereference_raw(*slot);
		if (*nodep && *nodep != sib)
			return slot;
		slot++;
		iter->index = __radix_tree_iter_add(iter, 1);
		iter->tags >>= 1;
	}

	*nodep = NULL;
	return NULL;
}

void **__radix_tree_next_slot(void **slot, struct radix_tree_iter *iter,
					unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node = rcu_dereference_raw(*slot);

	slot = skip_siblings(&node, slot, iter);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;
		unsigned long next_index;

		if (node == RADIX_TREE_RETRY)
			return slot;
		node = entry_to_node(node);
		iter->node = node;
		iter->shift = node->shift;

		if (flags & RADIX_TREE_ITER_TAGGED) {
			offset = radix_tree_find_next_bit(node, tag, 0);
			if (offset == RADIX_TREE_MAP_SIZE)
				return NULL;
			slot = &node->slots[offset];
			iter->index = __radix_tree_iter_add(iter, offset);
			set_iter_tags(iter, node, offset, tag);
			node = rcu_dereference_raw(*slot);
		} else {
			offset = 0;
			slot = &node->slots[0];
			for (;;) {
				node = rcu_dereference_raw(*slot);
				if (node)
					break;
				slot++;
				offset++;
				if (offset == RADIX_TREE_MAP_SIZE)
					return NULL;
			}
			iter->index = __radix_tree_iter_add(iter, offset);
		}
		if ((flags & RADIX_TREE_ITER_CONTIG) && (offset > 0))
			goto none;
		next_index = (iter->index | shift_maxindex(iter->shift)) + 1;
		if (next_index < iter->next_index)
			iter->next_index = next_index;
	}

	return slot;
 none:
	iter->next_index = 0;
	return NULL;
}
EXPORT_SYMBOL(__radix_tree_next_slot);
#else
static void **skip_siblings(struct radix_tree_node **nodep,
			void **slot, struct radix_tree_iter *iter)
{
	return slot;
}
#endif

void **radix_tree_iter_resume(void **slot, struct radix_tree_iter *iter)
{
	struct radix_tree_node *node;

	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	node = rcu_dereference_raw(*slot);
	skip_siblings(&node, slot, iter);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);

/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root:	radix tree root
 * @iter:	iterator state
 * @flags:	RADIX_TREE_ITER_* flags and tag index
 * Returns:	pointer to chunk first slot, or NULL if iteration is over
 */
void **radix_tree_next_chunk(struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

 restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		__set_iter_shift(iter, 0);
		return (void **)&root->rnode;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = node->slots[offset];
					if (is_sibling_entry(node, slot))
						continue;
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index &~ node_maxindex(node)) | (offset << node->shift);
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;
	__set_iter_shift(iter, node->shift);

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);

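/*
 * Minimal iteration sketch (hypothetical caller): walk all present
 * entries under RCU.  radix_tree_for_each_slot() is built on top of
 * radix_tree_next_chunk() above.
 */
static void example_iterate(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, tree, &iter, 0) {
		void *item = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(item)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		pr_info("index %lu -> %p\n", iter.index, item);
	}
	rcu_read_unlock();
}
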
/**
 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	them at *@results and returns the number of items which were placed at
 *	*@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 *	rcu_read_lock. In this case, rather than the returned results being
 *	an atomic snapshot of the tree at a single point in time, the
 *	semantics of an RCU protected gang lookup are as though multiple
 *	radix_tree_lookups have been issued in individual locks, and results
 *	stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);

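/*
 * Usage sketch (hypothetical caller): collect up to @max present items
 * starting from index 0, under RCU.
 */
static unsigned int example_gang(struct radix_tree_root *tree, void **results,
				 unsigned int max)
{
	unsigned int nr;

	rcu_read_lock();
	nr = radix_tree_gang_lookup(tree, results, 0, max);
	rcu_read_unlock();
	return nr;
}
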
/**
 *	radix_tree_gang_lookup_slot - perform multiple slot lookup on radix tree
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@indices:	where their indices should be placed (but usually NULL)
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *
 *	Performs an index-ascending scan of the tree for present items.  Places
 *	their slots at *@results and returns the number of items which were
 *	placed at *@results.
 *
 *	The implementation is naive.
 *
 *	Like radix_tree_gang_lookup as far as RCU and locking goes. Slots must
 *	be dereferenced with radix_tree_deref_slot, and if using only RCU
 *	protection, radix_tree_deref_slot may fail requiring a retry.
 */
unsigned int
radix_tree_gang_lookup_slot(struct radix_tree_root *root,
			void ***results, unsigned long *indices,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = slot;
		if (indices)
			indices[ret] = iter.index;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_slot);

/**
 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *	                             based on a tag
 *	@root:		radix tree root
 *	@results:	where the results of the lookup are placed
 *	@first_index:	start the lookup from this key
 *	@max_items:	place up to this many items at *results
 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
 *
 *	Performs an index-ascending scan of the tree for present items which
 *	have the tag indexed by @tag set.  Places the items at *@results and
 *	returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

1791/**
1792 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
1793 *					  radix tree based on a tag
1794 *	@root:		radix tree root
1795 *	@results:	where the results of the lookup are placed
1796 *	@first_index:	start the lookup from this key
1797 *	@max_items:	place up to this many items at *results
1798 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
1799 *
1800 *	Performs an index-ascending scan of the tree for present items which
1801 *	have the tag indexed by @tag set.  Places the slots at *@results and
1802 *	returns the number of slots which were placed at *@results.
1803 */
1804unsigned int
1805radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void ***results,
1806		unsigned long first_index, unsigned int max_items,
1807		unsigned int tag)
1808{
1809	struct radix_tree_iter iter;
1810	void **slot;
1811	unsigned int ret = 0;
1812
1813	if (unlikely(!max_items))
1814		return 0;
1815
1816	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
1817		results[ret] = slot;
1818		if (++ret == max_items)
1819			break;
1820	}
1821
1822	return ret;
1823}
1824EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1825
1826/**
1827 *	__radix_tree_delete_node    -    try to free node after clearing a slot
1828 *	@root:		radix tree root
1829 *	@node:		node containing @index
1830 *	@update_node:	callback for changing leaf nodes
1831 *	@private:	private data to pass to @update_node
1832 *
1833 *	After clearing the slot at @index in @node from radix tree
1834 *	rooted at @root, call this function to attempt freeing the
1835 *	node and shrinking the tree.
1836 */
1837void __radix_tree_delete_node(struct radix_tree_root *root,
1838			      struct radix_tree_node *node,
1839			      radix_tree_update_node_t update_node,
1840			      void *private)
1841{
1842	delete_node(root, node, update_node, private);
1843}
1844
1845/**
1846 *	radix_tree_delete_item    -    delete an item from a radix tree
1847 *	@root:		radix tree root
1848 *	@index:		index key
1849 *	@item:		expected item
1850 *
1851 *	Remove @item at @index from the radix tree rooted at @root.
1852 *
1853 *	Returns the address of the deleted item, or NULL if it was not present
1854 *	or the entry at the given @index was not @item.
1855 */
1856void *radix_tree_delete_item(struct radix_tree_root *root,
1857			     unsigned long index, void *item)
1858{
1859	struct radix_tree_node *node;
1860	unsigned int offset;
1861	void **slot;
1862	void *entry;
1863	int tag;
1864
1865	entry = __radix_tree_lookup(root, index, &node, &slot);
1866	if (!entry)
1867		return NULL;
1868
1869	if (item && entry != item)
1870		return NULL;
1871
1872	if (!node) {
1873		root_tag_clear_all(root);
1874		root->rnode = NULL;
1875		return entry;
1876	}
1877
1878	offset = get_slot_offset(node, slot);
1879
1880	/* Clear all tags associated with the item to be deleted.  */
1881	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1882		node_tag_clear(root, node, tag, offset);
1883
1884	__radix_tree_replace(root, node, slot, NULL, NULL, NULL);
1885
1886	return entry;
1887}
1888EXPORT_SYMBOL(radix_tree_delete_item);
1889
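/*
 * Example: an illustrative sketch (not part of this file) of using the
 * @item argument to delete conditionally.  Names are made up; @expected
 * must be non-NULL, and the caller is assumed to hold the lock that
 * serialises updates to @tree.
 */
static bool example_remove_if_matches(struct radix_tree_root *tree,
				      unsigned long index, void *expected)
{
	/* Returns @expected if it was removed, NULL if the slot changed */
	return radix_tree_delete_item(tree, index, expected) == expected;
}
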
1890/**
1891 *	radix_tree_delete    -    delete an item from a radix tree
1892 *	@root:		radix tree root
1893 *	@index:		index key
1894 *
1895 *	Remove the item at @index from the radix tree rooted at @root.
1896 *
1897 *	Returns the address of the deleted item, or NULL if it was not present.
1898 */
1899void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1900{
1901	return radix_tree_delete_item(root, index, NULL);
1902}
1903EXPORT_SYMBOL(radix_tree_delete);
1904
1905void radix_tree_clear_tags(struct radix_tree_root *root,
1906			   struct radix_tree_node *node,
1907			   void **slot)
1908{
1909	if (node) {
1910		unsigned int tag, offset = get_slot_offset(node, slot);
1911		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1912			node_tag_clear(root, node, tag, offset);
1913	} else {
1914		/* Clear root node tags */
1915		root->gfp_mask &= __GFP_BITS_MASK;
1916	}
1917}
1918
1919/**
1920 *	radix_tree_tagged - test whether any items in the tree are tagged
1921 *	@root:		radix tree root
1922 *	@tag:		tag to test
1923 */
1924int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag)
1925{
1926	return root_tag_get(root, tag);
1927}
1928EXPORT_SYMBOL(radix_tree_tagged);
1929
1930static void
1931radix_tree_node_ctor(void *arg)
1932{
1933	struct radix_tree_node *node = arg;
1934
1935	memset(node, 0, sizeof(*node));
1936	INIT_LIST_HEAD(&node->private_list);
1937}
1938
1939static __init unsigned long __maxindex(unsigned int height)
1940{
1941	unsigned int width = height * RADIX_TREE_MAP_SHIFT;
1942	int shift = RADIX_TREE_INDEX_BITS - width;
1943
1944	if (shift < 0)
1945		return ~0UL;
1946	if (shift >= BITS_PER_LONG)
1947		return 0UL;
1948	return ~0UL >> shift;
1949}
1950
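/*
 * For example, with RADIX_TREE_MAP_SHIFT == 6 (64 slots per node) this
 * fills height_to_maxnodes[] with 0, 1, 1 + 64, 1 + 64 + 4096, ...: a
 * fully populated tree of a given height has one node at the top level,
 * 64 below it, and so on.
 */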
1951static __init void radix_tree_init_maxnodes(void)
1952{
1953	unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1];
1954	unsigned int i, j;
1955
1956	for (i = 0; i < ARRAY_SIZE(height_to_maxindex); i++)
1957		height_to_maxindex[i] = __maxindex(i);
1958	for (i = 0; i < ARRAY_SIZE(height_to_maxnodes); i++) {
1959		for (j = i; j > 0; j--)
1960			height_to_maxnodes[i] += height_to_maxindex[j - 1] + 1;
1961	}
1962}
1963
1964static int radix_tree_cpu_dead(unsigned int cpu)
1965{
1966	struct radix_tree_preload *rtp;
1967	struct radix_tree_node *node;
1968
1969	/* Free per-cpu pool of preloaded nodes */
1970	rtp = &per_cpu(radix_tree_preloads, cpu);
1971	while (rtp->nr) {
1972		node = rtp->nodes;
1973		rtp->nodes = node->private_data;
1974		kmem_cache_free(radix_tree_node_cachep, node);
1975		rtp->nr--;
1976	}
1977	return 0;
1978}
1979
1980void __init radix_tree_init(void)
1981{
1982	int ret;
1983	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
1984			sizeof(struct radix_tree_node), 0,
1985			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
1986			radix_tree_node_ctor);
1987	radix_tree_init_maxnodes();
1988	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
1989					NULL, radix_tree_cpu_dead);
1990	WARN_ON(ret < 0);
1991}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Momchil Velikov
   4 * Portions Copyright (C) 2001 Christoph Hellwig
   5 * Copyright (C) 2005 SGI, Christoph Lameter
   6 * Copyright (C) 2006 Nick Piggin
   7 * Copyright (C) 2012 Konstantin Khlebnikov
   8 * Copyright (C) 2016 Intel, Matthew Wilcox
   9 * Copyright (C) 2016 Intel, Ross Zwisler
  10 */
  11
  12#include <linux/bitmap.h>
  13#include <linux/bitops.h>
  14#include <linux/bug.h>
  15#include <linux/cpu.h>
  16#include <linux/errno.h>
  17#include <linux/export.h>
  18#include <linux/idr.h>
  19#include <linux/init.h>
  20#include <linux/kernel.h>
  21#include <linux/kmemleak.h>
  22#include <linux/percpu.h>
  23#include <linux/preempt.h>		/* in_interrupt() */
  24#include <linux/radix-tree.h>
  25#include <linux/rcupdate.h>
  26#include <linux/slab.h>
  27#include <linux/string.h>
  28#include <linux/xarray.h>
  29
  30#include "radix-tree.h"
  31
  32/*
  33 * Radix tree node cache.
  34 */
  35struct kmem_cache *radix_tree_node_cachep;
  36
  37/*
  38 * The radix tree is variable-height, so an insert operation not only has
  39 * to build the branch to its corresponding item, it also has to build the
  40 * branch to existing items if the size has to be increased (by
  41 * radix_tree_extend).
  42 *
  43 * The worst case is a zero height tree with just a single item at index 0,
  44 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
  45 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
  46 * Hence:
  47 */
  48#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
  49
  50/*
  51 * The IDR does not have to be as high as the radix tree since it uses
  52 * signed integers, not unsigned longs.
  53 */
  54#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
  55#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
  56						RADIX_TREE_MAP_SHIFT))
  57#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)
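/*
 * For example, on a 64-bit build with RADIX_TREE_MAP_SHIFT == 6 this
 * gives RADIX_TREE_MAX_PATH = DIV_ROUND_UP(64, 6) = 11, hence
 * RADIX_TREE_PRELOAD_SIZE = 21, while IDR_MAX_PATH =
 * DIV_ROUND_UP(31, 6) = 6 and IDR_PRELOAD_SIZE = 11.
 */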
  58
  59/*
  60 * Per-cpu pool of preloaded nodes
  61 */
  62DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
  63	.lock = INIT_LOCAL_LOCK(lock),
  64};
  65EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);
  66
  67static inline struct radix_tree_node *entry_to_node(void *ptr)
  68{
  69	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
  70}
  71
  72static inline void *node_to_entry(void *ptr)
  73{
  74	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
  75}
  76
  77#define RADIX_TREE_RETRY	XA_RETRY_ENTRY
  78
  79static inline unsigned long
  80get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
  81{
  82	return parent ? slot - parent->slots : 0;
  83}
  84
  85static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
  86			struct radix_tree_node **nodep, unsigned long index)
  87{
  88	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
  89	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);
  90
  91	*nodep = (void *)entry;
  92	return offset;
  93}
  94
  95static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
  96{
  97	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
  98}
  99
 100static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
 101		int offset)
 102{
 103	__set_bit(offset, node->tags[tag]);
 104}
 105
 106static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
 107		int offset)
 108{
 109	__clear_bit(offset, node->tags[tag]);
 110}
 111
 112static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
 113		int offset)
 114{
 115	return test_bit(offset, node->tags[tag]);
 116}
 117
 118static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
 119{
 120	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
 121}
 122
 123static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
 124{
 125	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
 126}
 127
 128static inline void root_tag_clear_all(struct radix_tree_root *root)
 129{
 130	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
 131}
 132
 133static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
 134{
 135	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
 136}
 137
 138static inline unsigned root_tags_get(const struct radix_tree_root *root)
 139{
 140	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
 141}
 142
 143static inline bool is_idr(const struct radix_tree_root *root)
 144{
 145	return !!(root->xa_flags & ROOT_IS_IDR);
 146}
 147
 148/*
 149 * Returns 1 if any slot in the node has this tag set.
 150 * Otherwise returns 0.
 151 */
 152static inline int any_tag_set(const struct radix_tree_node *node,
 153							unsigned int tag)
 154{
 155	unsigned idx;
 156	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
 157		if (node->tags[tag][idx])
 158			return 1;
 159	}
 160	return 0;
 161}
 162
 163static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
 164{
 165	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
 166}
 167
 168/**
 169 * radix_tree_find_next_bit - find the next set bit in a memory region
 170 *
 171 * @node: where to begin the search
 172 * @tag: the tag index
 173 * @offset: the bitnumber to start searching at
 174 *
 175 * Unrollable variant of find_next_bit() for constant size arrays.
 176 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 177 * Returns next bit offset, or size if nothing found.
 178 */
 179static __always_inline unsigned long
 180radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
 181			 unsigned long offset)
 182{
 183	const unsigned long *addr = node->tags[tag];
 184
 185	if (offset < RADIX_TREE_MAP_SIZE) {
 186		unsigned long tmp;
 187
 188		addr += offset / BITS_PER_LONG;
 189		tmp = *addr >> (offset % BITS_PER_LONG);
 190		if (tmp)
 191			return __ffs(tmp) + offset;
 192		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
 193		while (offset < RADIX_TREE_MAP_SIZE) {
 194			tmp = *++addr;
 195			if (tmp)
 196				return __ffs(tmp) + offset;
 197			offset += BITS_PER_LONG;
 198		}
 199	}
 200	return RADIX_TREE_MAP_SIZE;
 201}
 202
 203static unsigned int iter_offset(const struct radix_tree_iter *iter)
 204{
 205	return iter->index & RADIX_TREE_MAP_MASK;
 206}
 207
 208/*
 209 * The maximum index which can be stored in a radix tree
 210 */
 211static inline unsigned long shift_maxindex(unsigned int shift)
 212{
 213	return (RADIX_TREE_MAP_SIZE << shift) - 1;
 214}
 215
 216static inline unsigned long node_maxindex(const struct radix_tree_node *node)
 217{
 218	return shift_maxindex(node->shift);
 219}
 220
 221static unsigned long next_index(unsigned long index,
 222				const struct radix_tree_node *node,
 223				unsigned long offset)
 224{
 225	return (index & ~node_maxindex(node)) + (offset << node->shift);
 226}
 227
 228/*
 229 * This assumes that the caller has performed appropriate preallocation, and
 230 * that the caller has pinned this thread of control to the current CPU.
 231 */
 232static struct radix_tree_node *
 233radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
 234			struct radix_tree_root *root,
 235			unsigned int shift, unsigned int offset,
 236			unsigned int count, unsigned int nr_values)
 237{
 238	struct radix_tree_node *ret = NULL;
 239
 240	/*
 241	 * Preload code isn't irq safe and it doesn't make sense to use
 242	 * preloading during an interrupt anyway as all the allocations have
 243	 * to be atomic. So just do normal allocation when in interrupt.
 244	 */
 245	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
 246		struct radix_tree_preload *rtp;
 247
 248		/*
 249		 * Even if the caller has preloaded, try to allocate from the
 250		 * cache first for the new node to get accounted to the memory
 251		 * cgroup.
 252		 */
 253		ret = kmem_cache_alloc(radix_tree_node_cachep,
 254				       gfp_mask | __GFP_NOWARN);
 255		if (ret)
 256			goto out;
 257
 258		/*
 259		 * Provided the caller has preloaded here, we will always
 260		 * succeed in getting a node here (and never reach
 261		 * kmem_cache_alloc)
 262		 */
 263		rtp = this_cpu_ptr(&radix_tree_preloads);
 264		if (rtp->nr) {
 265			ret = rtp->nodes;
 266			rtp->nodes = ret->parent;
 267			rtp->nr--;
 268		}
 269		/*
 270		 * Update the allocation stack trace as this is more useful
 271		 * for debugging.
 272		 */
 273		kmemleak_update_trace(ret);
 274		goto out;
 275	}
 276	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 277out:
 278	BUG_ON(radix_tree_is_internal_node(ret));
 279	if (ret) {
 280		ret->shift = shift;
 281		ret->offset = offset;
 282		ret->count = count;
 283		ret->nr_values = nr_values;
 284		ret->parent = parent;
 285		ret->array = root;
 286	}
 287	return ret;
 288}
 289
 290void radix_tree_node_rcu_free(struct rcu_head *head)
 291{
 292	struct radix_tree_node *node =
 293			container_of(head, struct radix_tree_node, rcu_head);
 294
 295	/*
 296	 * Must only free zeroed nodes into the slab.  We can be left with
 297	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
 298	 * and tags here.
 299	 */
 300	memset(node->slots, 0, sizeof(node->slots));
 301	memset(node->tags, 0, sizeof(node->tags));
 302	INIT_LIST_HEAD(&node->private_list);
 303
 304	kmem_cache_free(radix_tree_node_cachep, node);
 305}
 306
 307static inline void
 308radix_tree_node_free(struct radix_tree_node *node)
 309{
 310	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
 311}
 312
 313/*
 314 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 315 * ensure that the addition of a single element in the tree cannot fail.  On
 316 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 317 * with preemption not disabled.
 318 *
 319 * To make use of this facility, the radix tree must be initialised without
 320 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 321 */
 322static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
 323{
 324	struct radix_tree_preload *rtp;
 325	struct radix_tree_node *node;
 326	int ret = -ENOMEM;
 327
 328	/*
 329	 * Nodes preloaded by one cgroup can be used by another cgroup, so
 330	 * they should never be accounted to any particular memory cgroup.
 331	 */
 332	gfp_mask &= ~__GFP_ACCOUNT;
 333
 334	local_lock(&radix_tree_preloads.lock);
 335	rtp = this_cpu_ptr(&radix_tree_preloads);
 336	while (rtp->nr < nr) {
 337		local_unlock(&radix_tree_preloads.lock);
 338		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 339		if (node == NULL)
 340			goto out;
 341		local_lock(&radix_tree_preloads.lock);
 342		rtp = this_cpu_ptr(&radix_tree_preloads);
 343		if (rtp->nr < nr) {
 344			node->parent = rtp->nodes;
 345			rtp->nodes = node;
 346			rtp->nr++;
 347		} else {
 348			kmem_cache_free(radix_tree_node_cachep, node);
 349		}
 350	}
 351	ret = 0;
 352out:
 353	return ret;
 354}
 355
 356/*
 357 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 358 * ensure that the addition of a single element in the tree cannot fail.  On
 359 * success, return zero, with preemption disabled.  On error, return -ENOMEM
 360 * with preemption not disabled.
 361 *
 362 * To make use of this facility, the radix tree must be initialised without
 363 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 364 */
 365int radix_tree_preload(gfp_t gfp_mask)
 366{
 367	/* Warn on nonsensical use... */
 368	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
 369	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 370}
 371EXPORT_SYMBOL(radix_tree_preload);
 372
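/*
 * Example: an illustrative sketch (not part of this file) of the classic
 * preload pattern.  The names are made up; the tree must be initialised
 * without __GFP_DIRECT_RECLAIM (e.g. GFP_ATOMIC) so that the insertion
 * under the spinlock draws from the per-cpu preload pool.
 */
static RADIX_TREE(example_tree, GFP_ATOMIC);
static DEFINE_SPINLOCK(example_lock);

static int example_store(unsigned long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);

	if (err)
		return err;
	spin_lock(&example_lock);
	err = radix_tree_insert(&example_tree, index, item);
	spin_unlock(&example_lock);
	radix_tree_preload_end();	/* drops the preload lock */
	return err;
}
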
 373/*
 374 * The same as above function, except we don't guarantee preloading happens.
 375 * We do it, if we decide it helps. On success, return zero with preemption
 376 * disabled. On error, return -ENOMEM with preemption not disabled.
 377 */
 378int radix_tree_maybe_preload(gfp_t gfp_mask)
 379{
 380	if (gfpflags_allow_blocking(gfp_mask))
 381		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 382	/* Preloading doesn't help anything with this gfp mask, skip it */
 383	local_lock(&radix_tree_preloads.lock);
 384	return 0;
 385}
 386EXPORT_SYMBOL(radix_tree_maybe_preload);
 387
 388static unsigned radix_tree_load_root(const struct radix_tree_root *root,
 389		struct radix_tree_node **nodep, unsigned long *maxindex)
 390{
 391	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
 392
 393	*nodep = node;
 394
 395	if (likely(radix_tree_is_internal_node(node))) {
 396		node = entry_to_node(node);
 397		*maxindex = node_maxindex(node);
 398		return node->shift + RADIX_TREE_MAP_SHIFT;
 399	}
 400
 401	*maxindex = 0;
 402	return 0;
 403}
 404
 405/*
 406 *	Extend a radix tree so it can store key @index.
 407 */
 408static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
 409				unsigned long index, unsigned int shift)
 410{
 411	void *entry;
 412	unsigned int maxshift;
 413	int tag;
 414
 415	/* Figure out what the shift should be.  */
 416	maxshift = shift;
 417	while (index > shift_maxindex(maxshift))
 418		maxshift += RADIX_TREE_MAP_SHIFT;
 419
 420	entry = rcu_dereference_raw(root->xa_head);
 421	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
 422		goto out;
 423
 424	do {
 425		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
 426							root, shift, 0, 1, 0);
 427		if (!node)
 428			return -ENOMEM;
 429
 430		if (is_idr(root)) {
 431			all_tag_set(node, IDR_FREE);
 432			if (!root_tag_get(root, IDR_FREE)) {
 433				tag_clear(node, IDR_FREE, 0);
 434				root_tag_set(root, IDR_FREE);
 435			}
 436		} else {
 437			/* Propagate the aggregated tag info to the new child */
 438			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
 439				if (root_tag_get(root, tag))
 440					tag_set(node, tag, 0);
 441			}
 442		}
 443
 444		BUG_ON(shift > BITS_PER_LONG);
 445		if (radix_tree_is_internal_node(entry)) {
 446			entry_to_node(entry)->parent = node;
 447		} else if (xa_is_value(entry)) {
 448			/* Moving a value entry root->xa_head to a node */
 449			node->nr_values = 1;
 450		}
 451		/*
 452		 * entry was already in the radix tree, so we do not need
 453		 * rcu_assign_pointer here
 454		 */
 455		node->slots[0] = (void __rcu *)entry;
 456		entry = node_to_entry(node);
 457		rcu_assign_pointer(root->xa_head, entry);
 458		shift += RADIX_TREE_MAP_SHIFT;
 459	} while (shift <= maxshift);
 460out:
 461	return maxshift + RADIX_TREE_MAP_SHIFT;
 462}
 463
 464/**
 465 *	radix_tree_shrink    -    shrink radix tree to minimum height
 466 *	@root:		radix tree root
 467 */
 468static inline bool radix_tree_shrink(struct radix_tree_root *root)
 469{
 470	bool shrunk = false;
 471
 472	for (;;) {
 473		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
 474		struct radix_tree_node *child;
 475
 476		if (!radix_tree_is_internal_node(node))
 477			break;
 478		node = entry_to_node(node);
 479
 480		/*
 481		 * If the candidate node has more than one child, or its child
 482		 * is not at the leftmost slot, we cannot shrink.
 483		 */
 484		if (node->count != 1)
 485			break;
 486		child = rcu_dereference_raw(node->slots[0]);
 487		if (!child)
 488			break;
 489
 490		/*
 491		 * For an IDR, we must not shrink entry 0 into the root in
 492		 * case somebody calls idr_replace() with a pointer that
 493		 * appears to be an internal entry
 494		 */
 495		if (!node->shift && is_idr(root))
 496			break;
 497
 498		if (radix_tree_is_internal_node(child))
 499			entry_to_node(child)->parent = NULL;
 500
 501		/*
 502		 * We don't need rcu_assign_pointer(), since we are simply
 503		 * moving the node from one part of the tree to another: if it
 504		 * was safe to dereference the old pointer to it
 505		 * (node->slots[0]), it will be safe to dereference the new
 506		 * one (root->xa_head) as far as dependent read barriers go.
 507		 */
 508		root->xa_head = (void __rcu *)child;
 509		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
 510			root_tag_clear(root, IDR_FREE);
 511
 512		/*
 513		 * We have a dilemma here. The node's slot[0] must not be
 514		 * NULLed in case there are concurrent lookups expecting to
 515		 * find the item. However if this was a bottom-level node,
 516		 * then it may be subject to the slot pointer being visible
 517		 * to callers dereferencing it. If the item corresponding to
 518		 * slot[0] is subsequently deleted, these callers would expect
 519		 * their slot to become empty sooner or later.
 520		 *
 521		 * For example, lockless pagecache will look up a slot, deref
 522		 * the page pointer, and if the page has 0 refcount it means it
 523		 * was concurrently deleted from pagecache so try the deref
 524		 * again. Fortunately there is already a requirement for logic
 525		 * to retry the entire slot lookup -- the indirect pointer
 526		 * problem (replacing direct root node with an indirect pointer
 527		 * also results in a stale slot). So tag the slot as indirect
 528		 * to force callers to retry.
 529		 */
 530		node->count = 0;
 531		if (!radix_tree_is_internal_node(child)) {
 532			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;
 533		}
 534
 535		WARN_ON_ONCE(!list_empty(&node->private_list));
 536		radix_tree_node_free(node);
 537		shrunk = true;
 538	}
 539
 540	return shrunk;
 541}
 542
 543static bool delete_node(struct radix_tree_root *root,
 544			struct radix_tree_node *node)
 545{
 546	bool deleted = false;
 547
 548	do {
 549		struct radix_tree_node *parent;
 550
 551		if (node->count) {
 552			if (node_to_entry(node) ==
 553					rcu_dereference_raw(root->xa_head))
 554				deleted |= radix_tree_shrink(root);
 555			return deleted;
 556		}
 557
 558		parent = node->parent;
 559		if (parent) {
 560			parent->slots[node->offset] = NULL;
 561			parent->count--;
 562		} else {
 563			/*
 564			 * Shouldn't the tags already have all been cleared
 565			 * by the caller?
 566			 */
 567			if (!is_idr(root))
 568				root_tag_clear_all(root);
 569			root->xa_head = NULL;
 570		}
 571
 572		WARN_ON_ONCE(!list_empty(&node->private_list));
 573		radix_tree_node_free(node);
 574		deleted = true;
 575
 576		node = parent;
 577	} while (node);
 578
 579	return deleted;
 580}
 581
 582/**
 583 *	__radix_tree_create	-	create a slot in a radix tree
 584 *	@root:		radix tree root
 585 *	@index:		index key
 586 *	@nodep:		returns node
 587 *	@slotp:		returns slot
 588 *
 589 *	Create, if necessary, and return the node and slot for an item
 590 *	at position @index in the radix tree @root.
 591 *
 592 *	Until there is more than one item in the tree, no nodes are
 593 *	allocated and @root->xa_head is used as a direct slot instead of
 594 *	pointing to a node, in which case *@nodep will be NULL.
 595 *
 596 *	Returns -ENOMEM, or 0 for success.
 597 */
 598static int __radix_tree_create(struct radix_tree_root *root,
 599		unsigned long index, struct radix_tree_node **nodep,
 600		void __rcu ***slotp)
 601{
 602	struct radix_tree_node *node = NULL, *child;
 603	void __rcu **slot = (void __rcu **)&root->xa_head;
 604	unsigned long maxindex;
 605	unsigned int shift, offset = 0;
 606	unsigned long max = index;
 607	gfp_t gfp = root_gfp_mask(root);
 608
 609	shift = radix_tree_load_root(root, &child, &maxindex);
 610
 611	/* Make sure the tree is high enough.  */
 612	if (max > maxindex) {
 613		int error = radix_tree_extend(root, gfp, max, shift);
 614		if (error < 0)
 615			return error;
 616		shift = error;
 617		child = rcu_dereference_raw(root->xa_head);
 618	}
 619
 620	while (shift > 0) {
 621		shift -= RADIX_TREE_MAP_SHIFT;
 622		if (child == NULL) {
 623			/* Have to add a child node.  */
 624			child = radix_tree_node_alloc(gfp, node, root, shift,
 625							offset, 0, 0);
 626			if (!child)
 627				return -ENOMEM;
 628			rcu_assign_pointer(*slot, node_to_entry(child));
 629			if (node)
 630				node->count++;
 631		} else if (!radix_tree_is_internal_node(child))
 632			break;
 633
 634		/* Go a level down */
 635		node = entry_to_node(child);
 636		offset = radix_tree_descend(node, &child, index);
 637		slot = &node->slots[offset];
 638	}
 639
 640	if (nodep)
 641		*nodep = node;
 642	if (slotp)
 643		*slotp = slot;
 644	return 0;
 645}
 646
 647/*
 648 * Free any nodes below this node.  The tree is presumed to not need
 649 * shrinking, and any user data in the tree is presumed to not need a
 650 * destructor called on it.  If we need to add a destructor, we can
 651 * add that functionality later.  Note that we may not clear tags or
 652 * slots from the tree as an RCU walker may still have a pointer into
 653 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 654 * but we'll still have to clear those in rcu_free.
 655 */
 656static void radix_tree_free_nodes(struct radix_tree_node *node)
 657{
 658	unsigned offset = 0;
 659	struct radix_tree_node *child = entry_to_node(node);
 660
 661	for (;;) {
 662		void *entry = rcu_dereference_raw(child->slots[offset]);
 663		if (xa_is_node(entry) && child->shift) {
 664			child = entry_to_node(entry);
 665			offset = 0;
 666			continue;
 667		}
 668		offset++;
 669		while (offset == RADIX_TREE_MAP_SIZE) {
 670			struct radix_tree_node *old = child;
 671			offset = child->offset + 1;
 672			child = child->parent;
 673			WARN_ON_ONCE(!list_empty(&old->private_list));
 674			radix_tree_node_free(old);
 675			if (old == entry_to_node(node))
 676				return;
 677		}
 678	}
 679}
 680
 681static inline int insert_entries(struct radix_tree_node *node,
 682		void __rcu **slot, void *item)
 683{
 684	if (*slot)
 685		return -EEXIST;
 686	rcu_assign_pointer(*slot, item);
 687	if (node) {
 688		node->count++;
 689		if (xa_is_value(item))
 690			node->nr_values++;
 691	}
 692	return 1;
 693}
 694
 695/**
 696 *	radix_tree_insert    -    insert into a radix tree
 697 *	@root:		radix tree root
 698 *	@index:		index key
 699 *	@item:		item to insert
 700 *
 701 *	Insert an item into the radix tree at position @index.
 702 */
 703int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
 704			void *item)
 705{
 706	struct radix_tree_node *node;
 707	void __rcu **slot;
 708	int error;
 709
 710	BUG_ON(radix_tree_is_internal_node(item));
 711
 712	error = __radix_tree_create(root, index, &node, &slot);
 713	if (error)
 714		return error;
 715
 716	error = insert_entries(node, slot, item);
 717	if (error < 0)
 718		return error;
 719
 720	if (node) {
 721		unsigned offset = get_slot_offset(node, slot);
 722		BUG_ON(tag_get(node, 0, offset));
 723		BUG_ON(tag_get(node, 1, offset));
 724		BUG_ON(tag_get(node, 2, offset));
 725	} else {
 726		BUG_ON(root_tags_get(root));
 727	}
 728
 729	return 0;
 730}
 731EXPORT_SYMBOL(radix_tree_insert);
 732
 733/**
 734 *	__radix_tree_lookup	-	lookup an item in a radix tree
 735 *	@root:		radix tree root
 736 *	@index:		index key
 737 *	@nodep:		returns node
 738 *	@slotp:		returns slot
 739 *
 740 *	Lookup and return the item at position @index in the radix
 741 *	tree @root.
 742 *
 743 *	Until there is more than one item in the tree, no nodes are
 744 *	allocated and @root->xa_head is used as a direct slot instead of
 745 *	pointing to a node, in which case *@nodep will be NULL.
 746 */
 747void *__radix_tree_lookup(const struct radix_tree_root *root,
 748			  unsigned long index, struct radix_tree_node **nodep,
 749			  void __rcu ***slotp)
 750{
 751	struct radix_tree_node *node, *parent;
 752	unsigned long maxindex;
 753	void __rcu **slot;
 754
 755 restart:
 756	parent = NULL;
 757	slot = (void __rcu **)&root->xa_head;
 758	radix_tree_load_root(root, &node, &maxindex);
 759	if (index > maxindex)
 760		return NULL;
 761
 762	while (radix_tree_is_internal_node(node)) {
 763		unsigned offset;
 764
 765		parent = entry_to_node(node);
 766		offset = radix_tree_descend(parent, &node, index);
 767		slot = parent->slots + offset;
 768		if (node == RADIX_TREE_RETRY)
 769			goto restart;
 770		if (parent->shift == 0)
 771			break;
 772	}
 773
 774	if (nodep)
 775		*nodep = parent;
 776	if (slotp)
 777		*slotp = slot;
 778	return node;
 779}
 780
 781/**
 782 *	radix_tree_lookup_slot    -    lookup a slot in a radix tree
 783 *	@root:		radix tree root
 784 *	@index:		index key
 785 *
 786 *	Returns:  the slot corresponding to the position @index in the
 787 *	radix tree @root. This is useful for update-if-exists operations.
 788 *
 789 *	This function can be called under rcu_read_lock iff the slot is not
 790 *	modified by radix_tree_replace_slot, otherwise it must be called
 791 *	with exclusion from other writers. Any dereference of the slot must be done
 792 *	using radix_tree_deref_slot.
 793 */
 794void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
 795				unsigned long index)
 796{
 797	void __rcu **slot;
 798
 799	if (!__radix_tree_lookup(root, index, NULL, &slot))
 800		return NULL;
 801	return slot;
 802}
 803EXPORT_SYMBOL(radix_tree_lookup_slot);
 804
 805/**
 806 *	radix_tree_lookup    -    perform lookup operation on a radix tree
 807 *	@root:		radix tree root
 808 *	@index:		index key
 809 *
 810 *	Lookup the item at the position @index in the radix tree @root.
 811 *
 812 *	This function can be called under rcu_read_lock, however the caller
 813 *	must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 814 *	them safely). No RCU barriers are required to access or modify the
 815 *	returned item, however.
 816 */
 817void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
 818{
 819	return __radix_tree_lookup(root, index, NULL, NULL);
 820}
 821EXPORT_SYMBOL(radix_tree_lookup);
 822
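/*
 * Example: an illustrative sketch (not part of this file) of an
 * RCU-protected lookup.  Names are made up; the stored objects are
 * assumed to be freed via RCU, so the pointer stays valid for the
 * duration of the read-side section.
 */
static bool example_is_present(const struct radix_tree_root *tree,
			       unsigned long index)
{
	void *item;

	rcu_read_lock();
	item = radix_tree_lookup(tree, index);
	rcu_read_unlock();
	return item != NULL;
}
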
 823static void replace_slot(void __rcu **slot, void *item,
 824		struct radix_tree_node *node, int count, int values)
 825{
 826	if (node && (count || values)) {
 827		node->count += count;
 828		node->nr_values += values;
 829	}
 830
 831	rcu_assign_pointer(*slot, item);
 832}
 833
 834static bool node_tag_get(const struct radix_tree_root *root,
 835				const struct radix_tree_node *node,
 836				unsigned int tag, unsigned int offset)
 837{
 838	if (node)
 839		return tag_get(node, tag, offset);
 840	return root_tag_get(root, tag);
 841}
 842
 843/*
 844 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 845 * free, don't adjust the count, even if it's transitioning between NULL and
 846 * non-NULL.  For the IDA, we mark slots as being IDR_FREE while they still
 847 * have empty bits, but it only stores NULL in slots when they're being
 848 * deleted.
 849 */
 850static int calculate_count(struct radix_tree_root *root,
 851				struct radix_tree_node *node, void __rcu **slot,
 852				void *item, void *old)
 853{
 854	if (is_idr(root)) {
 855		unsigned offset = get_slot_offset(node, slot);
 856		bool free = node_tag_get(root, node, IDR_FREE, offset);
 857		if (!free)
 858			return 0;
 859		if (!old)
 860			return 1;
 861	}
 862	return !!item - !!old;
 863}
 864
 865/**
 866 * __radix_tree_replace		- replace item in a slot
 867 * @root:		radix tree root
 868 * @node:		pointer to tree node
 869 * @slot:		pointer to slot in @node
 870 * @item:		new item to store in the slot.
 871 *
 872 * For use with __radix_tree_lookup().  Caller must hold tree write locked
 873 * across slot lookup and replacement.
 874 */
 875void __radix_tree_replace(struct radix_tree_root *root,
 876			  struct radix_tree_node *node,
 877			  void __rcu **slot, void *item)
 878{
 879	void *old = rcu_dereference_raw(*slot);
 880	int values = !!xa_is_value(item) - !!xa_is_value(old);
 881	int count = calculate_count(root, node, slot, item, old);
 882
 883	/*
 884	 * This function supports replacing value entries and
 885	 * deleting entries, but that needs accounting against the
 886	 * node unless the slot is root->xa_head.
 887	 */
 888	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
 889			(count || values));
 890	replace_slot(slot, item, node, count, values);
 891
 892	if (!node)
 893		return;
 894
 895	delete_node(root, node);
 896}
 897
 898/**
 899 * radix_tree_replace_slot	- replace item in a slot
 900 * @root:	radix tree root
 901 * @slot:	pointer to slot
 902 * @item:	new item to store in the slot.
 903 *
 904 * For use with radix_tree_lookup_slot() and
 905 * radix_tree_gang_lookup_tag_slot().  Caller must hold tree write locked
 906 * across slot lookup and replacement.
 907 *
 908 * NOTE: This cannot be used to switch between non-entries (empty slots),
 909 * regular entries, and value entries, as that requires accounting
 910 * inside the radix tree node. When switching from one type of entry to
 911 * another, or when deleting an entry, use __radix_tree_lookup() and
 912 * __radix_tree_replace() or radix_tree_iter_replace().
 913 */
 914void radix_tree_replace_slot(struct radix_tree_root *root,
 915			     void __rcu **slot, void *item)
 916{
 917	__radix_tree_replace(root, NULL, slot, item);
 918}
 919EXPORT_SYMBOL(radix_tree_replace_slot);
 920
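/*
 * Example: an illustrative sketch (not part of this file) of a
 * present-to-present replacement, per the NOTE above.  Names are made
 * up; @new_item is assumed to be a present (non-NULL) regular entry and
 * @lock the lock serialising updates to @tree.
 */
static void *example_exchange(struct radix_tree_root *tree, spinlock_t *lock,
			      unsigned long index, void *new_item)
{
	void __rcu **slot;
	void *old = NULL;

	spin_lock(lock);
	slot = radix_tree_lookup_slot(tree, index);
	if (slot) {
		old = radix_tree_deref_slot_protected(slot, lock);
		if (old)	/* only replace a present entry */
			radix_tree_replace_slot(tree, slot, new_item);
	}
	spin_unlock(lock);
	return old;
}
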
 921/**
 922 * radix_tree_iter_replace - replace item in a slot
 923 * @root:	radix tree root
 924 * @iter:	iterator state
 925 * @slot:	pointer to slot
 926 * @item:	new item to store in the slot.
 927 *
 928 * For use with radix_tree_for_each_slot().
 929 * Caller must hold tree write locked.
 930 */
 931void radix_tree_iter_replace(struct radix_tree_root *root,
 932				const struct radix_tree_iter *iter,
 933				void __rcu **slot, void *item)
 934{
 935	__radix_tree_replace(root, iter->node, slot, item);
 936}
 937
 938static void node_tag_set(struct radix_tree_root *root,
 939				struct radix_tree_node *node,
 940				unsigned int tag, unsigned int offset)
 941{
 942	while (node) {
 943		if (tag_get(node, tag, offset))
 944			return;
 945		tag_set(node, tag, offset);
 946		offset = node->offset;
 947		node = node->parent;
 948	}
 949
 950	if (!root_tag_get(root, tag))
 951		root_tag_set(root, tag);
 952}
 953
 954/**
 955 *	radix_tree_tag_set - set a tag on a radix tree node
 956 *	@root:		radix tree root
 957 *	@index:		index key
 958 *	@tag:		tag index
 959 *
 960 *	Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 961 *	corresponding to @index in the radix tree.  From
 962 *	the root all the way down to the leaf node.
 963 *
 964 *	Returns the address of the tagged item.  Setting a tag on a not-present
 965 *	item is a bug.
 966 */
 967void *radix_tree_tag_set(struct radix_tree_root *root,
 968			unsigned long index, unsigned int tag)
 969{
 970	struct radix_tree_node *node, *parent;
 971	unsigned long maxindex;
 972
 973	radix_tree_load_root(root, &node, &maxindex);
 974	BUG_ON(index > maxindex);
 975
 976	while (radix_tree_is_internal_node(node)) {
 977		unsigned offset;
 978
 979		parent = entry_to_node(node);
 980		offset = radix_tree_descend(parent, &node, index);
 981		BUG_ON(!node);
 982
 983		if (!tag_get(parent, tag, offset))
 984			tag_set(parent, tag, offset);
 985	}
 986
 987	/* set the root's tag bit */
 988	if (!root_tag_get(root, tag))
 989		root_tag_set(root, tag);
 990
 991	return node;
 992}
 993EXPORT_SYMBOL(radix_tree_tag_set);
 994
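/*
 * Example: an illustrative sketch (not part of this file) of tagging an
 * entry and later collecting tagged entries in a batch.  Names and the
 * choice of tag 0 are made up; the caller is assumed to serialise tag
 * updates against other writers.
 */
static unsigned int example_mark_and_collect(struct radix_tree_root *tree,
					     unsigned long index)
{
	void *batch[16];

	radix_tree_tag_set(tree, index, 0);
	return radix_tree_gang_lookup_tag(tree, batch, 0, 16, 0);
}
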
 995static void node_tag_clear(struct radix_tree_root *root,
 996				struct radix_tree_node *node,
 997				unsigned int tag, unsigned int offset)
 998{
 999	while (node) {
1000		if (!tag_get(node, tag, offset))
1001			return;
1002		tag_clear(node, tag, offset);
1003		if (any_tag_set(node, tag))
1004			return;
1005
1006		offset = node->offset;
1007		node = node->parent;
1008	}
1009
1010	/* clear the root's tag bit */
1011	if (root_tag_get(root, tag))
1012		root_tag_clear(root, tag);
1013}
1014
1015/**
1016 *	radix_tree_tag_clear - clear a tag on a radix tree node
1017 *	@root:		radix tree root
1018 *	@index:		index key
1019 *	@tag:		tag index
1020 *
1021 *	Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
1022 *	corresponding to @index in the radix tree.  If this causes
1023 *	the leaf node to have no tags set then clear the tag in the
1024 *	next-to-leaf node, etc.
1025 *
1026 *	Returns the address of the tagged item on success, else NULL; i.e. it
1027 *	has the same return value and semantics as radix_tree_lookup().
1028 */
1029void *radix_tree_tag_clear(struct radix_tree_root *root,
1030			unsigned long index, unsigned int tag)
1031{
1032	struct radix_tree_node *node, *parent;
1033	unsigned long maxindex;
1034	int offset = 0;
1035
1036	radix_tree_load_root(root, &node, &maxindex);
1037	if (index > maxindex)
1038		return NULL;
1039
1040	parent = NULL;
1041
1042	while (radix_tree_is_internal_node(node)) {
1043		parent = entry_to_node(node);
1044		offset = radix_tree_descend(parent, &node, index);
1045	}
1046
1047	if (node)
1048		node_tag_clear(root, parent, tag, offset);
1049
1050	return node;
1051}
1052EXPORT_SYMBOL(radix_tree_tag_clear);
1053
1054/**
1055 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
1056 * @root: radix tree root
1057 * @iter: iterator state
1058 * @tag: tag to clear
1059 */
1060void radix_tree_iter_tag_clear(struct radix_tree_root *root,
1061			const struct radix_tree_iter *iter, unsigned int tag)
1062{
1063	node_tag_clear(root, iter->node, tag, iter_offset(iter));
1064}
1065
1066/**
1067 * radix_tree_tag_get - get a tag on a radix tree node
1068 * @root:		radix tree root
1069 * @index:		index key
1070 * @tag:		tag index (< RADIX_TREE_MAX_TAGS)
1071 *
1072 * Return values:
1073 *
1074 *  0: tag not present or not set
1075 *  1: tag set
1076 *
1077 * Note that the return value of this function may not be relied on, even if
1078 * the RCU lock is held, unless tag modification and node deletion are excluded
1079 * from concurrency.
1080 */
1081int radix_tree_tag_get(const struct radix_tree_root *root,
1082			unsigned long index, unsigned int tag)
1083{
1084	struct radix_tree_node *node, *parent;
1085	unsigned long maxindex;
1086
1087	if (!root_tag_get(root, tag))
1088		return 0;
1089
1090	radix_tree_load_root(root, &node, &maxindex);
1091	if (index > maxindex)
1092		return 0;
1093
1094	while (radix_tree_is_internal_node(node)) {
1095		unsigned offset;
1096
1097		parent = entry_to_node(node);
1098		offset = radix_tree_descend(parent, &node, index);
1099
1100		if (!tag_get(parent, tag, offset))
1101			return 0;
1102		if (node == RADIX_TREE_RETRY)
1103			break;
1104	}
1105
1106	return 1;
1107}
1108EXPORT_SYMBOL(radix_tree_tag_get);
1109
1110/* Construct iter->tags bit-mask from node->tags[tag] array */
1111static void set_iter_tags(struct radix_tree_iter *iter,
1112				struct radix_tree_node *node, unsigned offset,
1113				unsigned tag)
1114{
1115	unsigned tag_long = offset / BITS_PER_LONG;
1116	unsigned tag_bit  = offset % BITS_PER_LONG;
1117
1118	if (!node) {
1119		iter->tags = 1;
1120		return;
1121	}
1122
1123	iter->tags = node->tags[tag][tag_long] >> tag_bit;
1124
1125	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
1126	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
1127		/* Pick tags from next element */
1128		if (tag_bit)
1129			iter->tags |= node->tags[tag][tag_long + 1] <<
1130						(BITS_PER_LONG - tag_bit);
1131		/* Clip chunk size, here only BITS_PER_LONG tags */
1132		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
1133	}
1134}
1135
1136void __rcu **radix_tree_iter_resume(void __rcu **slot,
1137					struct radix_tree_iter *iter)
1138{
1139	iter->index = __radix_tree_iter_add(iter, 1);
1140	iter->next_index = iter->index;
1141	iter->tags = 0;
1142	return NULL;
1143}
1144EXPORT_SYMBOL(radix_tree_iter_resume);
1145
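/*
 * Example: an illustrative sketch (not part of this file) of pausing an
 * iteration to reschedule, which is the pattern this helper exists for.
 * Names are made up.
 */
static void example_walk(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, tree, &iter, 0) {
		/* ... examine the slot here ... */
		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();
}
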
1146/**
1147 * radix_tree_next_chunk - find next chunk of slots for iteration
1148 *
1149 * @root:	radix tree root
1150 * @iter:	iterator state
1151 * @flags:	RADIX_TREE_ITER_* flags and tag index
1152 * Returns:	pointer to chunk first slot, or NULL if iteration is over
1153 */
1154void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
1155			     struct radix_tree_iter *iter, unsigned flags)
1156{
1157	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
1158	struct radix_tree_node *node, *child;
1159	unsigned long index, offset, maxindex;
1160
1161	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
1162		return NULL;
1163
1164	/*
1165	 * Catch next_index overflow after ~0UL. iter->index never overflows
1166	 * while iterating; it can be zero only at the beginning.
1167	 * And we cannot overflow iter->next_index in a single step,
1168	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
1169	 *
1170	 * This condition is also used by radix_tree_next_slot() to stop
1171	 * contiguous iterating and to forbid switching to the next chunk.
1172	 */
1173	index = iter->next_index;
1174	if (!index && iter->index)
1175		return NULL;
1176
1177 restart:
1178	radix_tree_load_root(root, &child, &maxindex);
1179	if (index > maxindex)
1180		return NULL;
1181	if (!child)
1182		return NULL;
1183
1184	if (!radix_tree_is_internal_node(child)) {
1185		/* Single-slot tree */
1186		iter->index = index;
1187		iter->next_index = maxindex + 1;
1188		iter->tags = 1;
1189		iter->node = NULL;
1190		return (void __rcu **)&root->xa_head;
1191	}
1192
1193	do {
1194		node = entry_to_node(child);
1195		offset = radix_tree_descend(node, &child, index);
1196
1197		if ((flags & RADIX_TREE_ITER_TAGGED) ?
1198				!tag_get(node, tag, offset) : !child) {
1199			/* Hole detected */
1200			if (flags & RADIX_TREE_ITER_CONTIG)
1201				return NULL;
1202
1203			if (flags & RADIX_TREE_ITER_TAGGED)
1204				offset = radix_tree_find_next_bit(node, tag,
1205						offset + 1);
1206			else
1207				while (++offset < RADIX_TREE_MAP_SIZE) {
1208					void *slot = rcu_dereference_raw(
1209							node->slots[offset]);
1210					if (slot)
1211						break;
1212				}
1213			index &= ~node_maxindex(node);
1214			index += offset << node->shift;
1215			/* Overflow after ~0UL */
1216			if (!index)
1217				return NULL;
1218			if (offset == RADIX_TREE_MAP_SIZE)
1219				goto restart;
1220			child = rcu_dereference_raw(node->slots[offset]);
1221		}
1222
1223		if (!child)
1224			goto restart;
1225		if (child == RADIX_TREE_RETRY)
1226			break;
1227	} while (node->shift && radix_tree_is_internal_node(child));
1228
1229	/* Update the iterator state */
1230	iter->index = (index & ~node_maxindex(node)) | offset;
1231	iter->next_index = (index | node_maxindex(node)) + 1;
1232	iter->node = node;
1233
1234	if (flags & RADIX_TREE_ITER_TAGGED)
1235		set_iter_tags(iter, node, offset, tag);
1236
1237	return node->slots + offset;
1238}
1239EXPORT_SYMBOL(radix_tree_next_chunk);
1240
1241/**
1242 *	radix_tree_gang_lookup - perform multiple lookup on a radix tree
1243 *	@root:		radix tree root
1244 *	@results:	where the results of the lookup are placed
1245 *	@first_index:	start the lookup from this key
1246 *	@max_items:	place up to this many items at *results
1247 *
1248 *	Performs an index-ascending scan of the tree for present items.  Places
1249 *	them at *@results and returns the number of items which were placed at
1250 *	*@results.
1251 *
1252 *	The implementation is naive.
1253 *
1254 *	Like radix_tree_lookup, radix_tree_gang_lookup may be called under
1255 *	rcu_read_lock. In this case, rather than the returned results being
1256 *	an atomic snapshot of the tree at a single point in time, the
1257 *	semantics of an RCU protected gang lookup are as though multiple
1258 *	radix_tree_lookups had been issued individually, with the results
1259 *	stored in 'results'.
1260 */
1261unsigned int
1262radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
1263			unsigned long first_index, unsigned int max_items)
1264{
1265	struct radix_tree_iter iter;
1266	void __rcu **slot;
1267	unsigned int ret = 0;
1268
1269	if (unlikely(!max_items))
1270		return 0;
1271
1272	radix_tree_for_each_slot(slot, root, &iter, first_index) {
1273		results[ret] = rcu_dereference_raw(*slot);
1274		if (!results[ret])
1275			continue;
1276		if (radix_tree_is_internal_node(results[ret])) {
1277			slot = radix_tree_iter_retry(&iter);
1278			continue;
1279		}
1280		if (++ret == max_items)
1281			break;
1282	}
1283
1284	return ret;
1285}
1286EXPORT_SYMBOL(radix_tree_gang_lookup);
1287
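/*
 * Example: an illustrative sketch (not part of this file) of a batched
 * lookup under RCU, with the weak snapshot semantics described above.
 * Names are made up; callers needing the entries after rcu_read_unlock()
 * must take their own references inside the read-side section.
 */
static unsigned int example_first_batch(const struct radix_tree_root *tree,
					void **batch, unsigned int nr)
{
	unsigned int n;

	rcu_read_lock();
	n = radix_tree_gang_lookup(tree, batch, 0, nr);
	rcu_read_unlock();
	return n;	/* batch[0..n) were present at some point */
}
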
1288/**
1289 *	radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
1290 *	                             based on a tag
1291 *	@root:		radix tree root
1292 *	@results:	where the results of the lookup are placed
1293 *	@first_index:	start the lookup from this key
1294 *	@max_items:	place up to this many items at *results
1295 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
1296 *
1297 *	Performs an index-ascending scan of the tree for present items which
1298 *	have the tag indexed by @tag set.  Places the items at *@results and
1299 *	returns the number of items which were placed at *@results.
1300 */
1301unsigned int
1302radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
1303		unsigned long first_index, unsigned int max_items,
1304		unsigned int tag)
1305{
1306	struct radix_tree_iter iter;
1307	void __rcu **slot;
1308	unsigned int ret = 0;
1309
1310	if (unlikely(!max_items))
1311		return 0;
1312
1313	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
1314		results[ret] = rcu_dereference_raw(*slot);
1315		if (!results[ret])
1316			continue;
1317		if (radix_tree_is_internal_node(results[ret])) {
1318			slot = radix_tree_iter_retry(&iter);
1319			continue;
1320		}
1321		if (++ret == max_items)
1322			break;
1323	}
1324
1325	return ret;
1326}
1327EXPORT_SYMBOL(radix_tree_gang_lookup_tag);
1328
1329/**
1330 *	radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
1331 *					  radix tree based on a tag
1332 *	@root:		radix tree root
1333 *	@results:	where the results of the lookup are placed
1334 *	@first_index:	start the lookup from this key
1335 *	@max_items:	place up to this many items at *results
1336 *	@tag:		the tag index (< RADIX_TREE_MAX_TAGS)
1337 *
1338 *	Performs an index-ascending scan of the tree for present items which
1339 *	have the tag indexed by @tag set.  Places the slots at *@results and
1340 *	returns the number of slots which were placed at *@results.
1341 */
1342unsigned int
1343radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
1344		void __rcu ***results, unsigned long first_index,
1345		unsigned int max_items, unsigned int tag)
1346{
1347	struct radix_tree_iter iter;
1348	void __rcu **slot;
1349	unsigned int ret = 0;
1350
1351	if (unlikely(!max_items))
1352		return 0;
1353
1354	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
1355		results[ret] = slot;
1356		if (++ret == max_items)
1357			break;
1358	}
1359
1360	return ret;
1361}
1362EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);
1363
1364static bool __radix_tree_delete(struct radix_tree_root *root,
1365				struct radix_tree_node *node, void __rcu **slot)
1366{
1367	void *old = rcu_dereference_raw(*slot);
1368	int values = xa_is_value(old) ? -1 : 0;
1369	unsigned offset = get_slot_offset(node, slot);
1370	int tag;
1371
1372	if (is_idr(root))
1373		node_tag_set(root, node, IDR_FREE, offset);
1374	else
1375		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
1376			node_tag_clear(root, node, tag, offset);
1377
1378	replace_slot(slot, NULL, node, -1, values);
1379	return node && delete_node(root, node);
1380}
1381
1382/**
1383 * radix_tree_iter_delete - delete the entry at this iterator position
1384 * @root: radix tree root
1385 * @iter: iterator state
1386 * @slot: pointer to slot
1387 *
1388 * Delete the entry at the position currently pointed to by the iterator.
1389 * This may result in the current node being freed; if it is, the iterator
1390 * is advanced so that it will not reference the freed memory.  This
1391 * function may be called without any locking if there are no other threads
1392 * which can access this tree.
1393 */
1394void radix_tree_iter_delete(struct radix_tree_root *root,
1395				struct radix_tree_iter *iter, void __rcu **slot)
1396{
1397	if (__radix_tree_delete(root, iter->node, slot))
1398		iter->index = iter->next_index;
1399}
1400EXPORT_SYMBOL(radix_tree_iter_delete);
1401
1402/**
1403 * radix_tree_delete_item - delete an item from a radix tree
1404 * @root: radix tree root
1405 * @index: index key
1406 * @item: expected item
1407 *
1408 * Remove @item at @index from the radix tree rooted at @root.
1409 *
1410 * Return: the deleted entry, or %NULL if it was not present
1411 * or the entry at the given @index was not @item.
1412 */
1413void *radix_tree_delete_item(struct radix_tree_root *root,
1414			     unsigned long index, void *item)
1415{
1416	struct radix_tree_node *node = NULL;
1417	void __rcu **slot = NULL;
1418	void *entry;
1419
1420	entry = __radix_tree_lookup(root, index, &node, &slot);
1421	if (!slot)
1422		return NULL;
1423	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
1424						get_slot_offset(node, slot))))
1425		return NULL;
1426
1427	if (item && entry != item)
1428		return NULL;
1429
1430	__radix_tree_delete(root, node, slot);
1431
1432	return entry;
1433}
1434EXPORT_SYMBOL(radix_tree_delete_item);
1435
1436/**
1437 * radix_tree_delete - delete an entry from a radix tree
1438 * @root: radix tree root
1439 * @index: index key
1440 *
1441 * Remove the entry at @index from the radix tree rooted at @root.
1442 *
1443 * Return: The deleted entry, or %NULL if it was not present.
1444 */
1445void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
1446{
1447	return radix_tree_delete_item(root, index, NULL);
1448}
1449EXPORT_SYMBOL(radix_tree_delete);
1450
1451/**
1452 *	radix_tree_tagged - test whether any items in the tree are tagged
1453 *	@root:		radix tree root
1454 *	@tag:		tag to test
1455 */
1456int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
1457{
1458	return root_tag_get(root, tag);
1459}
1460EXPORT_SYMBOL(radix_tree_tagged);
1461
1462/**
1463 * idr_preload - preload for idr_alloc()
1464 * @gfp_mask: allocation mask to use for preloading
1465 *
1466 * Preallocate memory to use for the next call to idr_alloc().  This function
1467 * returns with preemption disabled.  It will be enabled by idr_preload_end().
1468 */
1469void idr_preload(gfp_t gfp_mask)
1470{
1471	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
1472		local_lock(&radix_tree_preloads.lock);
1473}
1474EXPORT_SYMBOL(idr_preload);
1475
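/*
 * Example: an illustrative sketch (not part of this file) of the pattern
 * idr_preload() exists for.  Names are made up; GFP_NOWAIT is used under
 * the spinlock because the allocation is backed by the preload pool.
 */
static DEFINE_IDR(example_idr);
static DEFINE_SPINLOCK(example_idr_lock);

static int example_new_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);
	spin_lock(&example_idr_lock);
	id = idr_alloc(&example_idr, ptr, 0, 0, GFP_NOWAIT);
	spin_unlock(&example_idr_lock);
	idr_preload_end();

	return id;	/* the new id, or a negative errno */
}
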
1476void __rcu **idr_get_free(struct radix_tree_root *root,
1477			      struct radix_tree_iter *iter, gfp_t gfp,
1478			      unsigned long max)
1479{
1480	struct radix_tree_node *node = NULL, *child;
1481	void __rcu **slot = (void __rcu **)&root->xa_head;
1482	unsigned long maxindex, start = iter->next_index;
1483	unsigned int shift, offset = 0;
1484
1485 grow:
1486	shift = radix_tree_load_root(root, &child, &maxindex);
1487	if (!radix_tree_tagged(root, IDR_FREE))
1488		start = max(start, maxindex + 1);
1489	if (start > max)
1490		return ERR_PTR(-ENOSPC);
1491
1492	if (start > maxindex) {
1493		int error = radix_tree_extend(root, gfp, start, shift);
1494		if (error < 0)
1495			return ERR_PTR(error);
1496		shift = error;
1497		child = rcu_dereference_raw(root->xa_head);
1498	}
1499	if (start == 0 && shift == 0)
1500		shift = RADIX_TREE_MAP_SHIFT;
1501
1502	while (shift) {
1503		shift -= RADIX_TREE_MAP_SHIFT;
1504		if (child == NULL) {
1505			/* Have to add a child node.  */
1506			child = radix_tree_node_alloc(gfp, node, root, shift,
1507							offset, 0, 0);
1508			if (!child)
1509				return ERR_PTR(-ENOMEM);
1510			all_tag_set(child, IDR_FREE);
1511			rcu_assign_pointer(*slot, node_to_entry(child));
1512			if (node)
1513				node->count++;
1514		} else if (!radix_tree_is_internal_node(child))
1515			break;
1516
1517		node = entry_to_node(child);
1518		offset = radix_tree_descend(node, &child, start);
1519		if (!tag_get(node, IDR_FREE, offset)) {
1520			offset = radix_tree_find_next_bit(node, IDR_FREE,
1521							offset + 1);
1522			start = next_index(start, node, offset);
1523			if (start > max || start == 0)
1524				return ERR_PTR(-ENOSPC);
1525			while (offset == RADIX_TREE_MAP_SIZE) {
1526				offset = node->offset + 1;
1527				node = node->parent;
1528				if (!node)
1529					goto grow;
1530				shift = node->shift;
1531			}
1532			child = rcu_dereference_raw(node->slots[offset]);
1533		}
1534		slot = &node->slots[offset];
1535	}
1536
1537	iter->index = start;
1538	if (node)
1539		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
1540	else
1541		iter->next_index = 1;
1542	iter->node = node;
1543	set_iter_tags(iter, node, offset, IDR_FREE);
1544
1545	return slot;
1546}
1547
1548/**
1549 * idr_destroy - release all internal memory from an IDR
1550 * @idr: idr handle
1551 *
1552 * After this function is called, the IDR is empty, and may be reused or
1553 * the data structure containing it may be freed.
1554 *
1555 * A typical clean-up sequence for objects stored in an idr tree will use
1556 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
1557 * free the memory used to keep track of those objects.
1558 */
1559void idr_destroy(struct idr *idr)
1560{
1561	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);
1562	if (radix_tree_is_internal_node(node))
1563		radix_tree_free_nodes(node);
1564	idr->idr_rt.xa_head = NULL;
1565	root_tag_set(&idr->idr_rt, IDR_FREE);
1566}
1567EXPORT_SYMBOL(idr_destroy);
1568
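/*
 * Example: an illustrative sketch (not part of this file) of the clean-up
 * sequence described above.  Assumes the stored pointers came from
 * kmalloc(); names are made up.
 */
static int example_free_one(int id, void *p, void *data)
{
	kfree(p);
	return 0;
}

static void example_teardown(struct idr *idr)
{
	idr_for_each(idr, example_free_one, NULL);
	idr_destroy(idr);
}
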
1569static void
1570radix_tree_node_ctor(void *arg)
1571{
1572	struct radix_tree_node *node = arg;
1573
1574	memset(node, 0, sizeof(*node));
1575	INIT_LIST_HEAD(&node->private_list);
1576}
1577
1578static int radix_tree_cpu_dead(unsigned int cpu)
1579{
1580	struct radix_tree_preload *rtp;
1581	struct radix_tree_node *node;
1582
1583	/* Free per-cpu pool of preloaded nodes */
1584	rtp = &per_cpu(radix_tree_preloads, cpu);
1585	while (rtp->nr) {
1586		node = rtp->nodes;
1587		rtp->nodes = node->parent;
1588		kmem_cache_free(radix_tree_node_cachep, node);
1589		rtp->nr--;
1590	}
1591	return 0;
1592}
1593
1594void __init radix_tree_init(void)
1595{
1596	int ret;
1597
1598	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
1599	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
1600	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
1601	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
1602			sizeof(struct radix_tree_node), 0,
1603			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
1604			radix_tree_node_ctor);
1605	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
1606					NULL, radix_tree_cpu_dead);
1607	WARN_ON(ret < 0);
1608}