   1// SPDX-License-Identifier: GPL-2.0+
   2/*
   3 * Maple Tree implementation
   4 * Copyright (c) 2018-2022 Oracle Corporation
   5 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
   6 *	    Matthew Wilcox <willy@infradead.org>
   7 * Copyright (c) 2023 ByteDance
   8 * Author: Peng Zhang <zhangpeng.00@bytedance.com>
   9 */
  10
  11/*
  12 * DOC: Interesting implementation details of the Maple Tree
  13 *
  14 * Each node type has a number of slots for entries and a number of slots for
  15 * pivots.  In the case of dense nodes, the pivots are implied by the position
  16 * and are simply the slot index + the minimum of the node.
  17 *
  18 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
  19 * indicate that the tree is specifying ranges.  Pivots may appear in the
  20 * subtree with an entry attached to the value whereas keys are unique to a
  21 * specific position of a B-tree.  Pivot values are inclusive of the slot with
  22 * the same index.
  23 *
  24 *
   25 * The following illustrates the layout of a range64 node's slots and pivots.
  26 *
  27 *
  28 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
  29 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
  30 *           │   │   │   │     │    │    │    │    └─ Implied maximum
  31 *           │   │   │   │     │    │    │    └─ Pivot 14
  32 *           │   │   │   │     │    │    └─ Pivot 13
  33 *           │   │   │   │     │    └─ Pivot 12
  34 *           │   │   │   │     └─ Pivot 11
  35 *           │   │   │   └─ Pivot 2
  36 *           │   │   └─ Pivot 1
  37 *           │   └─ Pivot 0
  38 *           └─  Implied minimum
  39 *
  40 * Slot contents:
  41 *  Internal (non-leaf) nodes contain pointers to other nodes.
  42 *  Leaf nodes contain entries.
  43 *
  44 * The location of interest is often referred to as an offset.  All offsets have
  45 * a slot, but the last offset has an implied pivot from the node above (or
   46 * ULONG_MAX for the root node).
  47 *
  48 * Ranges complicate certain write activities.  When modifying any of
  49 * the B-tree variants, it is known that one entry will either be added or
  50 * deleted.  When modifying the Maple Tree, one store operation may overwrite
  51 * the entire data set, or one half of the tree, or the middle half of the tree.
  52 *
  53 */
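
/*
 * Example (illustrative, not from the original source): a maple_range_64
 * leaf covering 0-100 that stores entry A at 0-10 and entry B at 11-20
 * lays out as:
 *
 *	slot[0] = A,    pivot[0] = 10	(A occupies 0-10)
 *	slot[1] = B,    pivot[1] = 20	(B occupies 11-20)
 *	slot[2] = NULL			(21-100 is empty; the last range is
 *					 bounded by the implied maximum, 100)
 */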
  54
  55
  56#include <linux/maple_tree.h>
  57#include <linux/xarray.h>
  58#include <linux/types.h>
  59#include <linux/export.h>
  60#include <linux/slab.h>
  61#include <linux/limits.h>
  62#include <asm/barrier.h>
  63
  64#define CREATE_TRACE_POINTS
  65#include <trace/events/maple_tree.h>
  66
  67#define MA_ROOT_PARENT 1
  68
  69/*
  70 * Maple state flags
  71 * * MA_STATE_BULK		- Bulk insert mode
  72 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
  73 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
  74 */
  75#define MA_STATE_BULK		1
  76#define MA_STATE_REBALANCE	2
  77#define MA_STATE_PREALLOC	4
  78
  79#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
  80#define mas_tree_parent(x) ((unsigned long)(x->tree) | MA_ROOT_PARENT)
  81#define ma_mnode_ptr(x) ((struct maple_node *)(x))
  82#define ma_enode_ptr(x) ((struct maple_enode *)(x))
  83static struct kmem_cache *maple_node_cache;
  84
  85#ifdef CONFIG_DEBUG_MAPLE_TREE
  86static const unsigned long mt_max[] = {
  87	[maple_dense]		= MAPLE_NODE_SLOTS,
  88	[maple_leaf_64]		= ULONG_MAX,
  89	[maple_range_64]	= ULONG_MAX,
  90	[maple_arange_64]	= ULONG_MAX,
  91};
  92#define mt_node_max(x) mt_max[mte_node_type(x)]
  93#endif
  94
  95static const unsigned char mt_slots[] = {
  96	[maple_dense]		= MAPLE_NODE_SLOTS,
  97	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
  98	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
  99	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
 100};
 101#define mt_slot_count(x) mt_slots[mte_node_type(x)]
 102
 103static const unsigned char mt_pivots[] = {
 104	[maple_dense]		= 0,
 105	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
 106	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
 107	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
 108};
 109#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
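
/*
 * Note (illustrative, assuming a typical 64-bit configuration): each node
 * type has one more slot than pivots, e.g. maple_range_64 holds
 * MAPLE_RANGE64_SLOTS (16) entries but only 15 pivots; the final slot's
 * range ends at the implied maximum shown in the diagram above.
 */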
 110
 111static const unsigned char mt_min_slots[] = {
 112	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
 113	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
 114	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
 115	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
 116};
 117#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
 118
 119#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
 120#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)
 121
 122struct maple_big_node {
 123	struct maple_pnode *parent;
 124	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
 125	union {
 126		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
 127		struct {
 128			unsigned long padding[MAPLE_BIG_NODE_GAPS];
 129			unsigned long gap[MAPLE_BIG_NODE_GAPS];
 130		};
 131	};
 132	unsigned char b_end;
 133	enum maple_type type;
 134};
 135
 136/*
 137 * The maple_subtree_state is used to build a tree to replace a segment of an
 138 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 139 * dead node and restart on updates.
 140 */
 141struct maple_subtree_state {
 142	struct ma_state *orig_l;	/* Original left side of subtree */
 143	struct ma_state *orig_r;	/* Original right side of subtree */
 144	struct ma_state *l;		/* New left side of subtree */
 145	struct ma_state *m;		/* New middle of subtree (rare) */
 146	struct ma_state *r;		/* New right side of subtree */
 147	struct ma_topiary *free;	/* nodes to be freed */
 148	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
 149	struct maple_big_node *bn;
 150};
 151
 152#ifdef CONFIG_KASAN_STACK
 153/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
 154#define noinline_for_kasan noinline_for_stack
 155#else
 156#define noinline_for_kasan inline
 157#endif
 158
 159/* Functions */
 160static inline struct maple_node *mt_alloc_one(gfp_t gfp)
 161{
 162	return kmem_cache_alloc(maple_node_cache, gfp);
 163}
 164
 165static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
 166{
 167	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
 168}
 169
 170static inline void mt_free_one(struct maple_node *node)
 171{
 172	kmem_cache_free(maple_node_cache, node);
 173}
 174
 175static inline void mt_free_bulk(size_t size, void __rcu **nodes)
 176{
 177	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
 178}
 179
 180static void mt_free_rcu(struct rcu_head *head)
 181{
 182	struct maple_node *node = container_of(head, struct maple_node, rcu);
 183
 184	kmem_cache_free(maple_node_cache, node);
 185}
 186
 187/*
 188 * ma_free_rcu() - Use rcu callback to free a maple node
 189 * @node: The node to free
 190 *
 191 * The maple tree uses the parent pointer to indicate this node is no longer in
 192 * use and will be freed.
 193 */
 194static void ma_free_rcu(struct maple_node *node)
 195{
 196	WARN_ON(node->parent != ma_parent_ptr(node));
 197	call_rcu(&node->rcu, mt_free_rcu);
 198}
 199
 200static void mas_set_height(struct ma_state *mas)
 201{
 202	unsigned int new_flags = mas->tree->ma_flags;
 203
 204	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
 205	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
 206	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
 207	mas->tree->ma_flags = new_flags;
 208}
 209
 210static unsigned int mas_mt_height(struct ma_state *mas)
 211{
 212	return mt_height(mas->tree);
 213}
 214
 215static inline unsigned int mt_attr(struct maple_tree *mt)
 216{
 217	return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
 218}
 219
 220static __always_inline enum maple_type mte_node_type(
 221		const struct maple_enode *entry)
 222{
 223	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
 224		MAPLE_NODE_TYPE_MASK;
 225}
 226
 227static __always_inline bool ma_is_dense(const enum maple_type type)
 228{
 229	return type < maple_leaf_64;
 230}
 231
 232static __always_inline bool ma_is_leaf(const enum maple_type type)
 233{
 234	return type < maple_range_64;
 235}
 236
 237static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
 238{
 239	return ma_is_leaf(mte_node_type(entry));
 240}
 241
 242/*
 243 * We also reserve values with the bottom two bits set to '10' which are
 244 * below 4096
 245 */
 246static __always_inline bool mt_is_reserved(const void *entry)
 247{
 248	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
 249		xa_is_internal(entry);
 250}
 251
 252static __always_inline void mas_set_err(struct ma_state *mas, long err)
 253{
 254	mas->node = MA_ERROR(err);
 255	mas->status = ma_error;
 256}
 257
 258static __always_inline bool mas_is_ptr(const struct ma_state *mas)
 259{
 260	return mas->status == ma_root;
 261}
 262
 263static __always_inline bool mas_is_start(const struct ma_state *mas)
 264{
 265	return mas->status == ma_start;
 266}
 267
 268static __always_inline bool mas_is_none(const struct ma_state *mas)
 269{
 270	return mas->status == ma_none;
 271}
 272
 273static __always_inline bool mas_is_paused(const struct ma_state *mas)
 274{
 275	return mas->status == ma_pause;
 276}
 277
 278static __always_inline bool mas_is_overflow(struct ma_state *mas)
 279{
 280	return mas->status == ma_overflow;
 281}
 282
 283static inline bool mas_is_underflow(struct ma_state *mas)
 284{
 285	return mas->status == ma_underflow;
 286}
 287
 288static __always_inline struct maple_node *mte_to_node(
 289		const struct maple_enode *entry)
 290{
 291	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
 292}
 293
 294/*
 295 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 296 * @entry: The maple encoded node
 297 *
 298 * Return: a maple topiary pointer
 299 */
 300static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
 301{
 302	return (struct maple_topiary *)
 303		((unsigned long)entry & ~MAPLE_NODE_MASK);
 304}
 305
 306/*
 307 * mas_mn() - Get the maple state node.
 308 * @mas: The maple state
 309 *
 310 * Return: the maple node (not encoded - bare pointer).
 311 */
 312static inline struct maple_node *mas_mn(const struct ma_state *mas)
 313{
 314	return mte_to_node(mas->node);
 315}
 316
 317/*
 318 * mte_set_node_dead() - Set a maple encoded node as dead.
 319 * @mn: The maple encoded node.
 320 */
 321static inline void mte_set_node_dead(struct maple_enode *mn)
 322{
 323	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
 324	smp_wmb(); /* Needed for RCU */
 325}
 326
 327/* Bit 1 indicates the root is a node */
 328#define MAPLE_ROOT_NODE			0x02
  329/* maple_type is stored in bits 3-6 */
 330#define MAPLE_ENODE_TYPE_SHIFT		0x03
 331/* Bit 2 means a NULL somewhere below */
 332#define MAPLE_ENODE_NULL		0x04
 333
 334static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
 335					     enum maple_type type)
 336{
 337	return (void *)((unsigned long)node |
 338			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
 339}
 340
 341static inline void *mte_mk_root(const struct maple_enode *node)
 342{
 343	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
 344}
 345
 346static inline void *mte_safe_root(const struct maple_enode *node)
 347{
 348	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
 349}
 350
 351static inline void *mte_set_full(const struct maple_enode *node)
 352{
 353	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
 354}
 355
 356static inline void *mte_clear_full(const struct maple_enode *node)
 357{
 358	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
 359}
 360
 361static inline bool mte_has_null(const struct maple_enode *node)
 362{
 363	return (unsigned long)node & MAPLE_ENODE_NULL;
 364}
 365
 366static __always_inline bool ma_is_root(struct maple_node *node)
 367{
 368	return ((unsigned long)node->parent & MA_ROOT_PARENT);
 369}
 370
 371static __always_inline bool mte_is_root(const struct maple_enode *node)
 372{
 373	return ma_is_root(mte_to_node(node));
 374}
 375
 376static inline bool mas_is_root_limits(const struct ma_state *mas)
 377{
 378	return !mas->min && mas->max == ULONG_MAX;
 379}
 380
 381static __always_inline bool mt_is_alloc(struct maple_tree *mt)
 382{
 383	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
 384}
 385
 386/*
 387 * The Parent Pointer
 388 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
  389 * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
 390 * bit values need an extra bit to store the offset.  This extra bit comes from
 391 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 392 * indicate if bit 2 is part of the type or the slot.
 393 *
 394 * Note types:
 395 *  0x??1 = Root
 396 *  0x?00 = 16 bit nodes
 397 *  0x010 = 32 bit nodes
 398 *  0x110 = 64 bit nodes
 399 *
 400 * Slot size and alignment
 401 *  0b??1 : Root
 402 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 403 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 404 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 405 */
 406
 407#define MAPLE_PARENT_ROOT		0x01
 408
 409#define MAPLE_PARENT_SLOT_SHIFT		0x03
 410#define MAPLE_PARENT_SLOT_MASK		0xF8
 411
 412#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
 413#define MAPLE_PARENT_16B_SLOT_MASK	0xFC
 414
 415#define MAPLE_PARENT_RANGE64		0x06
 416#define MAPLE_PARENT_RANGE32		0x04
 417#define MAPLE_PARENT_NOT_RANGE16	0x02
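
/*
 * Worked example (illustrative, assuming MAPLE_NODE_MASK == 0xff for
 * 256B-aligned nodes): encoding slot 5 of a maple_range_64 parent at
 * address p, as mas_set_parent() below does:
 *
 *	val = (p & ~MAPLE_NODE_MASK) | (5 << MAPLE_PARENT_SLOT_SHIFT)
 *	      | MAPLE_PARENT_RANGE64
 *	    = p | 0x28 | 0x06 = p | 0x2e
 *
 * Decoding: mte_parent_slot() computes (0x2e & 0xfc) >> 3 == 5, and
 * mte_parent() recovers p with val & ~MAPLE_NODE_MASK.
 */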
 418
 419/*
 420 * mte_parent_shift() - Get the parent shift for the slot storage.
 421 * @parent: The parent pointer cast as an unsigned long
  422 * Return: The shift into that pointer to the start of the slot
 423 */
 424static inline unsigned long mte_parent_shift(unsigned long parent)
 425{
 426	/* Note bit 1 == 0 means 16B */
 427	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
 428		return MAPLE_PARENT_SLOT_SHIFT;
 429
 430	return MAPLE_PARENT_16B_SLOT_SHIFT;
 431}
 432
 433/*
 434 * mte_parent_slot_mask() - Get the slot mask for the parent.
 435 * @parent: The parent pointer cast as an unsigned long.
 436 * Return: The slot mask for that parent.
 437 */
 438static inline unsigned long mte_parent_slot_mask(unsigned long parent)
 439{
 440	/* Note bit 1 == 0 means 16B */
 441	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
 442		return MAPLE_PARENT_SLOT_MASK;
 443
 444	return MAPLE_PARENT_16B_SLOT_MASK;
 445}
 446
 447/*
 448 * mas_parent_type() - Return the maple_type of the parent from the stored
 449 * parent type.
 450 * @mas: The maple state
  451 * @enode: The maple_enode from which to extract the parent's type
 452 * Return: The node->parent maple_type
 453 */
 454static inline
 455enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
 456{
 457	unsigned long p_type;
 458
 459	p_type = (unsigned long)mte_to_node(enode)->parent;
 460	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
 461		return 0;
 462
 463	p_type &= MAPLE_NODE_MASK;
 464	p_type &= ~mte_parent_slot_mask(p_type);
 465	switch (p_type) {
 466	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
 467		if (mt_is_alloc(mas->tree))
 468			return maple_arange_64;
 469		return maple_range_64;
 470	}
 471
 472	return 0;
 473}
 474
 475/*
 476 * mas_set_parent() - Set the parent node and encode the slot
 477 * @enode: The encoded maple node.
 478 * @parent: The encoded maple node that is the parent of @enode.
 479 * @slot: The slot that @enode resides in @parent.
 480 *
 481 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 482 * parent type.
 483 */
 484static inline
 485void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
 486		    const struct maple_enode *parent, unsigned char slot)
 487{
 488	unsigned long val = (unsigned long)parent;
 489	unsigned long shift;
 490	unsigned long type;
 491	enum maple_type p_type = mte_node_type(parent);
 492
 493	MAS_BUG_ON(mas, p_type == maple_dense);
 494	MAS_BUG_ON(mas, p_type == maple_leaf_64);
 495
 496	switch (p_type) {
 497	case maple_range_64:
 498	case maple_arange_64:
 499		shift = MAPLE_PARENT_SLOT_SHIFT;
 500		type = MAPLE_PARENT_RANGE64;
 501		break;
 502	default:
 503	case maple_dense:
 504	case maple_leaf_64:
 505		shift = type = 0;
 506		break;
 507	}
 508
 509	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
 510	val |= (slot << shift) | type;
 511	mte_to_node(enode)->parent = ma_parent_ptr(val);
 512}
 513
 514/*
 515 * mte_parent_slot() - get the parent slot of @enode.
 516 * @enode: The encoded maple node.
 517 *
 518 * Return: The slot in the parent node where @enode resides.
 519 */
 520static __always_inline
 521unsigned int mte_parent_slot(const struct maple_enode *enode)
 522{
 523	unsigned long val = (unsigned long)mte_to_node(enode)->parent;
 524
 525	if (unlikely(val & MA_ROOT_PARENT))
 526		return 0;
 527
 528	/*
 529	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
 530	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
 531	 */
 532	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
 533}
 534
 535/*
  536 * mte_parent() - Get the parent of @enode.
  537 * @enode: The encoded maple node.
 538 *
 539 * Return: The parent maple node.
 540 */
 541static __always_inline
 542struct maple_node *mte_parent(const struct maple_enode *enode)
 543{
 544	return (void *)((unsigned long)
 545			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
 546}
 547
 548/*
  549 * ma_dead_node() - check if the @node is dead.
  550 * @node: The maple node
 551 *
 552 * Return: true if dead, false otherwise.
 553 */
 554static __always_inline bool ma_dead_node(const struct maple_node *node)
 555{
 556	struct maple_node *parent;
 557
 558	/* Do not reorder reads from the node prior to the parent check */
 559	smp_rmb();
 560	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
 561	return (parent == node);
 562}
 563
 564/*
 565 * mte_dead_node() - check if the @enode is dead.
 566 * @enode: The encoded maple node
 567 *
 568 * Return: true if dead, false otherwise.
 569 */
 570static __always_inline bool mte_dead_node(const struct maple_enode *enode)
 571{
 572	struct maple_node *parent, *node;
 573
 574	node = mte_to_node(enode);
 575	/* Do not reorder reads from the node prior to the parent check */
 576	smp_rmb();
 577	parent = mte_parent(enode);
 578	return (parent == node);
 579}
 580
 581/*
 582 * mas_allocated() - Get the number of nodes allocated in a maple state.
 583 * @mas: The maple state
 584 *
 585 * The ma_state alloc member is overloaded to hold a pointer to the first
 586 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 587 * set, then the alloc contains the number of requested nodes.  If there is an
 588 * allocated node, then the total allocated nodes is in that node.
 589 *
 590 * Return: The total number of nodes allocated
 591 */
 592static inline unsigned long mas_allocated(const struct ma_state *mas)
 593{
 594	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
 595		return 0;
 596
 597	return mas->alloc->total;
 598}
 599
 600/*
 601 * mas_set_alloc_req() - Set the requested number of allocations.
 602 * @mas: the maple state
 603 * @count: the number of allocations.
 604 *
 605 * The requested number of allocations is either in the first allocated node,
 606 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 607 * no allocated node.  Set the request either in the node or do the necessary
 608 * encoding to store in @mas->alloc directly.
 609 */
 610static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
 611{
 612	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
 613		if (!count)
 614			mas->alloc = NULL;
 615		else
 616			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
 617		return;
 618	}
 619
 620	mas->alloc->request_count = count;
 621}
 622
 623/*
 624 * mas_alloc_req() - get the requested number of allocations.
 625 * @mas: The maple state
 626 *
 627 * The alloc count is either stored directly in @mas, or in
 628 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 629 * the request count if it's stored directly in @mas->alloc.
 630 *
 631 * Return: The allocation request count.
 632 */
 633static inline unsigned int mas_alloc_req(const struct ma_state *mas)
 634{
 635	if ((unsigned long)mas->alloc & 0x1)
 636		return (unsigned long)(mas->alloc) >> 1;
 637	else if (mas->alloc)
 638		return mas->alloc->request_count;
 639	return 0;
 640}
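
/*
 * Worked example (illustrative) of the encoding above: with no nodes
 * allocated, mas_set_alloc_req(mas, 3) stores ((3 << 1) | 1) == 0x7 in
 * mas->alloc.  mas_allocated() sees bit 0 set and reports 0 nodes, while
 * mas_alloc_req() decodes 0x7 >> 1 == 3 outstanding requests.  Once a
 * node is allocated, mas->alloc points at it and the counts live in
 * ->total and ->request_count instead.
 */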
 641
 642/*
 643 * ma_pivots() - Get a pointer to the maple node pivots.
  644 * @node: The maple node
  645 * @type: The node type
 646 *
 647 * In the event of a dead node, this array may be %NULL
 648 *
 649 * Return: A pointer to the maple node pivots
 650 */
 651static inline unsigned long *ma_pivots(struct maple_node *node,
 652					   enum maple_type type)
 653{
 654	switch (type) {
 655	case maple_arange_64:
 656		return node->ma64.pivot;
 657	case maple_range_64:
 658	case maple_leaf_64:
 659		return node->mr64.pivot;
 660	case maple_dense:
 661		return NULL;
 662	}
 663	return NULL;
 664}
 665
 666/*
 667 * ma_gaps() - Get a pointer to the maple node gaps.
  668 * @node: The maple node
  669 * @type: The node type
 670 *
 671 * Return: A pointer to the maple node gaps
 672 */
 673static inline unsigned long *ma_gaps(struct maple_node *node,
 674				     enum maple_type type)
 675{
 676	switch (type) {
 677	case maple_arange_64:
 678		return node->ma64.gap;
 679	case maple_range_64:
 680	case maple_leaf_64:
 681	case maple_dense:
 682		return NULL;
 683	}
 684	return NULL;
 685}
 686
 687/*
 688 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 689 * @mas: The maple state
 690 * @pivots: The pointer to the maple node pivots
 691 * @piv: The pivot to fetch
 692 * @type: The maple node type
 693 *
 694 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 695 * otherwise.
 696 */
 697static __always_inline unsigned long
 698mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
 699	       unsigned char piv, enum maple_type type)
 700{
 701	if (piv >= mt_pivots[type])
 702		return mas->max;
 703
 704	return pivots[piv];
 705}
 706
 707/*
 708 * mas_safe_min() - Return the minimum for a given offset.
 709 * @mas: The maple state
 710 * @pivots: The pointer to the maple node pivots
 711 * @offset: The offset into the pivot array
 712 *
 713 * Return: The minimum range value that is contained in @offset.
 714 */
 715static inline unsigned long
 716mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
 717{
 718	if (likely(offset))
 719		return pivots[offset - 1] + 1;
 720
 721	return mas->min;
 722}
 723
 724/*
 725 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 726 * @mn: The encoded maple node
 727 * @piv: The pivot offset
 728 * @val: The value of the pivot
 729 */
 730static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
 731				unsigned long val)
 732{
 733	struct maple_node *node = mte_to_node(mn);
 734	enum maple_type type = mte_node_type(mn);
 735
 736	BUG_ON(piv >= mt_pivots[type]);
 737	switch (type) {
 738	case maple_range_64:
 739	case maple_leaf_64:
 740		node->mr64.pivot[piv] = val;
 741		break;
 742	case maple_arange_64:
 743		node->ma64.pivot[piv] = val;
 744		break;
 745	case maple_dense:
 746		break;
 747	}
 748
 749}
 750
 751/*
 752 * ma_slots() - Get a pointer to the maple node slots.
 753 * @mn: The maple node
 754 * @mt: The maple node type
 755 *
 756 * Return: A pointer to the maple node slots
 757 */
 758static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
 759{
 760	switch (mt) {
 761	case maple_arange_64:
 762		return mn->ma64.slot;
 763	case maple_range_64:
 764	case maple_leaf_64:
 765		return mn->mr64.slot;
 766	case maple_dense:
 767		return mn->slot;
 768	}
 769
 770	return NULL;
 771}
 772
 773static inline bool mt_write_locked(const struct maple_tree *mt)
 774{
 775	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
 776		lockdep_is_held(&mt->ma_lock);
 777}
 778
 779static __always_inline bool mt_locked(const struct maple_tree *mt)
 780{
 781	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
 782		lockdep_is_held(&mt->ma_lock);
 783}
 784
 785static __always_inline void *mt_slot(const struct maple_tree *mt,
 786		void __rcu **slots, unsigned char offset)
 787{
 788	return rcu_dereference_check(slots[offset], mt_locked(mt));
 789}
 790
 791static __always_inline void *mt_slot_locked(struct maple_tree *mt,
 792		void __rcu **slots, unsigned char offset)
 793{
 794	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
 795}
 796/*
 797 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 798 * @mas: The maple state
 799 * @slots: The pointer to the slots
 800 * @offset: The offset into the slots array to fetch
 801 *
 802 * Return: The entry stored in @slots at the @offset.
 803 */
 804static __always_inline void *mas_slot_locked(struct ma_state *mas,
 805		void __rcu **slots, unsigned char offset)
 806{
 807	return mt_slot_locked(mas->tree, slots, offset);
 808}
 809
 810/*
 811 * mas_slot() - Get the slot value when not holding the maple tree lock.
 812 * @mas: The maple state
 813 * @slots: The pointer to the slots
 814 * @offset: The offset into the slots array to fetch
 815 *
 816 * Return: The entry stored in @slots at the @offset
 817 */
 818static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
 819		unsigned char offset)
 820{
 821	return mt_slot(mas->tree, slots, offset);
 822}
 823
 824/*
 825 * mas_root() - Get the maple tree root.
 826 * @mas: The maple state.
 827 *
 828 * Return: The pointer to the root of the tree
 829 */
 830static __always_inline void *mas_root(struct ma_state *mas)
 831{
 832	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
 833}
 834
 835static inline void *mt_root_locked(struct maple_tree *mt)
 836{
 837	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
 838}
 839
 840/*
 841 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 842 * @mas: The maple state.
 843 *
 844 * Return: The pointer to the root of the tree
 845 */
 846static inline void *mas_root_locked(struct ma_state *mas)
 847{
 848	return mt_root_locked(mas->tree);
 849}
 850
 851static inline struct maple_metadata *ma_meta(struct maple_node *mn,
 852					     enum maple_type mt)
 853{
 854	switch (mt) {
 855	case maple_arange_64:
 856		return &mn->ma64.meta;
 857	default:
 858		return &mn->mr64.meta;
 859	}
 860}
 861
 862/*
 863 * ma_set_meta() - Set the metadata information of a node.
 864 * @mn: The maple node
 865 * @mt: The maple node type
 866 * @offset: The offset of the highest sub-gap in this node.
 867 * @end: The end of the data in this node.
 868 */
 869static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
 870			       unsigned char offset, unsigned char end)
 871{
 872	struct maple_metadata *meta = ma_meta(mn, mt);
 873
 874	meta->gap = offset;
 875	meta->end = end;
 876}
 877
 878/*
 879 * mt_clear_meta() - clear the metadata information of a node, if it exists
 880 * @mt: The maple tree
 881 * @mn: The maple node
 882 * @type: The maple node type
 885 */
 886static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
 887				  enum maple_type type)
 888{
 889	struct maple_metadata *meta;
 890	unsigned long *pivots;
 891	void __rcu **slots;
 892	void *next;
 893
 894	switch (type) {
 895	case maple_range_64:
 896		pivots = mn->mr64.pivot;
 897		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
 898			slots = mn->mr64.slot;
 899			next = mt_slot_locked(mt, slots,
 900					      MAPLE_RANGE64_SLOTS - 1);
 901			if (unlikely((mte_to_node(next) &&
 902				      mte_node_type(next))))
 903				return; /* no metadata, could be node */
 904		}
 905		fallthrough;
 906	case maple_arange_64:
 907		meta = ma_meta(mn, type);
 908		break;
 909	default:
 910		return;
 911	}
 912
 913	meta->gap = 0;
 914	meta->end = 0;
 915}
 916
 917/*
 918 * ma_meta_end() - Get the data end of a node from the metadata
 919 * @mn: The maple node
 920 * @mt: The maple node type
 921 */
 922static inline unsigned char ma_meta_end(struct maple_node *mn,
 923					enum maple_type mt)
 924{
 925	struct maple_metadata *meta = ma_meta(mn, mt);
 926
 927	return meta->end;
 928}
 929
 930/*
 931 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 932 * @mn: The maple node
 933 */
 934static inline unsigned char ma_meta_gap(struct maple_node *mn)
 935{
 936	return mn->ma64.meta.gap;
 937}
 938
 939/*
 940 * ma_set_meta_gap() - Set the largest gap location in a nodes metadata
 941 * @mn: The maple node
  942 * @mt: The maple node type
 943 * @offset: The location of the largest gap.
 944 */
 945static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
 946				   unsigned char offset)
 947{
 948
 949	struct maple_metadata *meta = ma_meta(mn, mt);
 950
 951	meta->gap = offset;
 952}
 953
 954/*
 955 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
  956 * @mat: The ma_topiary, a linked list of dead nodes.
  957 * @dead_enode: The node to be marked as dead and added to the tail of the list
 958 *
 959 * Add the @dead_enode to the linked list in @mat.
 960 */
 961static inline void mat_add(struct ma_topiary *mat,
 962			   struct maple_enode *dead_enode)
 963{
 964	mte_set_node_dead(dead_enode);
 965	mte_to_mat(dead_enode)->next = NULL;
 966	if (!mat->tail) {
 967		mat->tail = mat->head = dead_enode;
 968		return;
 969	}
 970
 971	mte_to_mat(mat->tail)->next = dead_enode;
 972	mat->tail = dead_enode;
 973}
 974
 975static void mt_free_walk(struct rcu_head *head);
 976static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
 977			    bool free);
 978/*
 979 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
  980 * @mas: The maple state
  981 * @mat: The ma_topiary linked list of dead nodes to free.
 982 *
 983 * Destroy walk a dead list.
 984 */
 985static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
 986{
 987	struct maple_enode *next;
 988	struct maple_node *node;
 989	bool in_rcu = mt_in_rcu(mas->tree);
 990
 991	while (mat->head) {
 992		next = mte_to_mat(mat->head)->next;
 993		node = mte_to_node(mat->head);
 994		mt_destroy_walk(mat->head, mas->tree, !in_rcu);
 995		if (in_rcu)
 996			call_rcu(&node->rcu, mt_free_walk);
 997		mat->head = next;
 998	}
 999}
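
/*
 * Illustrative flow (a sketch, assuming zero-initialization of the
 * topiary list head/tail is valid): collecting dead nodes and destroying
 * them in one pass.
 *
 *	struct ma_topiary mat = {};
 *
 *	mat_add(&mat, old_enode1);	marked dead; list: e1
 *	mat_add(&mat, old_enode2);	list: e1 -> e2
 *	mas_mat_destroy(&mas, &mat);	free the subtrees, deferred via RCU
 *					when the tree is in RCU mode
 */
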
1000/*
1001 * mas_descend() - Descend into the slot stored in the ma_state.
 1002 * @mas: The maple state.
1003 *
1004 * Note: Not RCU safe, only use in write side or debug code.
1005 */
1006static inline void mas_descend(struct ma_state *mas)
1007{
1008	enum maple_type type;
1009	unsigned long *pivots;
1010	struct maple_node *node;
1011	void __rcu **slots;
1012
1013	node = mas_mn(mas);
1014	type = mte_node_type(mas->node);
1015	pivots = ma_pivots(node, type);
1016	slots = ma_slots(node, type);
1017
1018	if (mas->offset)
1019		mas->min = pivots[mas->offset - 1] + 1;
1020	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1021	mas->node = mas_slot(mas, slots, mas->offset);
1022}
1023
1024/*
1025 * mte_set_gap() - Set a maple node gap.
1026 * @mn: The encoded maple node
1027 * @gap: The offset of the gap to set
1028 * @val: The gap value
1029 */
1030static inline void mte_set_gap(const struct maple_enode *mn,
1031				 unsigned char gap, unsigned long val)
1032{
1033	switch (mte_node_type(mn)) {
1034	default:
1035		break;
1036	case maple_arange_64:
1037		mte_to_node(mn)->ma64.gap[gap] = val;
1038		break;
1039	}
1040}
1041
1042/*
1043 * mas_ascend() - Walk up a level of the tree.
1044 * @mas: The maple state
1045 *
1046 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
1047 * may cause several levels of walking up to find the correct min and max.
1048 * May find a dead node which will cause a premature return.
1049 * Return: 1 on dead node, 0 otherwise
1050 */
1051static int mas_ascend(struct ma_state *mas)
1052{
1053	struct maple_enode *p_enode; /* parent enode. */
1054	struct maple_enode *a_enode; /* ancestor enode. */
1055	struct maple_node *a_node; /* ancestor node. */
1056	struct maple_node *p_node; /* parent node. */
1057	unsigned char a_slot;
1058	enum maple_type a_type;
1059	unsigned long min, max;
1060	unsigned long *pivots;
1061	bool set_max = false, set_min = false;
1062
1063	a_node = mas_mn(mas);
1064	if (ma_is_root(a_node)) {
1065		mas->offset = 0;
1066		return 0;
1067	}
1068
1069	p_node = mte_parent(mas->node);
1070	if (unlikely(a_node == p_node))
1071		return 1;
1072
1073	a_type = mas_parent_type(mas, mas->node);
1074	mas->offset = mte_parent_slot(mas->node);
1075	a_enode = mt_mk_node(p_node, a_type);
1076
1077	/* Check to make sure all parent information is still accurate */
1078	if (p_node != mte_parent(mas->node))
1079		return 1;
1080
1081	mas->node = a_enode;
1082
1083	if (mte_is_root(a_enode)) {
1084		mas->max = ULONG_MAX;
1085		mas->min = 0;
1086		return 0;
1087	}
1088
1089	min = 0;
1090	max = ULONG_MAX;
1091	if (!mas->offset) {
1092		min = mas->min;
1093		set_min = true;
1094	}
1095
1096	if (mas->max == ULONG_MAX)
1097		set_max = true;
1098
1099	do {
1100		p_enode = a_enode;
1101		a_type = mas_parent_type(mas, p_enode);
1102		a_node = mte_parent(p_enode);
1103		a_slot = mte_parent_slot(p_enode);
1104		a_enode = mt_mk_node(a_node, a_type);
1105		pivots = ma_pivots(a_node, a_type);
1106
1107		if (unlikely(ma_dead_node(a_node)))
1108			return 1;
1109
1110		if (!set_min && a_slot) {
1111			set_min = true;
1112			min = pivots[a_slot - 1] + 1;
1113		}
1114
1115		if (!set_max && a_slot < mt_pivots[a_type]) {
1116			set_max = true;
1117			max = pivots[a_slot];
1118		}
1119
1120		if (unlikely(ma_dead_node(a_node)))
1121			return 1;
1122
1123		if (unlikely(ma_is_root(a_node)))
1124			break;
1125
1126	} while (!set_min || !set_max);
1127
1128	mas->max = max;
1129	mas->min = min;
1130	return 0;
1131}
1132
1133/*
1134 * mas_pop_node() - Get a previously allocated maple node from the maple state.
1135 * @mas: The maple state
1136 *
1137 * Return: A pointer to a maple node.
1138 */
1139static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1140{
1141	struct maple_alloc *ret, *node = mas->alloc;
1142	unsigned long total = mas_allocated(mas);
1143	unsigned int req = mas_alloc_req(mas);
1144
1145	/* nothing or a request pending. */
1146	if (WARN_ON(!total))
1147		return NULL;
1148
1149	if (total == 1) {
1150		/* single allocation in this ma_state */
1151		mas->alloc = NULL;
1152		ret = node;
1153		goto single_node;
1154	}
1155
1156	if (node->node_count == 1) {
1157		/* Single allocation in this node. */
1158		mas->alloc = node->slot[0];
1159		mas->alloc->total = node->total - 1;
1160		ret = node;
1161		goto new_head;
1162	}
1163	node->total--;
1164	ret = node->slot[--node->node_count];
1165	node->slot[node->node_count] = NULL;
1166
1167single_node:
1168new_head:
1169	if (req) {
1170		req++;
1171		mas_set_alloc_req(mas, req);
1172	}
1173
1174	memset(ret, 0, sizeof(*ret));
1175	return (struct maple_node *)ret;
1176}
1177
1178/*
1179 * mas_push_node() - Push a node back on the maple state allocation.
1180 * @mas: The maple state
1181 * @used: The used maple node
1182 *
1183 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
1184 * requested node count as necessary.
1185 */
1186static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1187{
1188	struct maple_alloc *reuse = (struct maple_alloc *)used;
1189	struct maple_alloc *head = mas->alloc;
1190	unsigned long count;
1191	unsigned int requested = mas_alloc_req(mas);
1192
1193	count = mas_allocated(mas);
1194
1195	reuse->request_count = 0;
1196	reuse->node_count = 0;
1197	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
1198		head->slot[head->node_count++] = reuse;
1199		head->total++;
1200		goto done;
1201	}
1202
1203	reuse->total = 1;
1204	if ((head) && !((unsigned long)head & 0x1)) {
1205		reuse->slot[0] = head;
1206		reuse->node_count = 1;
1207		reuse->total += head->total;
1208	}
1209
1210	mas->alloc = reuse;
1211done:
1212	if (requested > 1)
1213		mas_set_alloc_req(mas, requested - 1);
1214}
1215
1216/*
1217 * mas_alloc_nodes() - Allocate nodes into a maple state
1218 * @mas: The maple state
1219 * @gfp: The GFP Flags
1220 */
1221static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1222{
1223	struct maple_alloc *node;
1224	unsigned long allocated = mas_allocated(mas);
1225	unsigned int requested = mas_alloc_req(mas);
1226	unsigned int count;
1227	void **slots = NULL;
1228	unsigned int max_req = 0;
1229
1230	if (!requested)
1231		return;
1232
1233	mas_set_alloc_req(mas, 0);
1234	if (mas->mas_flags & MA_STATE_PREALLOC) {
1235		if (allocated)
1236			return;
 1238		WARN_ON(!allocated);
1239	}
1240
1241	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
1242		node = (struct maple_alloc *)mt_alloc_one(gfp);
1243		if (!node)
1244			goto nomem_one;
1245
1246		if (allocated) {
1247			node->slot[0] = mas->alloc;
1248			node->node_count = 1;
1249		} else {
1250			node->node_count = 0;
1251		}
1252
1253		mas->alloc = node;
1254		node->total = ++allocated;
1255		requested--;
1256	}
1257
1258	node = mas->alloc;
1259	node->request_count = 0;
1260	while (requested) {
1261		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
1262		slots = (void **)&node->slot[node->node_count];
1263		max_req = min(requested, max_req);
1264		count = mt_alloc_bulk(gfp, max_req, slots);
1265		if (!count)
1266			goto nomem_bulk;
1267
1268		if (node->node_count == 0) {
1269			node->slot[0]->node_count = 0;
1270			node->slot[0]->request_count = 0;
1271		}
1272
1273		node->node_count += count;
1274		allocated += count;
1275		node = node->slot[0];
1276		requested -= count;
1277	}
1278	mas->alloc->total = allocated;
1279	return;
1280
1281nomem_bulk:
1282	/* Clean up potential freed allocations on bulk failure */
1283	memset(slots, 0, max_req * sizeof(unsigned long));
1284nomem_one:
1285	mas_set_alloc_req(mas, requested);
1286	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1287		mas->alloc->total = allocated;
1288	mas_set_err(mas, -ENOMEM);
1289}
1290
1291/*
1292 * mas_free() - Free an encoded maple node
1293 * @mas: The maple state
1294 * @used: The encoded maple node to free.
1295 *
1296 * Uses rcu free if necessary, pushes @used back on the maple state allocations
1297 * otherwise.
1298 */
1299static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1300{
1301	struct maple_node *tmp = mte_to_node(used);
1302
1303	if (mt_in_rcu(mas->tree))
1304		ma_free_rcu(tmp);
1305	else
1306		mas_push_node(mas, tmp);
1307}
1308
1309/*
 1310 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 1311 * if there are not enough nodes.
1312 * @mas: The maple state
1313 * @count: The number of nodes needed
1314 * @gfp: the gfp flags
1315 */
1316static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1317{
1318	unsigned long allocated = mas_allocated(mas);
1319
1320	if (allocated < count) {
1321		mas_set_alloc_req(mas, count - allocated);
1322		mas_alloc_nodes(mas, gfp);
1323	}
1324}
1325
1326/*
 1327 * mas_node_count() - Check if enough nodes are allocated and request more
 1328 * if there are not enough nodes.
1329 * @mas: The maple state
1330 * @count: The number of nodes needed
1331 *
1332 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1333 */
1334static void mas_node_count(struct ma_state *mas, int count)
1335{
1336	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
1337}
1338
1339/*
1340 * mas_start() - Sets up maple state for operations.
1341 * @mas: The maple state.
1342 *
 1343 * If mas->status == ma_start, then set the min, max and depth to
 1344 * defaults.
 1345 *
 1346 * Return:
 1347 * - If mas->node is an error or not ma_start, return NULL.
 1348 * - If it's an empty tree:     NULL & mas->status == ma_none
 1349 * - If it's a single entry:    The entry & mas->status == ma_root
 1350 * - If it's a tree:            NULL & mas->status == ma_active
1351 */
1352static inline struct maple_enode *mas_start(struct ma_state *mas)
1353{
1354	if (likely(mas_is_start(mas))) {
1355		struct maple_enode *root;
1356
1357		mas->min = 0;
1358		mas->max = ULONG_MAX;
1359
1360retry:
1361		mas->depth = 0;
1362		root = mas_root(mas);
1363		/* Tree with nodes */
1364		if (likely(xa_is_node(root))) {
1365			mas->depth = 1;
1366			mas->status = ma_active;
1367			mas->node = mte_safe_root(root);
1368			mas->offset = 0;
1369			if (mte_dead_node(mas->node))
1370				goto retry;
1371
1372			return NULL;
1373		}
1374
1375		/* empty tree */
1376		if (unlikely(!root)) {
1377			mas->node = NULL;
1378			mas->status = ma_none;
1379			mas->offset = MAPLE_NODE_SLOTS;
1380			return NULL;
1381		}
1382
1383		/* Single entry tree */
1384		mas->status = ma_root;
1385		mas->offset = MAPLE_NODE_SLOTS;
1386
1387		/* Single entry tree. */
1388		if (mas->index > 0)
1389			return NULL;
1390
1391		return root;
1392	}
1393
1394	return NULL;
1395}
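
/*
 * Illustrative use of the states above (a sketch):
 *
 *	entry = mas_start(&mas);
 *	if (mas_is_none(&mas))		empty tree: no entry
 *		...
 *	else if (mas_is_ptr(&mas))	single-entry tree; 'entry' holds it
 *		...			when mas.index == 0, NULL otherwise
 *	else				full tree: mas.node is the safe
 *		...			root, ready to descend
 */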
1396
1397/*
1398 * ma_data_end() - Find the end of the data in a node.
1399 * @node: The maple node
1400 * @type: The maple node type
1401 * @pivots: The array of pivots in the node
1402 * @max: The maximum value in the node
1403 *
1404 * Uses metadata to find the end of the data when possible.
1405 * Return: The zero indexed last slot with data (may be null).
1406 */
1407static __always_inline unsigned char ma_data_end(struct maple_node *node,
1408		enum maple_type type, unsigned long *pivots, unsigned long max)
1409{
1410	unsigned char offset;
1411
1412	if (!pivots)
1413		return 0;
1414
1415	if (type == maple_arange_64)
1416		return ma_meta_end(node, type);
1417
1418	offset = mt_pivots[type] - 1;
1419	if (likely(!pivots[offset]))
1420		return ma_meta_end(node, type);
1421
1422	if (likely(pivots[offset] == max))
1423		return offset;
1424
1425	return mt_pivots[type];
1426}
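
/*
 * Worked example (illustrative): a maple_range_64 node with max == 100
 * and pivots {10, 20, 0, ..., 0}.  pivots[14] is zero, so the end comes
 * from the metadata.  If instead pivots[14] == 100 == max, offset 14 is
 * the end; if pivots[14] is set but below max, all 16 slots are in use
 * and mt_pivots[type] (15) is returned.
 */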
1427
1428/*
1429 * mas_data_end() - Find the end of the data (slot).
1430 * @mas: the maple state
1431 *
1432 * This method is optimized to check the metadata of a node if the node type
1433 * supports data end metadata.
1434 *
1435 * Return: The zero indexed last slot with data (may be null).
1436 */
1437static inline unsigned char mas_data_end(struct ma_state *mas)
1438{
1439	enum maple_type type;
1440	struct maple_node *node;
1441	unsigned char offset;
1442	unsigned long *pivots;
1443
1444	type = mte_node_type(mas->node);
1445	node = mas_mn(mas);
1446	if (type == maple_arange_64)
1447		return ma_meta_end(node, type);
1448
1449	pivots = ma_pivots(node, type);
1450	if (unlikely(ma_dead_node(node)))
1451		return 0;
1452
1453	offset = mt_pivots[type] - 1;
1454	if (likely(!pivots[offset]))
1455		return ma_meta_end(node, type);
1456
1457	if (likely(pivots[offset] == mas->max))
1458		return offset;
1459
1460	return mt_pivots[type];
1461}
1462
1463/*
1464 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 1465 * @mas: The maple state
1466 *
1467 * Return: The maximum gap in the leaf.
1468 */
1469static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1470{
1471	enum maple_type mt;
1472	unsigned long pstart, gap, max_gap;
1473	struct maple_node *mn;
1474	unsigned long *pivots;
1475	void __rcu **slots;
1476	unsigned char i;
1477	unsigned char max_piv;
1478
1479	mt = mte_node_type(mas->node);
1480	mn = mas_mn(mas);
1481	slots = ma_slots(mn, mt);
1482	max_gap = 0;
1483	if (unlikely(ma_is_dense(mt))) {
1484		gap = 0;
1485		for (i = 0; i < mt_slots[mt]; i++) {
1486			if (slots[i]) {
1487				if (gap > max_gap)
1488					max_gap = gap;
1489				gap = 0;
1490			} else {
1491				gap++;
1492			}
1493		}
1494		if (gap > max_gap)
1495			max_gap = gap;
1496		return max_gap;
1497	}
1498
1499	/*
 1500	 * Checking the first implied pivot optimizes the loop below, and slot 1
 1501	 * may be skipped if there is a gap in slot 0.
1502	 */
1503	pivots = ma_pivots(mn, mt);
1504	if (likely(!slots[0])) {
1505		max_gap = pivots[0] - mas->min + 1;
1506		i = 2;
1507	} else {
1508		i = 1;
1509	}
1510
1511	/* reduce max_piv as the special case is checked before the loop */
1512	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1513	/*
1514	 * Check end implied pivot which can only be a gap on the right most
1515	 * node.
1516	 */
1517	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1518		gap = ULONG_MAX - pivots[max_piv];
1519		if (gap > max_gap)
1520			max_gap = gap;
1521
1522		if (max_gap > pivots[max_piv] - mas->min)
1523			return max_gap;
1524	}
1525
1526	for (; i <= max_piv; i++) {
1527		/* data == no gap. */
1528		if (likely(slots[i]))
1529			continue;
1530
1531		pstart = pivots[i - 1];
1532		gap = pivots[i] - pstart;
1533		if (gap > max_gap)
1534			max_gap = gap;
1535
1536		/* There cannot be two gaps in a row. */
1537		i++;
1538	}
1539	return max_gap;
1540}
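
/*
 * Worked example (illustrative): a leaf with min == 0, max == 49,
 * slots {NULL, A, NULL, B} and pivots {9, 19, 39, 49}.  Slot 0 is empty,
 * so the first gap is pivots[0] - min + 1 == 10 (range 0-9); the loop
 * then finds the gap at slot 2: pivots[2] - pivots[1] == 20 (range
 * 20-39), which is returned as the maximum.
 */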
1541
1542/*
1543 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1544 * @node: The maple node
1545 * @gaps: The pointer to the gaps
1546 * @mt: The maple node type
 1547 * @off: Pointer to store the offset location of the gap.
1548 *
1549 * Uses the metadata data end to scan backwards across set gaps.
1550 *
1551 * Return: The maximum gap value
1552 */
1553static inline unsigned long
1554ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1555	    unsigned char *off)
1556{
1557	unsigned char offset, i;
1558	unsigned long max_gap = 0;
1559
1560	i = offset = ma_meta_end(node, mt);
1561	do {
1562		if (gaps[i] > max_gap) {
1563			max_gap = gaps[i];
1564			offset = i;
1565		}
1566	} while (i--);
1567
1568	*off = offset;
1569	return max_gap;
1570}
1571
1572/*
1573 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1574 * @mas: The maple state.
1575 *
1576 * Return: The gap value.
1577 */
1578static inline unsigned long mas_max_gap(struct ma_state *mas)
1579{
1580	unsigned long *gaps;
1581	unsigned char offset;
1582	enum maple_type mt;
1583	struct maple_node *node;
1584
1585	mt = mte_node_type(mas->node);
1586	if (ma_is_leaf(mt))
1587		return mas_leaf_max_gap(mas);
1588
1589	node = mas_mn(mas);
1590	MAS_BUG_ON(mas, mt != maple_arange_64);
1591	offset = ma_meta_gap(node);
1592	gaps = ma_gaps(node, mt);
1593	return gaps[offset];
1594}
1595
1596/*
1597 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1598 * @mas: The maple state
1599 * @offset: The gap offset in the parent to set
1600 * @new: The new gap value.
1601 *
1602 * Set the parent gap then continue to set the gap upwards, using the metadata
1603 * of the parent to see if it is necessary to check the node above.
1604 */
1605static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1606		unsigned long new)
1607{
1608	unsigned long meta_gap = 0;
1609	struct maple_node *pnode;
1610	struct maple_enode *penode;
1611	unsigned long *pgaps;
1612	unsigned char meta_offset;
1613	enum maple_type pmt;
1614
1615	pnode = mte_parent(mas->node);
1616	pmt = mas_parent_type(mas, mas->node);
1617	penode = mt_mk_node(pnode, pmt);
1618	pgaps = ma_gaps(pnode, pmt);
1619
1620ascend:
1621	MAS_BUG_ON(mas, pmt != maple_arange_64);
1622	meta_offset = ma_meta_gap(pnode);
1623	meta_gap = pgaps[meta_offset];
1624
1625	pgaps[offset] = new;
1626
1627	if (meta_gap == new)
1628		return;
1629
1630	if (offset != meta_offset) {
1631		if (meta_gap > new)
1632			return;
1633
1634		ma_set_meta_gap(pnode, pmt, offset);
1635	} else if (new < meta_gap) {
1636		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1637		ma_set_meta_gap(pnode, pmt, meta_offset);
1638	}
1639
1640	if (ma_is_root(pnode))
1641		return;
1642
1643	/* Go to the parent node. */
1644	pnode = mte_parent(penode);
1645	pmt = mas_parent_type(mas, penode);
1646	pgaps = ma_gaps(pnode, pmt);
1647	offset = mte_parent_slot(penode);
1648	penode = mt_mk_node(pnode, pmt);
1649	goto ascend;
1650}
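
/*
 * Worked example (illustrative): a child's largest gap grows to 50 at
 * parent offset 3, so pgaps[3] = 50.  If the parent metadata records a
 * bigger gap elsewhere (say 70 at offset 1), propagation stops.  If
 * offset 3 held the old maximum and 50 is smaller, ma_max_gap() rescans
 * for the new maximum before ascending to the next level.
 */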
1651
1652/*
 1653 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 1654 * @mas: The maple state.
1655 */
1656static inline void mas_update_gap(struct ma_state *mas)
1657{
1658	unsigned char pslot;
1659	unsigned long p_gap;
1660	unsigned long max_gap;
1661
1662	if (!mt_is_alloc(mas->tree))
1663		return;
1664
1665	if (mte_is_root(mas->node))
1666		return;
1667
1668	max_gap = mas_max_gap(mas);
1669
1670	pslot = mte_parent_slot(mas->node);
1671	p_gap = ma_gaps(mte_parent(mas->node),
1672			mas_parent_type(mas, mas->node))[pslot];
1673
1674	if (p_gap != max_gap)
1675		mas_parent_gap(mas, pslot, max_gap);
1676}
1677
1678/*
1679 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1680 * @parent with the slot encoded.
 1681 * @mas: The maple state (for the tree)
 1682 * @parent: The maple encoded node containing the children.
1683 */
1684static inline void mas_adopt_children(struct ma_state *mas,
1685		struct maple_enode *parent)
1686{
1687	enum maple_type type = mte_node_type(parent);
1688	struct maple_node *node = mte_to_node(parent);
1689	void __rcu **slots = ma_slots(node, type);
1690	unsigned long *pivots = ma_pivots(node, type);
1691	struct maple_enode *child;
1692	unsigned char offset;
1693
1694	offset = ma_data_end(node, type, pivots, mas->max);
1695	do {
1696		child = mas_slot_locked(mas, slots, offset);
1697		mas_set_parent(mas, child, parent, offset);
1698	} while (offset--);
1699}
1700
1701/*
1702 * mas_put_in_tree() - Put a new node in the tree, smp_wmb(), and mark the old
1703 * node as dead.
 1704 * @mas: The maple state with the new node
 1705 * @old_enode: The old maple encoded node to replace.
1706 */
1707static inline void mas_put_in_tree(struct ma_state *mas,
1708		struct maple_enode *old_enode)
1709	__must_hold(mas->tree->ma_lock)
1710{
1711	unsigned char offset;
1712	void __rcu **slots;
1713
1714	if (mte_is_root(mas->node)) {
1715		mas_mn(mas)->parent = ma_parent_ptr(mas_tree_parent(mas));
1716		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1717		mas_set_height(mas);
1718	} else {
1719
1720		offset = mte_parent_slot(mas->node);
1721		slots = ma_slots(mte_parent(mas->node),
1722				 mas_parent_type(mas, mas->node));
1723		rcu_assign_pointer(slots[offset], mas->node);
1724	}
1725
1726	mte_set_node_dead(old_enode);
1727}
1728
1729/*
1730 * mas_replace_node() - Replace a node by putting it in the tree, marking it
1731 * dead, and freeing it.
 1733 * @mas: The ma_state with @mas->node pointing to the new node.
 1734 * @old_enode: The old maple encoded node.
1735 */
1736static inline void mas_replace_node(struct ma_state *mas,
1737		struct maple_enode *old_enode)
1738	__must_hold(mas->tree->ma_lock)
1739{
1740	mas_put_in_tree(mas, old_enode);
1741	mas_free(mas, old_enode);
1742}
1743
1744/*
1745 * mas_find_child() - Find a child who has the parent @mas->node.
1746 * @mas: the maple state with the parent.
1747 * @child: the maple state to store the child.
1748 */
1749static inline bool mas_find_child(struct ma_state *mas, struct ma_state *child)
1750	__must_hold(mas->tree->ma_lock)
1751{
1752	enum maple_type mt;
1753	unsigned char offset;
1754	unsigned char end;
1755	unsigned long *pivots;
1756	struct maple_enode *entry;
1757	struct maple_node *node;
1758	void __rcu **slots;
1759
1760	mt = mte_node_type(mas->node);
1761	node = mas_mn(mas);
1762	slots = ma_slots(node, mt);
1763	pivots = ma_pivots(node, mt);
1764	end = ma_data_end(node, mt, pivots, mas->max);
1765	for (offset = mas->offset; offset <= end; offset++) {
1766		entry = mas_slot_locked(mas, slots, offset);
1767		if (mte_parent(entry) == node) {
1768			*child = *mas;
1769			mas->offset = offset + 1;
1770			child->offset = offset;
1771			mas_descend(child);
1772			child->offset = 0;
1773			return true;
1774		}
1775	}
1776	return false;
1777}
1778
1779/*
 1780 * mab_shift_right() - Shift the data in @b_node right.  Note: does not clean
 1781 * out the old data or set @b_node->b_end.
1782 * @b_node: the maple_big_node
1783 * @shift: the shift count
1784 */
1785static inline void mab_shift_right(struct maple_big_node *b_node,
1786				 unsigned char shift)
1787{
1788	unsigned long size = b_node->b_end * sizeof(unsigned long);
1789
1790	memmove(b_node->pivot + shift, b_node->pivot, size);
1791	memmove(b_node->slot + shift, b_node->slot, size);
1792	if (b_node->type == maple_arange_64)
1793		memmove(b_node->gap + shift, b_node->gap, size);
1794}
1795
1796/*
1797 * mab_middle_node() - Check if a middle node is needed (unlikely)
1798 * @b_node: the maple_big_node that contains the data.
1800 * @split: the potential split location
1801 * @slot_count: the size that can be stored in a single node being considered.
1802 *
1803 * Return: true if a middle node is required.
1804 */
1805static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1806				   unsigned char slot_count)
1807{
1808	unsigned char size = b_node->b_end;
1809
1810	if (size >= 2 * slot_count)
1811		return true;
1812
1813	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1814		return true;
1815
1816	return false;
1817}
1818
1819/*
1820 * mab_no_null_split() - ensure the split doesn't fall on a NULL
1821 * @b_node: the maple_big_node with the data
1822 * @split: the suggested split location
1823 * @slot_count: the number of slots in the node being considered.
1824 *
1825 * Return: the split location.
1826 */
1827static inline int mab_no_null_split(struct maple_big_node *b_node,
1828				    unsigned char split, unsigned char slot_count)
1829{
1830	if (!b_node->slot[split]) {
1831		/*
1832		 * If the split is less than the max slot && the right side will
1833		 * still be sufficient, then increment the split on NULL.
1834		 */
1835		if ((split < slot_count - 1) &&
1836		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1837			split++;
1838		else
1839			split--;
1840	}
1841	return split;
1842}
1843
1844/*
1845 * mab_calc_split() - Calculate the split location and if there needs to be two
1846 * splits.
 * @mas: The maple state
 1847 * @bn: The maple_big_node with the data
 1848 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range value of the data in @bn
1849 *
1850 * Return: The first split location.  The middle split is set in @mid_split.
1851 */
1852static inline int mab_calc_split(struct ma_state *mas,
1853	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1854{
1855	unsigned char b_end = bn->b_end;
1856	int split = b_end / 2; /* Assume equal split. */
1857	unsigned char slot_min, slot_count = mt_slots[bn->type];
1858
1859	/*
1860	 * To support gap tracking, all NULL entries are kept together and a node cannot
1861	 * end on a NULL entry, with the exception of the left-most leaf.  The
1862	 * limitation means that the split of a node must be checked for this condition
1863	 * and be able to put more data in one direction or the other.
1864	 */
1865	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1866		*mid_split = 0;
1867		split = b_end - mt_min_slots[bn->type];
1868
1869		if (!ma_is_leaf(bn->type))
1870			return split;
1871
1872		mas->mas_flags |= MA_STATE_REBALANCE;
1873		if (!bn->slot[split])
1874			split--;
1875		return split;
1876	}
1877
1878	/*
1879	 * Although extremely rare, it is possible to enter what is known as the 3-way
1880	 * split scenario.  The 3-way split comes about by means of a store of a range
1881	 * that overwrites the end and beginning of two full nodes.  The result is a set
1882	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
1883	 * also be located in different parent nodes which are also full.  This can
1884	 * carry upwards all the way to the root in the worst case.
1885	 */
1886	if (unlikely(mab_middle_node(bn, split, slot_count))) {
1887		split = b_end / 3;
1888		*mid_split = split * 2;
1889	} else {
1890		slot_min = mt_min_slots[bn->type];
1891
1892		*mid_split = 0;
1893		/*
1894		 * Avoid having a range less than the slot count unless it
1895		 * causes one node to be deficient.
 1896		 * NOTE: mt_min_slots is 1 based, b_end and split are zero based.
1897		 */
1898		while ((split < slot_count - 1) &&
1899		       ((bn->pivot[split] - min) < slot_count - 1) &&
1900		       (b_end - split > slot_min))
1901			split++;
1902	}
1903
1904	/* Avoid ending a node on a NULL entry */
1905	split = mab_no_null_split(bn, split, slot_count);
1906
1907	if (unlikely(*mid_split))
1908		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1909
1910	return split;
1911}
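
/*
 * Illustrative numbers for the 3-way split: a store that overwrites the end
 * of one full node and the beginning of the next can leave close to
 * 2 * slot_count entries in the big node.  Once the total reaches
 * 2 * slot_count (or 2 * slot_count - 1 with a NULL at the split),
 * mab_middle_node() reports true and the data is divided at b_end / 3 and
 * 2 * (b_end / 3) instead of in half.
 */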
1912
1913/*
1914 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1915 * and set @b_node->b_end to the next free slot.
1916 * @mas: The maple state
1917 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusive)
1919 * @b_node: The maple_big_node to place the data
1920 * @mab_start: The starting location in maple_big_node to store the data.
1921 */
1922static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1923			unsigned char mas_end, struct maple_big_node *b_node,
1924			unsigned char mab_start)
1925{
1926	enum maple_type mt;
1927	struct maple_node *node;
1928	void __rcu **slots;
1929	unsigned long *pivots, *gaps;
1930	int i = mas_start, j = mab_start;
1931	unsigned char piv_end;
1932
1933	node = mas_mn(mas);
1934	mt = mte_node_type(mas->node);
1935	pivots = ma_pivots(node, mt);
1936	if (!i) {
1937		b_node->pivot[j] = pivots[i++];
1938		if (unlikely(i > mas_end))
1939			goto complete;
1940		j++;
1941	}
1942
1943	piv_end = min(mas_end, mt_pivots[mt]);
1944	for (; i < piv_end; i++, j++) {
1945		b_node->pivot[j] = pivots[i];
1946		if (unlikely(!b_node->pivot[j]))
1947			break;
1948
1949		if (unlikely(mas->max == b_node->pivot[j]))
1950			goto complete;
1951	}
1952
1953	if (likely(i <= mas_end))
1954		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1955
1956complete:
1957	b_node->b_end = ++j;
1958	j -= mab_start;
1959	slots = ma_slots(node, mt);
1960	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1961	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1962		gaps = ma_gaps(node, mt);
1963		memcpy(b_node->gap + mab_start, gaps + mas_start,
1964		       sizeof(unsigned long) * j);
1965	}
1966}
1967
1968/*
1969 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1970 * @node: The maple node
1971 * @mt: The maple type
1972 * @end: The node end
1973 */
1974static inline void mas_leaf_set_meta(struct maple_node *node,
1975		enum maple_type mt, unsigned char end)
1976{
1977	if (end < mt_slots[mt] - 1)
1978		ma_set_meta(node, mt, 0, end);
1979}
1980
1981/*
1982 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1983 * @b_node: the maple_big_node that has the data
1984 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusive)
1986 * @mas: The maple state with the maple encoded node.
1987 */
1988static inline void mab_mas_cp(struct maple_big_node *b_node,
1989			      unsigned char mab_start, unsigned char mab_end,
1990			      struct ma_state *mas, bool new_max)
1991{
1992	int i, j = 0;
1993	enum maple_type mt = mte_node_type(mas->node);
1994	struct maple_node *node = mte_to_node(mas->node);
1995	void __rcu **slots = ma_slots(node, mt);
1996	unsigned long *pivots = ma_pivots(node, mt);
1997	unsigned long *gaps = NULL;
1998	unsigned char end;
1999
2000	if (mab_end - mab_start > mt_pivots[mt])
2001		mab_end--;
2002
2003	if (!pivots[mt_pivots[mt] - 1])
2004		slots[mt_pivots[mt]] = NULL;
2005
2006	i = mab_start;
2007	do {
2008		pivots[j++] = b_node->pivot[i++];
2009	} while (i <= mab_end && likely(b_node->pivot[i]));
2010
2011	memcpy(slots, b_node->slot + mab_start,
2012	       sizeof(void *) * (i - mab_start));
2013
2014	if (new_max)
2015		mas->max = b_node->pivot[i - 1];
2016
2017	end = j - 1;
2018	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2019		unsigned long max_gap = 0;
2020		unsigned char offset = 0;
2021
2022		gaps = ma_gaps(node, mt);
2023		do {
2024			gaps[--j] = b_node->gap[--i];
2025			if (gaps[j] > max_gap) {
2026				offset = j;
2027				max_gap = gaps[j];
2028			}
2029		} while (j);
2030
2031		ma_set_meta(node, mt, offset, end);
2032	} else {
2033		mas_leaf_set_meta(node, mt, end);
2034	}
2035}
2036
2037/*
2038 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2039 * @mas: The maple state
2040 * @end: The maple node end
2041 * @mt: The maple node type
2042 */
2043static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2044				      enum maple_type mt)
2045{
2046	if (!(mas->mas_flags & MA_STATE_BULK))
2047		return;
2048
2049	if (mte_is_root(mas->node))
2050		return;
2051
2052	if (end > mt_min_slots[mt]) {
2053		mas->mas_flags &= ~MA_STATE_REBALANCE;
2054		return;
2055	}
2056}
2057
2058/*
2059 * mas_store_b_node() - Store an @entry into the b_node while also copying the
2060 * data from a maple encoded node.
2061 * @wr_mas: the maple write state
2062 * @b_node: the maple_big_node to fill with data
2063 * @offset_end: the offset to end copying
2064 *
 * Set @b_node->b_end to the end of the data stored in @b_node.
2066 */
2067static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2068		struct maple_big_node *b_node, unsigned char offset_end)
2069{
2070	unsigned char slot;
2071	unsigned char b_end;
2072	/* Possible underflow of piv will wrap back to 0 before use. */
2073	unsigned long piv;
2074	struct ma_state *mas = wr_mas->mas;
2075
2076	b_node->type = wr_mas->type;
2077	b_end = 0;
2078	slot = mas->offset;
2079	if (slot) {
2080		/* Copy start data up to insert. */
2081		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2082		b_end = b_node->b_end;
2083		piv = b_node->pivot[b_end - 1];
	} else {
		piv = mas->min - 1;
	}
2086
2087	if (piv + 1 < mas->index) {
2088		/* Handle range starting after old range */
2089		b_node->slot[b_end] = wr_mas->content;
2090		if (!wr_mas->content)
2091			b_node->gap[b_end] = mas->index - 1 - piv;
2092		b_node->pivot[b_end++] = mas->index - 1;
2093	}
2094
2095	/* Store the new entry. */
2096	mas->offset = b_end;
2097	b_node->slot[b_end] = wr_mas->entry;
2098	b_node->pivot[b_end] = mas->last;
2099
2100	/* Appended. */
2101	if (mas->last >= mas->max)
2102		goto b_end;
2103
2104	/* Handle new range ending before old range ends */
2105	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2106	if (piv > mas->last) {
2107		if (piv == ULONG_MAX)
2108			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2109
2110		if (offset_end != slot)
2111			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2112							  offset_end);
2113
2114		b_node->slot[++b_end] = wr_mas->content;
2115		if (!wr_mas->content)
2116			b_node->gap[b_end] = piv - mas->last + 1;
2117		b_node->pivot[b_end] = piv;
2118	}
2119
2120	slot = offset_end + 1;
2121	if (slot > mas->end)
2122		goto b_end;
2123
2124	/* Copy end data to the end of the node. */
2125	mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
2126	b_node->b_end--;
2127	return;
2128
2129b_end:
2130	b_node->b_end = b_end;
2131}
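
/*
 * Example: storing entry E over [12, 17] into a leaf holding A over
 * [10, 20] places up to three parts in the big node: A with pivot 11 (the
 * old content before the new range), E with pivot 17, and A again with
 * pivot 20 (the old content after the new range), matching the
 * piv + 1 < mas->index and piv > mas->last checks above.
 */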
2132
2133/*
2134 * mas_prev_sibling() - Find the previous node with the same parent.
2135 * @mas: the maple state
2136 *
2137 * Return: True if there is a previous sibling, false otherwise.
2138 */
2139static inline bool mas_prev_sibling(struct ma_state *mas)
2140{
2141	unsigned int p_slot = mte_parent_slot(mas->node);
2142
2143	if (mte_is_root(mas->node))
2144		return false;
2145
2146	if (!p_slot)
2147		return false;
2148
2149	mas_ascend(mas);
2150	mas->offset = p_slot - 1;
2151	mas_descend(mas);
2152	return true;
2153}
2154
2155/*
2156 * mas_next_sibling() - Find the next node with the same parent.
2157 * @mas: the maple state
2158 *
2159 * Return: true if there is a next sibling, false otherwise.
2160 */
2161static inline bool mas_next_sibling(struct ma_state *mas)
2162{
2163	MA_STATE(parent, mas->tree, mas->index, mas->last);
2164
2165	if (mte_is_root(mas->node))
2166		return false;
2167
2168	parent = *mas;
2169	mas_ascend(&parent);
2170	parent.offset = mte_parent_slot(mas->node) + 1;
2171	if (parent.offset > mas_data_end(&parent))
2172		return false;
2173
2174	*mas = parent;
2175	mas_descend(mas);
2176	return true;
2177}
2178
/*
 * mas_node_or_none() - Set the node and status of the maple state.
 * @mas: The maple state
 * @enode: The encoded maple node.
 *
 * Set @mas->node to @enode and the status to ma_active, or clear the node and
 * set the status to ma_none when @enode is NULL.
 */
2185static inline void mas_node_or_none(struct ma_state *mas,
2186		struct maple_enode *enode)
2187{
2188	if (enode) {
2189		mas->node = enode;
2190		mas->status = ma_active;
2191	} else {
2192		mas->node = NULL;
2193		mas->status = ma_none;
2194	}
2195}
2196
2197/*
2198 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2199 * @wr_mas: The maple write state
2200 *
2201 * Uses mas_slot_locked() and does not need to worry about dead nodes.
2202 */
2203static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2204{
2205	struct ma_state *mas = wr_mas->mas;
2206	unsigned char count, offset;
2207
2208	if (unlikely(ma_is_dense(wr_mas->type))) {
2209		wr_mas->r_max = wr_mas->r_min = mas->index;
2210		mas->offset = mas->index = mas->min;
2211		return;
2212	}
2213
2214	wr_mas->node = mas_mn(wr_mas->mas);
2215	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2216	count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
2217				       wr_mas->pivots, mas->max);
2218	offset = mas->offset;
2219
2220	while (offset < count && mas->index > wr_mas->pivots[offset])
2221		offset++;
2222
2223	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2224	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2225	wr_mas->offset_end = mas->offset = offset;
2226}
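
/*
 * Example for mas_wr_node_walk(): with pivots {10, 20, 30} and
 * mas->index = 25, the walk stops at offset 2, so the write targets the
 * slot covering r_min = 21 through r_max = 30.
 */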
2227
/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
2233static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2234{
2235	unsigned char b_end = mast->bn->b_end;
2236
2237	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2238		   mast->bn, b_end);
2239	mast->orig_r->last = mast->orig_r->max;
2240}
2241
/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
2247static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2248{
2249	unsigned char end = mas_data_end(mast->orig_l) + 1;
2250	unsigned char b_end = mast->bn->b_end;
2251
2252	mab_shift_right(mast->bn, end);
2253	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2254	mast->l->min = mast->orig_l->min;
2255	mast->orig_l->index = mast->orig_l->min;
2256	mast->bn->b_end = end + b_end;
2257	mast->l->offset += end;
2258}
2259
/*
 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
 * favouring the node to the right.  Check the nodes to the right, then to the
 * left, at each level upwards until the root is reached.
 * Data is copied into @mast->bn.
 * @mast: The maple_subtree_state.
 *
 * Return: true if data was found and copied, false otherwise.
 */
2267static inline
2268bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2269{
	struct ma_state r_tmp = *mast->orig_r;
	struct ma_state l_tmp = *mast->orig_l;
	unsigned char depth = 0;

2276	do {
2277		mas_ascend(mast->orig_r);
2278		mas_ascend(mast->orig_l);
2279		depth++;
2280		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2281			mast->orig_r->offset++;
2282			do {
2283				mas_descend(mast->orig_r);
2284				mast->orig_r->offset = 0;
2285			} while (--depth);
2286
2287			mast_rebalance_next(mast);
2288			*mast->orig_l = l_tmp;
2289			return true;
2290		} else if (mast->orig_l->offset != 0) {
2291			mast->orig_l->offset--;
2292			do {
2293				mas_descend(mast->orig_l);
2294				mast->orig_l->offset =
2295					mas_data_end(mast->orig_l);
2296			} while (--depth);
2297
2298			mast_rebalance_prev(mast);
2299			*mast->orig_r = r_tmp;
2300			return true;
2301		}
2302	} while (!mte_is_root(mast->orig_r->node));
2303
2304	*mast->orig_r = r_tmp;
2305	*mast->orig_l = l_tmp;
2306	return false;
2307}
2308
2309/*
2310 * mast_ascend() - Ascend the original left and right maple states.
2311 * @mast: the maple subtree state.
2312 *
2313 * Ascend the original left and right sides.  Set the offsets to point to the
2314 * data already in the new tree (@mast->l and @mast->r).
2315 */
2316static inline void mast_ascend(struct maple_subtree_state *mast)
2317{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
2319	mas_ascend(mast->orig_l);
2320	mas_ascend(mast->orig_r);
2321
2322	mast->orig_r->offset = 0;
2323	mast->orig_r->index = mast->r->max;
2324	/* last should be larger than or equal to index */
2325	if (mast->orig_r->last < mast->orig_r->index)
2326		mast->orig_r->last = mast->orig_r->index;
2327
2328	wr_mas.type = mte_node_type(mast->orig_r->node);
2329	mas_wr_node_walk(&wr_mas);
2330	/* Set up the left side of things */
2331	mast->orig_l->offset = 0;
2332	mast->orig_l->index = mast->l->min;
2333	wr_mas.mas = mast->orig_l;
2334	wr_mas.type = mte_node_type(mast->orig_l->node);
2335	mas_wr_node_walk(&wr_mas);
2336
2337	mast->bn->type = wr_mas.type;
2338}
2339
2340/*
2341 * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2342 * @mas: the maple state with the allocations.
2343 * @b_node: the maple_big_node with the type encoding.
2344 *
2345 * Use the node type from the maple_big_node to allocate a new node from the
2346 * ma_state.  This function exists mainly for code readability.
2347 *
2348 * Return: A new maple encoded node
2349 */
2350static inline struct maple_enode
2351*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2352{
2353	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2354}
2355
2356/*
2357 * mas_mab_to_node() - Set up right and middle nodes
2358 *
2359 * @mas: the maple state that contains the allocations.
2360 * @b_node: the node which contains the data.
2361 * @left: The pointer which will have the left node
2362 * @right: The pointer which may have the right node
2363 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the data's range, passed to mab_calc_split()
2365 *
2366 * Return: the split of left.
2367 */
2368static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2369	struct maple_big_node *b_node, struct maple_enode **left,
2370	struct maple_enode **right, struct maple_enode **middle,
2371	unsigned char *mid_split, unsigned long min)
2372{
2373	unsigned char split = 0;
2374	unsigned char slot_count = mt_slots[b_node->type];
2375
2376	*left = mas_new_ma_node(mas, b_node);
2377	*right = NULL;
2378	*middle = NULL;
2379	*mid_split = 0;
2380
2381	if (b_node->b_end < slot_count) {
2382		split = b_node->b_end;
2383	} else {
2384		split = mab_calc_split(mas, b_node, mid_split, min);
2385		*right = mas_new_ma_node(mas, b_node);
2386	}
2387
2388	if (*mid_split)
2389		*middle = mas_new_ma_node(mas, b_node);
2390
	return split;
}
2394
2395/*
2396 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2397 * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
2401 */
2402static inline void mab_set_b_end(struct maple_big_node *b_node,
2403				 struct ma_state *mas,
2404				 void *entry)
2405{
2406	if (!entry)
2407		return;
2408
2409	b_node->slot[b_node->b_end] = entry;
2410	if (mt_is_alloc(mas->tree))
2411		b_node->gap[b_node->b_end] = mas_max_gap(mas);
2412	b_node->pivot[b_node->b_end++] = mas->max;
2413}
2414
2415/*
2416 * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2417 * of @mas->node to either @left or @right, depending on @slot and @split
2418 *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
2424 */
2425static inline void mas_set_split_parent(struct ma_state *mas,
2426					struct maple_enode *left,
2427					struct maple_enode *right,
2428					unsigned char *slot, unsigned char split)
2429{
2430	if (mas_is_none(mas))
2431		return;
2432
2433	if ((*slot) <= split)
2434		mas_set_parent(mas, mas->node, left, *slot);
2435	else if (right)
2436		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2437
2438	(*slot)++;
2439}
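
/*
 * Example: with split = 7, a child at slot 5 is parented to @left at
 * offset 5, while a child at slot 9 is parented to @right at offset
 * 9 - 7 - 1 = 1.
 */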
2440
/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right.
 * @slot: The offset
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
 */
2450static inline void mte_mid_split_check(struct maple_enode **l,
2451				       struct maple_enode **r,
2452				       struct maple_enode *right,
2453				       unsigned char slot,
2454				       unsigned char *split,
2455				       unsigned char mid_split)
2456{
2457	if (*r == right)
2458		return;
2459
2460	if (slot < mid_split)
2461		return;
2462
2463	*l = *r;
2464	*r = right;
2465	*split = mid_split;
2466}
2467
/*
 * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node
 * @right: the right node
 * @split: the split location
 * @mid_split: the split location for the middle node
 */
2476static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2477					  struct maple_enode *left,
2478					  struct maple_enode *middle,
2479					  struct maple_enode *right,
2480					  unsigned char split,
2481					  unsigned char mid_split)
2482{
2483	unsigned char slot;
2484	struct maple_enode *l = left;
2485	struct maple_enode *r = right;
2486
2487	if (mas_is_none(mast->l))
2488		return;
2489
2490	if (middle)
2491		r = middle;
2492
2493	slot = mast->l->offset;
2494
2495	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2496	mas_set_split_parent(mast->l, l, r, &slot, split);
2497
2498	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2499	mas_set_split_parent(mast->m, l, r, &slot, split);
2500
2501	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2502	mas_set_split_parent(mast->r, l, r, &slot, split);
2503}
2504
2505/*
2506 * mas_topiary_node() - Dispose of a single node
2507 * @mas: The maple state for pushing nodes
 * @tmp_mas: The maple state containing the node to dispose of
2509 * @in_rcu: If the tree is in rcu mode
2510 *
2511 * The node will either be RCU freed or pushed back on the maple state.
2512 */
2513static inline void mas_topiary_node(struct ma_state *mas,
2514		struct ma_state *tmp_mas, bool in_rcu)
2515{
2516	struct maple_node *tmp;
2517	struct maple_enode *enode;
2518
2519	if (mas_is_none(tmp_mas))
2520		return;
2521
2522	enode = tmp_mas->node;
2523	tmp = mte_to_node(enode);
2524	mte_set_node_dead(enode);
2525	if (in_rcu)
2526		ma_free_rcu(tmp);
2527	else
2528		mas_push_node(mas, tmp);
2529}
2530
/*
 * mas_topiary_replace() - Replace the data with new data, then repair the
 * parent links within the new tree.  Iterate over the dead sub-tree and collect
 * the dead subtrees and topiary the nodes that are no longer of use.
 * @mas: The maple state pointing at the new data
 * @old_enode: The maple encoded node being replaced
 *
 * The new tree will have up to three children with the correct parent.  Keep
 * track of the new entries as they need to be followed to find the next level
 * of new entries.
 *
 * The old tree will have up to three children with the old parent.  Keep track
 * of the old entries as they may have more nodes below replaced.  Nodes within
 * [index, last] are dead subtrees; others need to be freed and followed.
 */
2548static inline void mas_topiary_replace(struct ma_state *mas,
2549		struct maple_enode *old_enode)
2550{
2551	struct ma_state tmp[3], tmp_next[3];
2552	MA_TOPIARY(subtrees, mas->tree);
2553	bool in_rcu;
2554	int i, n;
2555
2556	/* Place data in tree & then mark node as old */
2557	mas_put_in_tree(mas, old_enode);
2558
2559	/* Update the parent pointers in the tree */
2560	tmp[0] = *mas;
2561	tmp[0].offset = 0;
2562	tmp[1].status = ma_none;
2563	tmp[2].status = ma_none;
2564	while (!mte_is_leaf(tmp[0].node)) {
2565		n = 0;
2566		for (i = 0; i < 3; i++) {
2567			if (mas_is_none(&tmp[i]))
2568				continue;
2569
2570			while (n < 3) {
2571				if (!mas_find_child(&tmp[i], &tmp_next[n]))
2572					break;
2573				n++;
2574			}
2575
2576			mas_adopt_children(&tmp[i], tmp[i].node);
2577		}
2578
2579		if (MAS_WARN_ON(mas, n == 0))
2580			break;
2581
2582		while (n < 3)
2583			tmp_next[n++].status = ma_none;
2584
2585		for (i = 0; i < 3; i++)
2586			tmp[i] = tmp_next[i];
2587	}
2588
2589	/* Collect the old nodes that need to be discarded */
2590	if (mte_is_leaf(old_enode))
2591		return mas_free(mas, old_enode);
2592
2593	tmp[0] = *mas;
2594	tmp[0].offset = 0;
2595	tmp[0].node = old_enode;
2596	tmp[1].status = ma_none;
2597	tmp[2].status = ma_none;
2598	in_rcu = mt_in_rcu(mas->tree);
2599	do {
2600		n = 0;
2601		for (i = 0; i < 3; i++) {
2602			if (mas_is_none(&tmp[i]))
2603				continue;
2604
2605			while (n < 3) {
2606				if (!mas_find_child(&tmp[i], &tmp_next[n]))
2607					break;
2608
2609				if ((tmp_next[n].min >= tmp_next->index) &&
2610				    (tmp_next[n].max <= tmp_next->last)) {
2611					mat_add(&subtrees, tmp_next[n].node);
2612					tmp_next[n].status = ma_none;
2613				} else {
2614					n++;
2615				}
2616			}
2617		}
2618
2619		if (MAS_WARN_ON(mas, n == 0))
2620			break;
2621
2622		while (n < 3)
2623			tmp_next[n++].status = ma_none;
2624
2625		for (i = 0; i < 3; i++) {
2626			mas_topiary_node(mas, &tmp[i], in_rcu);
2627			tmp[i] = tmp_next[i];
2628		}
2629	} while (!mte_is_leaf(tmp[0].node));
2630
2631	for (i = 0; i < 3; i++)
2632		mas_topiary_node(mas, &tmp[i], in_rcu);
2633
2634	mas_mat_destroy(mas, &subtrees);
2635}
2636
2637/*
2638 * mas_wmb_replace() - Write memory barrier and replace
2639 * @mas: The maple state
 * @old_enode: The old maple encoded node that is being replaced.
2641 *
2642 * Updates gap as necessary.
2643 */
2644static inline void mas_wmb_replace(struct ma_state *mas,
2645		struct maple_enode *old_enode)
2646{
2647	/* Insert the new data in the tree */
2648	mas_topiary_replace(mas, old_enode);
2649
2650	if (mte_is_leaf(mas->node))
2651		return;
2652
2653	mas_update_gap(mas);
2654}
2655
2656/*
2657 * mast_cp_to_nodes() - Copy data out to nodes.
2658 * @mast: The maple subtree state
2659 * @left: The left encoded maple node
2660 * @middle: The middle encoded maple node
2661 * @right: The right encoded maple node
2662 * @split: The location to split between left and (middle ? middle : right)
2663 * @mid_split: The location to split between middle and right.
2664 */
2665static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2666	struct maple_enode *left, struct maple_enode *middle,
2667	struct maple_enode *right, unsigned char split, unsigned char mid_split)
2668{
2669	bool new_lmax = true;
2670
2671	mas_node_or_none(mast->l, left);
2672	mas_node_or_none(mast->m, middle);
2673	mas_node_or_none(mast->r, right);
2674
2675	mast->l->min = mast->orig_l->min;
2676	if (split == mast->bn->b_end) {
2677		mast->l->max = mast->orig_r->max;
2678		new_lmax = false;
2679	}
2680
2681	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2682
2683	if (middle) {
2684		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2685		mast->m->min = mast->bn->pivot[split] + 1;
2686		split = mid_split;
2687	}
2688
2689	mast->r->max = mast->orig_r->max;
2690	if (right) {
2691		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2692		mast->r->min = mast->bn->pivot[split] + 1;
2693	}
2694}
2695
2696/*
 * mast_combine_cp_left() - Copy in the original left side of the tree into the
2698 * combined data set in the maple subtree state big node.
2699 * @mast: The maple subtree state
2700 */
2701static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2702{
2703	unsigned char l_slot = mast->orig_l->offset;
2704
2705	if (!l_slot)
2706		return;
2707
2708	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2709}
2710
2711/*
 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2713 * combined data set in the maple subtree state big node.
2714 * @mast: The maple subtree state
2715 */
2716static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2717{
2718	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2719		return;
2720
2721	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2722		   mt_slot_count(mast->orig_r->node), mast->bn,
2723		   mast->bn->b_end);
2724	mast->orig_r->last = mast->orig_r->max;
2725}
2726
/*
 * mast_sufficient() - Check if the maple subtree state has enough data in the
 * big node to create at least one sufficient node.
 * @mast: the maple subtree state
 *
 * Return: true if there is enough data, false otherwise.
 */
2732static inline bool mast_sufficient(struct maple_subtree_state *mast)
2733{
2734	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2735		return true;
2736
2737	return false;
2738}
2739
/*
 * mast_overflow() - Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 *
 * Return: true if the data overflows a single node, false otherwise.
 */
2745static inline bool mast_overflow(struct maple_subtree_state *mast)
2746{
2747	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2748		return true;
2749
2750	return false;
2751}
2752
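/*
 * mtree_range_walk() - Walk down to the leaf containing @mas->index.
 * @mas: The maple state.
 *
 * On success, @mas->node is the leaf, @mas->min and @mas->max are the limits
 * of that leaf, and @mas->index and @mas->last are set to the range of the
 * entry found.
 *
 * Return: The entry at @mas->index, or %NULL if a dead node is encountered
 * (@mas is reset in that case).
 */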
2753static inline void *mtree_range_walk(struct ma_state *mas)
2754{
2755	unsigned long *pivots;
2756	unsigned char offset;
2757	struct maple_node *node;
2758	struct maple_enode *next, *last;
2759	enum maple_type type;
2760	void __rcu **slots;
2761	unsigned char end;
2762	unsigned long max, min;
2763	unsigned long prev_max, prev_min;
2764
2765	next = mas->node;
2766	min = mas->min;
2767	max = mas->max;
2768	do {
2769		last = next;
2770		node = mte_to_node(next);
2771		type = mte_node_type(next);
2772		pivots = ma_pivots(node, type);
2773		end = ma_data_end(node, type, pivots, max);
2774		prev_min = min;
2775		prev_max = max;
2776		if (pivots[0] >= mas->index) {
2777			offset = 0;
2778			max = pivots[0];
2779			goto next;
2780		}
2781
2782		offset = 1;
2783		while (offset < end) {
2784			if (pivots[offset] >= mas->index) {
2785				max = pivots[offset];
2786				break;
2787			}
2788			offset++;
2789		}
2790
2791		min = pivots[offset - 1] + 1;
2792next:
2793		slots = ma_slots(node, type);
2794		next = mt_slot(mas->tree, slots, offset);
2795		if (unlikely(ma_dead_node(node)))
2796			goto dead_node;
2797	} while (!ma_is_leaf(type));
2798
2799	mas->end = end;
2800	mas->offset = offset;
2801	mas->index = min;
2802	mas->last = max;
2803	mas->min = prev_min;
2804	mas->max = prev_max;
2805	mas->node = last;
2806	return (void *)next;
2807
2808dead_node:
2809	mas_reset(mas);
2810	return NULL;
2811}
2812
2813/*
2814 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2815 * @mas: The starting maple state
2816 * @mast: The maple_subtree_state, keeps track of 4 maple states.
2817 * @count: The estimated count of iterations needed.
2818 *
2819 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2820 * is hit.  First @b_node is split into two entries which are inserted into the
2821 * next iteration of the loop.  @b_node is returned populated with the final
2822 * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
2823 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree.  The update of
2825 * orig_l_mas->last is used in mas_consume to find the slots that will need to
2826 * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
2827 * the new sub-tree in case the sub-tree becomes the full tree.
2828 *
2829 * Return: the number of elements in b_node during the last loop.
2830 */
2831static int mas_spanning_rebalance(struct ma_state *mas,
2832		struct maple_subtree_state *mast, unsigned char count)
2833{
2834	unsigned char split, mid_split;
2835	unsigned char slot = 0;
2836	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2837	struct maple_enode *old_enode;
2838
2839	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2840	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2841	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2842
2843	/*
2844	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
	 * Rebalancing is done using the ``struct maple_topiary``.
2846	 */
2847	mast->l = &l_mas;
2848	mast->m = &m_mas;
2849	mast->r = &r_mas;
2850	l_mas.status = r_mas.status = m_mas.status = ma_none;
2851
	/* If this is not root and the data is insufficient, gather from siblings. */
2853	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
2854	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
2855		mast_spanning_rebalance(mast);
2856
2857	l_mas.depth = 0;
2858
2859	/*
2860	 * Each level of the tree is examined and balanced, pushing data to the left or
2861	 * right, or rebalancing against left or right nodes is employed to avoid
2862	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
2863	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
2864	 * will have the incorrect parent pointers and currently be in two trees: the
2865	 * original tree and the partially new tree.  To remedy the parent pointers in
2866	 * the old tree, the new data is swapped into the active tree and a walk down
2867	 * the tree is performed and the parent pointers are updated.
2868	 * See mas_topiary_replace() for more information.
2869	 */
2870	while (count--) {
2871		mast->bn->b_end--;
2872		mast->bn->type = mte_node_type(mast->orig_l->node);
2873		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
2874					&mid_split, mast->orig_l->min);
2875		mast_set_split_parents(mast, left, middle, right, split,
2876				       mid_split);
2877		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
2878
2879		/*
2880		 * Copy data from next level in the tree to mast->bn from next
2881		 * iteration
2882		 */
2883		memset(mast->bn, 0, sizeof(struct maple_big_node));
2884		mast->bn->type = mte_node_type(left);
2885		l_mas.depth++;
2886
2887		/* Root already stored in l->node. */
2888		if (mas_is_root_limits(mast->l))
2889			goto new_root;
2890
2891		mast_ascend(mast);
2892		mast_combine_cp_left(mast);
2893		l_mas.offset = mast->bn->b_end;
2894		mab_set_b_end(mast->bn, &l_mas, left);
2895		mab_set_b_end(mast->bn, &m_mas, middle);
2896		mab_set_b_end(mast->bn, &r_mas, right);
2897
2898		/* Copy anything necessary out of the right node. */
2899		mast_combine_cp_right(mast);
2900		mast->orig_l->last = mast->orig_l->max;
2901
2902		if (mast_sufficient(mast))
2903			continue;
2904
2905		if (mast_overflow(mast))
2906			continue;
2907
2908		/* May be a new root stored in mast->bn */
2909		if (mas_is_root_limits(mast->orig_l))
2910			break;
2911
2912		mast_spanning_rebalance(mast);
2913
2914		/* rebalancing from other nodes may require another loop. */
2915		if (!count)
2916			count++;
2917	}
2918
2919	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
2920				mte_node_type(mast->orig_l->node));
2921	l_mas.depth++;
2922	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
2923	mas_set_parent(mas, left, l_mas.node, slot);
2924	if (middle)
2925		mas_set_parent(mas, middle, l_mas.node, ++slot);
2926
2927	if (right)
2928		mas_set_parent(mas, right, l_mas.node, ++slot);
2929
2930	if (mas_is_root_limits(mast->l)) {
2931new_root:
2932		mas_mn(mast->l)->parent = ma_parent_ptr(mas_tree_parent(mas));
2933		while (!mte_is_root(mast->orig_l->node))
2934			mast_ascend(mast);
2935	} else {
2936		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
2937	}
2938
2939	old_enode = mast->orig_l->node;
2940	mas->depth = l_mas.depth;
2941	mas->node = l_mas.node;
2942	mas->min = l_mas.min;
2943	mas->max = l_mas.max;
2944	mas->offset = l_mas.offset;
2945	mas_wmb_replace(mas, old_enode);
2946	mtree_range_walk(mas);
2947	return mast->bn->b_end;
2948}
2949
2950/*
2951 * mas_rebalance() - Rebalance a given node.
2952 * @mas: The maple state
2953 * @b_node: The big maple node.
2954 *
2955 * Rebalance two nodes into a single node or two new nodes that are sufficient.
2956 * Continue upwards until tree is sufficient.
2957 *
2958 * Return: the number of elements in b_node during the last loop.
2959 */
2960static inline int mas_rebalance(struct ma_state *mas,
2961				struct maple_big_node *b_node)
2962{
2963	char empty_count = mas_mt_height(mas);
2964	struct maple_subtree_state mast;
2965	unsigned char shift, b_end = ++b_node->b_end;
2966
2967	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
2968	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2969
2970	trace_ma_op(__func__, mas);
2971
2972	/*
2973	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
2974	 * against the node to the right if it exists, otherwise the node to the
2975	 * left of this node is rebalanced against this node.  If rebalancing
2976	 * causes just one node to be produced instead of two, then the parent
2977	 * is also examined and rebalanced if it is insufficient.  Every level
2978	 * tries to combine the data in the same way.  If one node contains the
2979	 * entire range of the tree, then that node is used as a new root node.
2980	 */
2981	mas_node_count(mas, empty_count * 2 - 1);
2982	if (mas_is_err(mas))
2983		return 0;
2984
2985	mast.orig_l = &l_mas;
2986	mast.orig_r = &r_mas;
2987	mast.bn = b_node;
2988	mast.bn->type = mte_node_type(mas->node);
2989
2990	l_mas = r_mas = *mas;
2991
2992	if (mas_next_sibling(&r_mas)) {
2993		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
2994		r_mas.last = r_mas.index = r_mas.max;
2995	} else {
2996		mas_prev_sibling(&l_mas);
2997		shift = mas_data_end(&l_mas) + 1;
2998		mab_shift_right(b_node, shift);
2999		mas->offset += shift;
3000		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3001		b_node->b_end = shift + b_end;
3002		l_mas.index = l_mas.last = l_mas.min;
3003	}
3004
3005	return mas_spanning_rebalance(mas, &mast, empty_count);
3006}
3007
3008/*
3009 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3010 * state.
3011 * @mas: The maple state
3012 * @end: The end of the left-most node.
3013 *
3014 * During a mass-insert event (such as forking), it may be necessary to
3015 * rebalance the left-most node when it is not sufficient.
3016 */
3017static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3018{
3019	enum maple_type mt = mte_node_type(mas->node);
3020	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3021	struct maple_enode *eparent, *old_eparent;
3022	unsigned char offset, tmp, split = mt_slots[mt] / 2;
3023	void __rcu **l_slots, **slots;
3024	unsigned long *l_pivs, *pivs, gap;
3025	bool in_rcu = mt_in_rcu(mas->tree);
3026
3027	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3028
3029	l_mas = *mas;
3030	mas_prev_sibling(&l_mas);
3031
3032	/* set up node. */
3033	if (in_rcu) {
3034		/* Allocate for both left and right as well as parent. */
3035		mas_node_count(mas, 3);
3036		if (mas_is_err(mas))
3037			return;
3038
3039		newnode = mas_pop_node(mas);
3040	} else {
3041		newnode = &reuse;
3042	}
3043
3044	node = mas_mn(mas);
3045	newnode->parent = node->parent;
3046	slots = ma_slots(newnode, mt);
3047	pivs = ma_pivots(newnode, mt);
3048	left = mas_mn(&l_mas);
3049	l_slots = ma_slots(left, mt);
3050	l_pivs = ma_pivots(left, mt);
3051	if (!l_slots[split])
3052		split++;
3053	tmp = mas_data_end(&l_mas) - split;
3054
3055	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3056	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3057	pivs[tmp] = l_mas.max;
3058	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3059	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3060
3061	l_mas.max = l_pivs[split];
3062	mas->min = l_mas.max + 1;
3063	old_eparent = mt_mk_node(mte_parent(l_mas.node),
3064			     mas_parent_type(&l_mas, l_mas.node));
3065	tmp += end;
3066	if (!in_rcu) {
3067		unsigned char max_p = mt_pivots[mt];
3068		unsigned char max_s = mt_slots[mt];
3069
3070		if (tmp < max_p)
3071			memset(pivs + tmp, 0,
3072			       sizeof(unsigned long) * (max_p - tmp));
3073
		if (tmp < max_s)
3075			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3076
3077		memcpy(node, newnode, sizeof(struct maple_node));
3078		ma_set_meta(node, mt, 0, tmp - 1);
3079		mte_set_pivot(old_eparent, mte_parent_slot(l_mas.node),
3080			      l_pivs[split]);
3081
3082		/* Remove data from l_pivs. */
3083		tmp = split + 1;
3084		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3085		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3086		ma_set_meta(left, mt, 0, split);
3087		eparent = old_eparent;
3088
3089		goto done;
3090	}
3091
3092	/* RCU requires replacing both l_mas, mas, and parent. */
3093	mas->node = mt_mk_node(newnode, mt);
3094	ma_set_meta(newnode, mt, 0, tmp);
3095
3096	new_left = mas_pop_node(mas);
3097	new_left->parent = left->parent;
3098	mt = mte_node_type(l_mas.node);
3099	slots = ma_slots(new_left, mt);
3100	pivs = ma_pivots(new_left, mt);
3101	memcpy(slots, l_slots, sizeof(void *) * split);
3102	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3103	ma_set_meta(new_left, mt, 0, split);
3104	l_mas.node = mt_mk_node(new_left, mt);
3105
3106	/* replace parent. */
3107	offset = mte_parent_slot(mas->node);
3108	mt = mas_parent_type(&l_mas, l_mas.node);
3109	parent = mas_pop_node(mas);
3110	slots = ma_slots(parent, mt);
3111	pivs = ma_pivots(parent, mt);
3112	memcpy(parent, mte_to_node(old_eparent), sizeof(struct maple_node));
3113	rcu_assign_pointer(slots[offset], mas->node);
3114	rcu_assign_pointer(slots[offset - 1], l_mas.node);
3115	pivs[offset - 1] = l_mas.max;
3116	eparent = mt_mk_node(parent, mt);
3117done:
3118	gap = mas_leaf_max_gap(mas);
3119	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3120	gap = mas_leaf_max_gap(&l_mas);
3121	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3122	mas_ascend(mas);
3123
3124	if (in_rcu) {
3125		mas_replace_node(mas, old_eparent);
3126		mas_adopt_children(mas, mas->node);
3127	}
3128
3129	mas_update_gap(mas);
3130}
3131
3132/*
3133 * mas_split_final_node() - Split the final node in a subtree operation.
3134 * @mast: the maple subtree state
3135 * @mas: The maple state
3136 * @height: The height of the tree in case it's a new root.
3137 */
3138static inline void mas_split_final_node(struct maple_subtree_state *mast,
3139					struct ma_state *mas, int height)
3140{
3141	struct maple_enode *ancestor;
3142
3143	if (mte_is_root(mas->node)) {
3144		if (mt_is_alloc(mas->tree))
3145			mast->bn->type = maple_arange_64;
3146		else
3147			mast->bn->type = maple_range_64;
3148		mas->depth = height;
3149	}
	/*
	 * Only a single node is used here; it could be the root.
	 * The big node data should just fit in a single node.
	 */
3154	ancestor = mas_new_ma_node(mas, mast->bn);
3155	mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3156	mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3157	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3158
3159	mast->l->node = ancestor;
3160	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3161	mas->offset = mast->bn->b_end - 1;
3162}
3163
3164/*
3165 * mast_fill_bnode() - Copy data into the big node in the subtree state
3166 * @mast: The maple subtree state
3167 * @mas: the maple state
 * @skip: The number of entries to skip for the new nodes' insertion.
3169 */
3170static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3171					 struct ma_state *mas,
3172					 unsigned char skip)
3173{
3174	bool cp = true;
3175	unsigned char split;
3176
3177	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3178	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3179	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3180	mast->bn->b_end = 0;
3181
3182	if (mte_is_root(mas->node)) {
3183		cp = false;
3184	} else {
3185		mas_ascend(mas);
3186		mas->offset = mte_parent_slot(mas->node);
3187	}
3188
3189	if (cp && mast->l->offset)
3190		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3191
3192	split = mast->bn->b_end;
3193	mab_set_b_end(mast->bn, mast->l, mast->l->node);
3194	mast->r->offset = mast->bn->b_end;
3195	mab_set_b_end(mast->bn, mast->r, mast->r->node);
3196	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3197		cp = false;
3198
3199	if (cp)
3200		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3201			   mast->bn, mast->bn->b_end);
3202
3203	mast->bn->b_end--;
3204	mast->bn->type = mte_node_type(mas->node);
3205}
3206
3207/*
3208 * mast_split_data() - Split the data in the subtree state big node into regular
3209 * nodes.
3210 * @mast: The maple subtree state
3211 * @mas: The maple state
3212 * @split: The location to split the big node
3213 */
3214static inline void mast_split_data(struct maple_subtree_state *mast,
3215	   struct ma_state *mas, unsigned char split)
3216{
3217	unsigned char p_slot;
3218
3219	mab_mas_cp(mast->bn, 0, split, mast->l, true);
3220	mte_set_pivot(mast->r->node, 0, mast->r->max);
3221	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3222	mast->l->offset = mte_parent_slot(mas->node);
3223	mast->l->max = mast->bn->pivot[split];
3224	mast->r->min = mast->l->max + 1;
3225	if (mte_is_leaf(mas->node))
3226		return;
3227
3228	p_slot = mast->orig_l->offset;
3229	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3230			     &p_slot, split);
3231	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3232			     &p_slot, split);
3233}
3234
3235/*
 * mas_push_data() - Instead of splitting a node, push the data to the right or
 * left sibling node if there is room.
3238 * @mas: The maple state
3239 * @height: The current height of the maple state
3240 * @mast: The maple subtree state
3241 * @left: Push left or not.
3242 *
3243 * Keeping the height of the tree low means faster lookups.
3244 *
3245 * Return: True if pushed, false otherwise.
3246 */
3247static inline bool mas_push_data(struct ma_state *mas, int height,
3248				 struct maple_subtree_state *mast, bool left)
3249{
3250	unsigned char slot_total = mast->bn->b_end;
3251	unsigned char end, space, split;
3252
3253	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3254	tmp_mas = *mas;
3255	tmp_mas.depth = mast->l->depth;
3256
3257	if (left && !mas_prev_sibling(&tmp_mas))
3258		return false;
3259	else if (!left && !mas_next_sibling(&tmp_mas))
3260		return false;
3261
3262	end = mas_data_end(&tmp_mas);
3263	slot_total += end;
3264	space = 2 * mt_slot_count(mas->node) - 2;
3265	/* -2 instead of -1 to ensure there isn't a triple split */
3266	if (ma_is_leaf(mast->bn->type))
3267		space--;
3268
3269	if (mas->max == ULONG_MAX)
3270		space--;
3271
3272	if (slot_total >= space)
3273		return false;
3274
3275	/* Get the data; Fill mast->bn */
3276	mast->bn->b_end++;
3277	if (left) {
3278		mab_shift_right(mast->bn, end + 1);
3279		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3280		mast->bn->b_end = slot_total + 1;
3281	} else {
3282		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3283	}
3284
3285	/* Configure mast for splitting of mast->bn */
3286	split = mt_slots[mast->bn->type] - 2;
3287	if (left) {
3288		/*  Switch mas to prev node  */
3289		*mas = tmp_mas;
3290		/* Start using mast->l for the left side. */
3291		tmp_mas.node = mast->l->node;
3292		*mast->l = tmp_mas;
3293	} else {
3294		tmp_mas.node = mast->r->node;
3295		*mast->r = tmp_mas;
3296		split = slot_total - split;
3297	}
3298	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3299	/* Update parent slot for split calculation. */
3300	if (left)
3301		mast->orig_l->offset += end + 1;
3302
3303	mast_split_data(mast, mas, split);
3304	mast_fill_bnode(mast, mas, 2);
3305	mas_split_final_node(mast, mas, height + 1);
3306	return true;
3307}
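
/*
 * Example of the space check above (illustrative, assuming 16-slot nodes):
 * space starts at 2 * 16 - 2 = 30 and is reduced by one for a leaf and by
 * one more when mas->max == ULONG_MAX.  If the combined data meets or
 * exceeds that limit, the push is abandoned and a regular split happens
 * instead.
 */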
3308
3309/*
3310 * mas_split() - Split data that is too big for one node into two.
3311 * @mas: The maple state
3312 * @b_node: The maple big node
3313 * Return: 1 on success, 0 on failure.
3314 */
3315static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3316{
3317	struct maple_subtree_state mast;
3318	int height = 0;
3319	unsigned char mid_split, split = 0;
3320	struct maple_enode *old;
3321
3322	/*
3323	 * Splitting is handled differently from any other B-tree; the Maple
3324	 * Tree splits upwards.  Splitting up means that the split operation
3325	 * occurs when the walk of the tree hits the leaves and not on the way
3326	 * down.  The reason for splitting up is that it is impossible to know
3327	 * how much space will be needed until the leaf is (or leaves are)
3328	 * reached.  Since overwriting data is allowed and a range could
3329	 * overwrite more than one range or result in changing one entry into 3
3330	 * entries, it is impossible to know if a split is required until the
3331	 * data is examined.
3332	 *
3333	 * Splitting is a balancing act between keeping allocations to a minimum
3334	 * and avoiding a 'jitter' event where a tree is expanded to make room
3335	 * for an entry followed by a contraction when the entry is removed.  To
3336	 * accomplish the balance, there are empty slots remaining in both left
3337	 * and right nodes after a split.
3338	 */
3339	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3340	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3341	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3342	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3343
3344	trace_ma_op(__func__, mas);
3345	mas->depth = mas_mt_height(mas);
3346	/* Allocation failures will happen early. */
3347	mas_node_count(mas, 1 + mas->depth * 2);
3348	if (mas_is_err(mas))
3349		return 0;
3350
3351	mast.l = &l_mas;
3352	mast.r = &r_mas;
3353	mast.orig_l = &prev_l_mas;
3354	mast.orig_r = &prev_r_mas;
3355	mast.bn = b_node;
3356
3357	while (height++ <= mas->depth) {
3358		if (mt_slots[b_node->type] > b_node->b_end) {
3359			mas_split_final_node(&mast, mas, height);
3360			break;
3361		}
3362
3363		l_mas = r_mas = *mas;
3364		l_mas.node = mas_new_ma_node(mas, b_node);
3365		r_mas.node = mas_new_ma_node(mas, b_node);
3366		/*
3367		 * Another way that 'jitter' is avoided is to terminate a split up early if the
3368		 * left or right node has space to spare.  This is referred to as "pushing left"
3369		 * or "pushing right" and is similar to the B* tree, except the nodes left or
3370		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3371		 * is a significant savings.
3372		 */
3373		/* Try to push left. */
3374		if (mas_push_data(mas, height, &mast, true))
3375			break;
3376		/* Try to push right. */
3377		if (mas_push_data(mas, height, &mast, false))
3378			break;
3379
3380		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3381		mast_split_data(&mast, mas, split);
		/*
		 * mab_mas_cp() in the call above overwrites r->max; restore
		 * the usually correct value.
		 */
3386		mast.r->max = mas->max;
3387		mast_fill_bnode(&mast, mas, 1);
3388		prev_l_mas = *mast.l;
3389		prev_r_mas = *mast.r;
3390	}
3391
3392	/* Set the original node as dead */
3393	old = mas->node;
3394	mas->node = l_mas.node;
3395	mas_wmb_replace(mas, old);
3396	mtree_range_walk(mas);
3397	return 1;
3398}
3399
3400/*
3401 * mas_reuse_node() - Reuse the node to store the data.
3402 * @wr_mas: The maple write state
3403 * @bn: The maple big node
3404 * @end: The end of the data.
3405 *
3406 * Will always return false in RCU mode.
3407 *
3408 * Return: True if node was reused, false otherwise.
3409 */
3410static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3411			  struct maple_big_node *bn, unsigned char end)
3412{
3413	/* Need to be rcu safe. */
3414	if (mt_in_rcu(wr_mas->mas->tree))
3415		return false;
3416
3417	if (end > bn->b_end) {
3418		int clear = mt_slots[wr_mas->type] - bn->b_end;
3419
3420		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3421		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3422	}
3423	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3424	return true;
3425}
3426
3427/*
3428 * mas_commit_b_node() - Commit the big node into the tree.
3429 * @wr_mas: The maple write state
3430 * @b_node: The maple big node
3431 * @end: The end of the data.
3432 */
3433static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3434			    struct maple_big_node *b_node, unsigned char end)
3435{
3436	struct maple_node *node;
3437	struct maple_enode *old_enode;
3438	unsigned char b_end = b_node->b_end;
3439	enum maple_type b_type = b_node->type;
3440
3441	old_enode = wr_mas->mas->node;
3442	if ((b_end < mt_min_slots[b_type]) &&
3443	    (!mte_is_root(old_enode)) &&
3444	    (mas_mt_height(wr_mas->mas) > 1))
3445		return mas_rebalance(wr_mas->mas, b_node);
3446
3447	if (b_end >= mt_slots[b_type])
3448		return mas_split(wr_mas->mas, b_node);
3449
3450	if (mas_reuse_node(wr_mas, b_node, end))
3451		goto reuse_node;
3452
3453	mas_node_count(wr_mas->mas, 1);
3454	if (mas_is_err(wr_mas->mas))
3455		return 0;
3456
3457	node = mas_pop_node(wr_mas->mas);
3458	node->parent = mas_mn(wr_mas->mas)->parent;
3459	wr_mas->mas->node = mt_mk_node(node, b_type);
3460	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3461	mas_replace_node(wr_mas->mas, old_enode);
3462reuse_node:
3463	mas_update_gap(wr_mas->mas);
3464	wr_mas->mas->end = b_end;
3465	return 1;
3466}
3467
3468/*
3469 * mas_root_expand() - Expand a root to a node
3470 * @mas: The maple state
3471 * @entry: The entry to store into the tree
3472 */
3473static inline int mas_root_expand(struct ma_state *mas, void *entry)
3474{
3475	void *contents = mas_root_locked(mas);
3476	enum maple_type type = maple_leaf_64;
3477	struct maple_node *node;
3478	void __rcu **slots;
3479	unsigned long *pivots;
3480	int slot = 0;
3481
3482	mas_node_count(mas, 1);
3483	if (unlikely(mas_is_err(mas)))
3484		return 0;
3485
3486	node = mas_pop_node(mas);
3487	pivots = ma_pivots(node, type);
3488	slots = ma_slots(node, type);
3489	node->parent = ma_parent_ptr(mas_tree_parent(mas));
3490	mas->node = mt_mk_node(node, type);
3491	mas->status = ma_active;
3492
3493	if (mas->index) {
3494		if (contents) {
3495			rcu_assign_pointer(slots[slot], contents);
3496			if (likely(mas->index > 1))
3497				slot++;
3498		}
3499		pivots[slot++] = mas->index - 1;
3500	}
3501
3502	rcu_assign_pointer(slots[slot], entry);
3503	mas->offset = slot;
3504	pivots[slot] = mas->last;
3505	if (mas->last != ULONG_MAX)
3506		pivots[++slot] = ULONG_MAX;
3507
3508	mas->depth = 1;
3509	mas_set_height(mas);
3510	ma_set_meta(node, maple_leaf_64, 0, slot);
3511	/* swap the new root into the tree */
3512	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3513	return slot;
3514}
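
/*
 * Example: expanding an empty root for a store of E over [5, 10] produces a
 * leaf with slot 0 = NULL up to pivot 4, slot 1 = E up to pivot 10, and
 * slot 2 = NULL up to the final pivot of ULONG_MAX.
 */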
3515
3516static inline void mas_store_root(struct ma_state *mas, void *entry)
3517{
3518	if (likely((mas->last != 0) || (mas->index != 0)))
3519		mas_root_expand(mas, entry);
3520	else if (((unsigned long) (entry) & 3) == 2)
3521		mas_root_expand(mas, entry);
3522	else {
3523		rcu_assign_pointer(mas->tree->ma_root, entry);
3524		mas->status = ma_start;
3525	}
3526}
3527
/*
 * mas_is_span_wr() - Check if the write needs to be treated as a write that
 * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes of a %NULL that would cause a node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
3541static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3542{
3543	unsigned long max = wr_mas->r_max;
3544	unsigned long last = wr_mas->mas->last;
3545	enum maple_type type = wr_mas->type;
3546	void *entry = wr_mas->entry;
3547
3548	/* Contained in this pivot, fast path */
3549	if (last < max)
3550		return false;
3551
3552	if (ma_is_leaf(type)) {
3553		max = wr_mas->mas->max;
3554		if (last < max)
3555			return false;
3556	}
3557
3558	if (last == max) {
3559		/*
3560		 * The last entry of leaf node cannot be NULL unless it is the
3561		 * rightmost node (writing ULONG_MAX), otherwise it spans slots.
3562		 */
3563		if (entry || last == ULONG_MAX)
3564			return false;
3565	}
3566
3567	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3568	return true;
3569}
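
/*
 * Example: in a leaf where the target slot covers [21, 30] (r_max = 30)
 * and the node ends at mas->max = 50, a write with last = 35 is contained;
 * a write of NULL with last = 50 spans, because only the right-most node
 * (last == ULONG_MAX) may end in NULL.
 */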
3570
3571static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3572{
3573	wr_mas->type = mte_node_type(wr_mas->mas->node);
3574	mas_wr_node_walk(wr_mas);
3575	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3576}
3577
3578static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3579{
3580	wr_mas->mas->max = wr_mas->r_max;
3581	wr_mas->mas->min = wr_mas->r_min;
3582	wr_mas->mas->node = wr_mas->content;
3583	wr_mas->mas->offset = 0;
3584	wr_mas->mas->depth++;
3585}
3586/*
3587 * mas_wr_walk() - Walk the tree for a write.
3588 * @wr_mas: The maple write state
3589 *
3590 * Uses mas_slot_locked() and does not need to worry about dead nodes.
3591 *
3592 * Return: True if it's contained in a node, false on spanning write.
3593 */
3594static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3595{
3596	struct ma_state *mas = wr_mas->mas;
3597
3598	while (true) {
3599		mas_wr_walk_descend(wr_mas);
3600		if (unlikely(mas_is_span_wr(wr_mas)))
3601			return false;
3602
3603		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3604						  mas->offset);
3605		if (ma_is_leaf(wr_mas->type))
3606			return true;
3607
3608		mas_wr_walk_traverse(wr_mas);
3609	}
3610
3611	return true;
3612}
3613
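/*
 * mas_wr_walk_index() - Walk the tree for a write, like mas_wr_walk() but
 * without checking for a spanning write.
 * @wr_mas: The maple write state
 *
 * Return: true once a leaf is reached.
 */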
3614static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3615{
3616	struct ma_state *mas = wr_mas->mas;
3617
3618	while (true) {
3619		mas_wr_walk_descend(wr_mas);
3620		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3621						  mas->offset);
3622		if (ma_is_leaf(wr_mas->type))
3623			return true;
		mas_wr_walk_traverse(wr_mas);
	}
3627	return true;
3628}
3629/*
3630 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3631 * @l_wr_mas: The left maple write state
3632 * @r_wr_mas: The right maple write state
3633 */
3634static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3635					    struct ma_wr_state *r_wr_mas)
3636{
3637	struct ma_state *r_mas = r_wr_mas->mas;
3638	struct ma_state *l_mas = l_wr_mas->mas;
3639	unsigned char l_slot;
3640
3641	l_slot = l_mas->offset;
3642	if (!l_wr_mas->content)
3643		l_mas->index = l_wr_mas->r_min;
3644
3645	if ((l_mas->index == l_wr_mas->r_min) &&
3646		 (l_slot &&
3647		  !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3648		if (l_slot > 1)
3649			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3650		else
3651			l_mas->index = l_mas->min;
3652
3653		l_mas->offset = l_slot - 1;
3654	}
3655
3656	if (!r_wr_mas->content) {
3657		if (r_mas->last < r_wr_mas->r_max)
3658			r_mas->last = r_wr_mas->r_max;
3659		r_mas->offset++;
3660	} else if ((r_mas->last == r_wr_mas->r_max) &&
3661	    (r_mas->last < r_mas->max) &&
3662	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3663		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3664					     r_wr_mas->type, r_mas->offset + 1);
3665		r_mas->offset++;
3666	}
3667}
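
/*
 * Example: storing NULL over [15, 25] when [10, 14] and [26, 30] already
 * contain NULL extends the write to [10, 30], so adjacent NULL ranges
 * coalesce into a single slot.
 */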
3668
3669static inline void *mas_state_walk(struct ma_state *mas)
3670{
3671	void *entry;
3672
3673	entry = mas_start(mas);
3674	if (mas_is_none(mas))
3675		return NULL;
3676
3677	if (mas_is_ptr(mas))
3678		return entry;
3679
3680	return mtree_range_walk(mas);
3681}
3682
3683/*
3684 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3685 * to date.
3686 *
3687 * @mas: The maple state.
3688 *
3689 * Note: Leaves mas in undesirable state.
3690 * Return: The entry for @mas->index or %NULL on dead node.
3691 */
3692static inline void *mtree_lookup_walk(struct ma_state *mas)
3693{
3694	unsigned long *pivots;
3695	unsigned char offset;
3696	struct maple_node *node;
3697	struct maple_enode *next;
3698	enum maple_type type;
3699	void __rcu **slots;
3700	unsigned char end;
3701
3702	next = mas->node;
3703	do {
3704		node = mte_to_node(next);
3705		type = mte_node_type(next);
3706		pivots = ma_pivots(node, type);
3707		end = mt_pivots[type];
3708		offset = 0;
3709		do {
3710			if (pivots[offset] >= mas->index)
3711				break;
3712		} while (++offset < end);
3713
3714		slots = ma_slots(node, type);
3715		next = mt_slot(mas->tree, slots, offset);
3716		if (unlikely(ma_dead_node(node)))
3717			goto dead_node;
3718	} while (!ma_is_leaf(type));
3719
3720	return (void *)next;
3721
3722dead_node:
3723	mas_reset(mas);
3724	return NULL;
3725}
3726
3727static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
3728/*
3729 * mas_new_root() - Create a new root node that only contains the entry passed
3730 * in.
3731 * @mas: The maple state
3732 * @entry: The entry to store.
3733 *
3734 * Only valid when the index == 0 and the last == ULONG_MAX.
3735 *
3736 * Return: 0 on error, 1 on success.
3737 */
3738static inline int mas_new_root(struct ma_state *mas, void *entry)
3739{
3740	struct maple_enode *root = mas_root_locked(mas);
3741	enum maple_type type = maple_leaf_64;
3742	struct maple_node *node;
3743	void __rcu **slots;
3744	unsigned long *pivots;
3745
3746	if (!entry && !mas->index && mas->last == ULONG_MAX) {
3747		mas->depth = 0;
3748		mas_set_height(mas);
3749		rcu_assign_pointer(mas->tree->ma_root, entry);
3750		mas->status = ma_start;
3751		goto done;
3752	}
3753
3754	mas_node_count(mas, 1);
3755	if (mas_is_err(mas))
3756		return 0;
3757
3758	node = mas_pop_node(mas);
3759	pivots = ma_pivots(node, type);
3760	slots = ma_slots(node, type);
3761	node->parent = ma_parent_ptr(mas_tree_parent(mas));
3762	mas->node = mt_mk_node(node, type);
3763	mas->status = ma_active;
3764	rcu_assign_pointer(slots[0], entry);
3765	pivots[0] = mas->last;
3766	mas->depth = 1;
3767	mas_set_height(mas);
3768	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3769
3770done:
3771	if (xa_is_node(root))
3772		mte_destroy_walk(root, mas->tree);
3773
3774	return 1;
3775}

3776/*
3777 * mas_wr_spanning_store() - Create a subtree with the store operation completed
3778 * and new nodes where necessary, then place the sub-tree in the actual tree.
3779 * Note that mas is expected to point to the node which caused the store to
3780 * span.
3781 * @wr_mas: The maple write state
3782 *
3783 * Return: 0 on error, positive on success.
3784 */
3785static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3786{
3787	struct maple_subtree_state mast;
3788	struct maple_big_node b_node;
3789	struct ma_state *mas;
3790	unsigned char height;
3791
3792	/* Left and Right side of spanning store */
3793	MA_STATE(l_mas, NULL, 0, 0);
3794	MA_STATE(r_mas, NULL, 0, 0);
3795	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3796	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3797
3798	/*
3799	 * A store operation that spans multiple nodes is called a spanning
3800	 * store and is handled early in the store call stack by the function
3801	 * mas_is_span_wr().  When a spanning store is identified, the maple
3802	 * state is duplicated.  The first maple state walks the left tree path
3803	 * to ``index``, the duplicate walks the right tree path to ``last``.
3804	 * The data in the two nodes are combined into a single node, two nodes,
3805	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
3806	 * written to the last entry of a node is considered a spanning store as
3807	 * a rebalance is required for the operation to complete and an overflow
3808	 * of data may happen.
3809	 */
3810	mas = wr_mas->mas;
3811	trace_ma_op(__func__, mas);
3812
3813	if (unlikely(!mas->index && mas->last == ULONG_MAX))
3814		return mas_new_root(mas, wr_mas->entry);
3815	/*
3816	 * Node rebalancing may occur due to this store, so there may be three new
3817	 * entries per level plus a new root.
3818	 */
3819	height = mas_mt_height(mas);
3820	mas_node_count(mas, 1 + height * 3);
3821	if (mas_is_err(mas))
3822		return 0;
3823
3824	/*
3825	 * Set up right side.  Need to get to the next offset after the spanning
3826	 * store to ensure it's not NULL and to combine both the next node and
3827	 * the node with the start together.
3828	 */
3829	r_mas = *mas;
3830	/* Avoid overflow, walk to next slot in the tree. */
3831	if (r_mas.last + 1)
3832		r_mas.last++;
3833
3834	r_mas.index = r_mas.last;
3835	mas_wr_walk_index(&r_wr_mas);
3836	r_mas.last = r_mas.index = mas->last;
3837
3838	/* Set up left side. */
3839	l_mas = *mas;
3840	mas_wr_walk_index(&l_wr_mas);
3841
3842	if (!wr_mas->entry) {
3843		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
3844		mas->offset = l_mas.offset;
3845		mas->index = l_mas.index;
3846		mas->last = l_mas.last = r_mas.last;
3847	}
3848
3849	/* expanding NULLs may make this cover the entire range */
3850	if (!l_mas.index && r_mas.last == ULONG_MAX) {
3851		mas_set_range(mas, 0, ULONG_MAX);
3852		return mas_new_root(mas, wr_mas->entry);
3853	}
3854
3855	memset(&b_node, 0, sizeof(struct maple_big_node));
3856	/* Copy l_mas and store the value in b_node. */
3857	mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
3858	/* Copy r_mas into b_node. */
3859	if (r_mas.offset <= r_mas.end)
3860		mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
3861			   &b_node, b_node.b_end + 1);
3862	else
3863		b_node.b_end++;
3864
3865	/* Stop spanning searches by searching for just index. */
3866	l_mas.index = l_mas.last = mas->index;
3867
3868	mast.bn = &b_node;
3869	mast.orig_l = &l_mas;
3870	mast.orig_r = &r_mas;
3871	/* Combine l_mas and r_mas and split them up evenly again. */
3872	return mas_spanning_rebalance(mas, &mast, height + 1);
3873}
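
/*
 * Worked example (layout hypothetical): if one leaf covers 0-99 and its
 * sibling covers 100-199, a store over 50-149 fits in neither leaf and
 * is therefore a spanning store.  Above, l_mas walks to the slot
 * containing 50, r_mas walks to the slot just past the store (150) and
 * is then clamped back to 149, and the surviving data from both sides
 * plus the new entry are rebuilt by mas_spanning_rebalance().
 */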
3874
3875/*
3876 * mas_wr_node_store() - Attempt to store the value in a node
3877 * @wr_mas: The maple write state
3878 *
3879 * Attempts to reuse the node, but may allocate.
3880 *
3881 * Return: True if stored, false otherwise
3882 */
3883static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
3884				     unsigned char new_end)
3885{
3886	struct ma_state *mas = wr_mas->mas;
3887	void __rcu **dst_slots;
3888	unsigned long *dst_pivots;
3889	unsigned char dst_offset, offset_end = wr_mas->offset_end;
3890	struct maple_node reuse, *newnode;
3891	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
3892	bool in_rcu = mt_in_rcu(mas->tree);
3893
3894	/* Not enough data remains in a non-root node; let the slow path rebalance. */
3895	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
3896	    !(mas->mas_flags & MA_STATE_BULK))
3897		return false;
3898
3899	if (mas->last == wr_mas->end_piv)
3900		offset_end++; /* don't copy this offset */
3901	else if (unlikely(wr_mas->r_max == ULONG_MAX))
3902		mas_bulk_rebalance(mas, mas->end, wr_mas->type);
3903
3904	/* set up node. */
3905	if (in_rcu) {
3906		mas_node_count(mas, 1);
3907		if (mas_is_err(mas))
3908			return false;
3909
3910		newnode = mas_pop_node(mas);
3911	} else {
3912		memset(&reuse, 0, sizeof(struct maple_node));
3913		newnode = &reuse;
3914	}
3915
3916	newnode->parent = mas_mn(mas)->parent;
3917	dst_pivots = ma_pivots(newnode, wr_mas->type);
3918	dst_slots = ma_slots(newnode, wr_mas->type);
3919	/* Copy from start to insert point */
3920	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
3921	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
3922
3923	/* Handle insert of new range starting after old range */
3924	if (wr_mas->r_min < mas->index) {
3925		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
3926		dst_pivots[mas->offset++] = mas->index - 1;
3927	}
3928
3929	/* Store the new entry and range end. */
3930	if (mas->offset < node_pivots)
3931		dst_pivots[mas->offset] = mas->last;
3932	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
3933
3934	/*
3935	 * this range wrote to the end of the node or it overwrote the rest of
3936	 * the data
3937	 */
3938	if (offset_end > mas->end)
3939		goto done;
3940
3941	dst_offset = mas->offset + 1;
3942	/* Copy to the end of node if necessary. */
3943	copy_size = mas->end - offset_end + 1;
3944	memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
3945	       sizeof(void *) * copy_size);
3946	memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
3947	       sizeof(unsigned long) * (copy_size - 1));
3948
3949	if (new_end < node_pivots)
3950		dst_pivots[new_end] = mas->max;
3951
3952done:
3953	mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
3954	if (in_rcu) {
3955		struct maple_enode *old_enode = mas->node;
3956
3957		mas->node = mt_mk_node(newnode, wr_mas->type);
3958		mas_replace_node(mas, old_enode);
3959	} else {
3960		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
3961	}
3962	trace_ma_write(__func__, mas, 0, wr_mas->entry);
3963	mas_update_gap(mas);
3964	mas->end = new_end;
3965	return true;
3966}
3967
3968/*
3969 * mas_wr_slot_store() - Attempt to store a value in a slot.
3970 * @wr_mas: the maple write state
3971 *
3972 * Return: True if stored, false otherwise
3973 */
3974static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
3975{
3976	struct ma_state *mas = wr_mas->mas;
3977	unsigned char offset = mas->offset;
3978	void __rcu **slots = wr_mas->slots;
3979	bool gap = false;
3980
3981	gap |= !mt_slot_locked(mas->tree, slots, offset);
3982	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
3983
3984	if (wr_mas->offset_end - offset == 1) {
3985		if (mas->index == wr_mas->r_min) {
3986			/* Overwriting the range and a part of the next one */
3987			rcu_assign_pointer(slots[offset], wr_mas->entry);
3988			wr_mas->pivots[offset] = mas->last;
3989		} else {
3990			/* Overwriting a part of the range and the next one */
3991			rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
3992			wr_mas->pivots[offset] = mas->index - 1;
3993			mas->offset++; /* Keep mas accurate. */
3994		}
3995	} else if (!mt_in_rcu(mas->tree)) {
3996		/*
3997		 * Expand the range, only partially overwriting the previous and
3998		 * next ranges
3999		 */
4000		gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
4001		rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4002		wr_mas->pivots[offset] = mas->index - 1;
4003		wr_mas->pivots[offset + 1] = mas->last;
4004		mas->offset++; /* Keep mas accurate. */
4005	} else {
4006		return false;
4007	}
4008
4009	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4010	/*
4011	 * Only update gap when the new entry is empty or there is an empty
4012	 * entry in the original two ranges.
4013	 */
4014	if (!wr_mas->entry || gap)
4015		mas_update_gap(mas);
4016
4017	return true;
4018}
4019
4020static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4021{
4022	struct ma_state *mas = wr_mas->mas;
4023
4024	if (!wr_mas->slots[wr_mas->offset_end]) {
4025		/* If this one is null, the next and prev are not */
4026		mas->last = wr_mas->end_piv;
4027	} else {
4028		/* Check next slot(s) if we are overwriting the end */
4029		if ((mas->last == wr_mas->end_piv) &&
4030		    (mas->end != wr_mas->offset_end) &&
4031		    !wr_mas->slots[wr_mas->offset_end + 1]) {
4032			wr_mas->offset_end++;
4033			if (wr_mas->offset_end == mas->end)
4034				mas->last = mas->max;
4035			else
4036				mas->last = wr_mas->pivots[wr_mas->offset_end];
4037			wr_mas->end_piv = mas->last;
4038		}
4039	}
4040
4041	if (!wr_mas->content) {
4042		/* If this one is null, the next and prev are not */
4043		mas->index = wr_mas->r_min;
4044	} else {
4045		/* Check prev slot if we are overwriting the start */
4046		if (mas->index == wr_mas->r_min && mas->offset &&
4047		    !wr_mas->slots[mas->offset - 1]) {
4048			mas->offset--;
4049			wr_mas->r_min = mas->index =
4050				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4051			wr_mas->r_max = wr_mas->pivots[mas->offset];
4052		}
4053	}
4054}
4055
4056static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4057{
4058	while ((wr_mas->offset_end < wr_mas->mas->end) &&
4059	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4060		wr_mas->offset_end++;
4061
4062	if (wr_mas->offset_end < wr_mas->mas->end)
4063		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4064	else
4065		wr_mas->end_piv = wr_mas->mas->max;
4066
4067	if (!wr_mas->entry)
4068		mas_wr_extend_null(wr_mas);
4069}
4070
4071static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4072{
4073	struct ma_state *mas = wr_mas->mas;
4074	unsigned char new_end = mas->end + 2;
4075
4076	new_end -= wr_mas->offset_end - mas->offset;
4077	if (wr_mas->r_min == mas->index)
4078		new_end--;
4079
4080	if (wr_mas->end_piv == mas->last)
4081		new_end--;
4082
4083	return new_end;
4084}
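
/*
 * Worked example for mas_wr_new_end() (numbers hypothetical): a leaf
 * with end = 5 where the new entry exactly replaces the two ranges at
 * offsets 2 and 3 (so r_min == index and end_piv == last) gives
 * new_end = 5 + 2 - (3 - 2) - 1 - 1 = 4: two old slots collapse into
 * one.
 */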
4085
4086/*
4087 * mas_wr_append() - Attempt to append
4088 * @wr_mas: the maple write state
4089 * @new_end: The end of the node after the modification
4090 *
4091 * This is currently unsafe in rcu mode since the end of the node may be cached
4092 * by readers while the node contents may be updated which could result in
4093 * inaccurate information.
4094 *
4095 * Return: True if appended, false otherwise
4096 */
4097static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4098		unsigned char new_end)
4099{
4100	struct ma_state *mas;
4101	void __rcu **slots;
4102	unsigned char end;
4103
4104	mas = wr_mas->mas;
4105	if (mt_in_rcu(mas->tree))
4106		return false;
4107
4108	end = mas->end;
4109	if (mas->offset != end)
4110		return false;
4111
4112	if (new_end < mt_pivots[wr_mas->type]) {
4113		wr_mas->pivots[new_end] = wr_mas->pivots[end];
4114		ma_set_meta(wr_mas->node, wr_mas->type, 0, new_end);
4115	}
4116
4117	slots = wr_mas->slots;
4118	if (new_end == end + 1) {
4119		if (mas->last == wr_mas->r_max) {
4120			/* Append to end of range */
4121			rcu_assign_pointer(slots[new_end], wr_mas->entry);
4122			wr_mas->pivots[end] = mas->index - 1;
4123			mas->offset = new_end;
4124		} else {
4125			/* Append to start of range */
4126			rcu_assign_pointer(slots[new_end], wr_mas->content);
4127			wr_mas->pivots[end] = mas->last;
4128			rcu_assign_pointer(slots[end], wr_mas->entry);
4129		}
4130	} else {
4131		/* Append to the range without touching any boundaries. */
4132		rcu_assign_pointer(slots[new_end], wr_mas->content);
4133		wr_mas->pivots[end + 1] = mas->last;
4134		rcu_assign_pointer(slots[end + 1], wr_mas->entry);
4135		wr_mas->pivots[end] = mas->index - 1;
4136		mas->offset = end + 1;
4137	}
4138
4139	if (!wr_mas->content || !wr_mas->entry)
4140		mas_update_gap(mas);
4141
4142	mas->end = new_end;
4143	trace_ma_write(__func__, mas, new_end, wr_mas->entry);
4144	return true;
4145}
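
/*
 * Append sketch (values hypothetical): a non-RCU leaf whose last range
 * is 50-99 => A (mas->max == 99) can absorb a store of 80-99 => B
 * without allocating: the "append to end of range" case above rewrites
 * pivots[end] to 79 and places B in the next slot, yielding
 * 50-79 => A, 80-99 => B.
 */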
4146
4147/*
4148 * mas_wr_bnode() - Slow path for a modification.
4149 * @wr_mas: The write maple state
4150 *
4151 * This is where splits and rebalances end up.
4152 */
4153static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4154{
4155	struct maple_big_node b_node;
4156
4157	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4158	memset(&b_node, 0, sizeof(struct maple_big_node));
4159	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4160	mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
4161}
4162
4163static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4164{
4165	struct ma_state *mas = wr_mas->mas;
4166	unsigned char new_end;
4167
4168	/* Direct replacement */
4169	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4170		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4171		if (!!wr_mas->entry ^ !!wr_mas->content)
4172			mas_update_gap(mas);
4173		return;
4174	}
4175
4176	/*
4177	 * new_end exceeds the size of the maple node and cannot enter the fast
4178	 * path.
4179	 */
4180	new_end = mas_wr_new_end(wr_mas);
4181	if (new_end >= mt_slots[wr_mas->type])
4182		goto slow_path;
4183
4184	/* Attempt to append */
4185	if (mas_wr_append(wr_mas, new_end))
4186		return;
4187
4188	if (new_end == mas->end && mas_wr_slot_store(wr_mas))
4189		return;
4190
4191	if (mas_wr_node_store(wr_mas, new_end))
4192		return;
4193
4194	if (mas_is_err(mas))
4195		return;
4196
4197slow_path:
4198	mas_wr_bnode(wr_mas);
4199}
4200
4201/*
4202 * mas_wr_store_entry() - Internal call to store a value
4203 * @wr_mas: The maple write state
4204 *
4205 * Return: The contents that were previously stored at the index.
4207 */
4208static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4209{
4210	struct ma_state *mas = wr_mas->mas;
4211
4212	wr_mas->content = mas_start(mas);
4213	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4214		mas_store_root(mas, wr_mas->entry);
4215		return wr_mas->content;
4216	}
4217
4218	if (unlikely(!mas_wr_walk(wr_mas))) {
4219		mas_wr_spanning_store(wr_mas);
4220		return wr_mas->content;
4221	}
4222
4223	/* At this point, we are at the leaf node that needs to be altered. */
4224	mas_wr_end_piv(wr_mas);
4225	/* New root for a single pointer */
4226	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4227		mas_new_root(mas, wr_mas->entry);
4228		return wr_mas->content;
4229	}
4230
4231	mas_wr_modify(wr_mas);
4232	return wr_mas->content;
4233}
4234
4235/**
4236 * mas_insert() - Internal call to insert a value
4237 * @mas: The maple state
4238 * @entry: The entry to store
4239 *
4240 * Return: %NULL or the contents that already exists at the requested index
4241 * otherwise.  The maple state needs to be checked for error conditions.
4242 */
4243static inline void *mas_insert(struct ma_state *mas, void *entry)
4244{
4245	MA_WR_STATE(wr_mas, mas, entry);
4246
4247	/*
4248	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4249	 * tree.  If the insert fits exactly into an existing gap with a value
4250	 * of NULL, then the slot only needs to be written with the new value.
4251	 * If the range being inserted is adjacent to another range, then only a
4252	 * single pivot needs to be inserted (as well as writing the entry).  If
4253	 * the new range is within a gap but does not touch any other ranges,
4254	 * then two pivots need to be inserted: the start - 1, and the end.  As
4255	 * usual, the entry must be written.  Most operations require a new node
4256	 * to be allocated and replace an existing node to ensure RCU safety,
4257	 * when in RCU mode.  The exception to requiring a newly allocated node
4258	 * is when inserting at the end of a node (appending).  When done
4259	 * carefully, appending can reuse the node in place.
4260	 */
4261	wr_mas.content = mas_start(mas);
4262	if (wr_mas.content)
4263		goto exists;
4264
4265	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4266		mas_store_root(mas, entry);
4267		return NULL;
4268	}
4269
4270	/* spanning writes always overwrite something */
4271	if (!mas_wr_walk(&wr_mas))
4272		goto exists;
4273
4274	/* At this point, we are at the leaf node that needs to be altered. */
4275	wr_mas.offset_end = mas->offset;
4276	wr_mas.end_piv = wr_mas.r_max;
4277
4278	if (wr_mas.content || (mas->last > wr_mas.r_max))
4279		goto exists;
4280
4281	if (!entry)
4282		return NULL;
4283
4284	mas_wr_modify(&wr_mas);
4285	return wr_mas.content;
4286
4287exists:
4288	mas_set_err(mas, -EEXIST);
4289	return wr_mas.content;
4291}
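
/*
 * Pivot-count sketch for the cases described above (ranges
 * hypothetical): inserting 10-19 into an exact NULL gap 10-19 writes no
 * new pivot; inserting 10-19 at the start of a NULL gap 10-29 adds one
 * pivot (19); inserting 10-19 into the middle of a NULL gap 0-99 adds
 * two pivots (9 and 19).
 */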
4292
4293static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4294{
4295retry:
4296	mas_set(mas, index);
4297	mas_state_walk(mas);
4298	if (mas_is_start(mas))
4299		goto retry;
4300}
4301
4302static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
4303		struct maple_node *node, const unsigned long index)
4304{
4305	if (unlikely(ma_dead_node(node))) {
4306		mas_rewalk(mas, index);
4307		return true;
4308	}
4309	return false;
4310}
4311
4312/*
4313 * mas_prev_node() - Find the prev non-null entry at the same level in the
4314 * tree.
4315 * @mas: The maple state
4316 * @min: The lower limit to search
4317 *
4318 * The prev node value will be mas->node[mas->offset] or the status will be
4319 * ma_none.
4321 * Return: 1 if the node is dead, 0 otherwise.
4322 */
4323static int mas_prev_node(struct ma_state *mas, unsigned long min)
4324{
4325	enum maple_type mt;
4326	int offset, level;
4327	void __rcu **slots;
4328	struct maple_node *node;
4329	unsigned long *pivots;
4330	unsigned long max;
4331
4332	node = mas_mn(mas);
4333	if (!mas->min)
4334		goto no_entry;
4335
4336	max = mas->min - 1;
4337	if (max < min)
4338		goto no_entry;
4339
4340	level = 0;
4341	do {
4342		if (ma_is_root(node))
4343			goto no_entry;
4344
4345		/* Walk up. */
4346		if (unlikely(mas_ascend(mas)))
4347			return 1;
4348		offset = mas->offset;
4349		level++;
4350		node = mas_mn(mas);
4351	} while (!offset);
4352
4353	offset--;
4354	mt = mte_node_type(mas->node);
4355	while (level > 1) {
4356		level--;
4357		slots = ma_slots(node, mt);
4358		mas->node = mas_slot(mas, slots, offset);
4359		if (unlikely(ma_dead_node(node)))
4360			return 1;
4361
4362		mt = mte_node_type(mas->node);
4363		node = mas_mn(mas);
4364		pivots = ma_pivots(node, mt);
4365		offset = ma_data_end(node, mt, pivots, max);
4366		if (unlikely(ma_dead_node(node)))
4367			return 1;
4368	}
4369
4370	slots = ma_slots(node, mt);
4371	mas->node = mas_slot(mas, slots, offset);
4372	pivots = ma_pivots(node, mt);
4373	if (unlikely(ma_dead_node(node)))
4374		return 1;
4375
4376	if (likely(offset))
4377		mas->min = pivots[offset - 1] + 1;
4378	mas->max = max;
4379	mas->offset = mas_data_end(mas);
4380	if (unlikely(mte_dead_node(mas->node)))
4381		return 1;
4382
4383	mas->end = mas->offset;
4384	return 0;
4385
4386no_entry:
4387	if (unlikely(ma_dead_node(node)))
4388		return 1;
4389
4390	mas->status = ma_underflow;
4391	return 0;
4392}
4393
4394/*
4395 * mas_prev_slot() - Get the entry in the previous slot
4396 *
4397 * @mas: The maple state
4398 * @min: The minimum starting range
4399 * @empty: Can be empty
4401 *
4402 * Return: The entry in the previous slot which is possibly NULL
4403 */
4404static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4405{
4406	void *entry;
4407	void __rcu **slots;
4408	unsigned long pivot;
4409	enum maple_type type;
4410	unsigned long *pivots;
4411	struct maple_node *node;
4412	unsigned long save_point = mas->index;
4413
4414retry:
4415	node = mas_mn(mas);
4416	type = mte_node_type(mas->node);
4417	pivots = ma_pivots(node, type);
4418	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4419		goto retry;
4420
4421	if (mas->min <= min) {
4422		pivot = mas_safe_min(mas, pivots, mas->offset);
4423
4424		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4425			goto retry;
4426
4427		if (pivot <= min)
4428			goto underflow;
4429	}
4430
4431again:
4432	if (likely(mas->offset)) {
4433		mas->offset--;
4434		mas->last = mas->index - 1;
4435		mas->index = mas_safe_min(mas, pivots, mas->offset);
4436	} else  {
4437		if (mas->index <= min)
4438			goto underflow;
4439
4440		if (mas_prev_node(mas, min)) {
4441			mas_rewalk(mas, save_point);
4442			goto retry;
4443		}
4444
4445		if (WARN_ON_ONCE(mas_is_underflow(mas)))
4446			return NULL;
4447
4448		mas->last = mas->max;
4449		node = mas_mn(mas);
4450		type = mte_node_type(mas->node);
4451		pivots = ma_pivots(node, type);
4452		mas->index = pivots[mas->offset - 1] + 1;
4453	}
4454
4455	slots = ma_slots(node, type);
4456	entry = mas_slot(mas, slots, mas->offset);
4457	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4458		goto retry;
4459
4461	if (likely(entry))
4462		return entry;
4463
4464	if (!empty) {
4465		if (mas->index <= min) {
4466			mas->status = ma_underflow;
4467			return NULL;
4468		}
4469
4470		goto again;
4471	}
4472
4473	return entry;
4474
4475underflow:
4476	mas->status = ma_underflow;
4477	return NULL;
4478}
4479
4480/*
4481 * mas_next_node() - Get the next node at the same level in the tree.
4482 * @mas: The maple state
 * @node: The maple node
4483 * @max: The maximum pivot value to check.
4484 *
4485 * The next value will be mas->node[mas->offset] or the status will have
4486 * overflowed.
4487 * Return: 1 on dead node, 0 otherwise.
4488 */
4489static int mas_next_node(struct ma_state *mas, struct maple_node *node,
4490		unsigned long max)
4491{
4492	unsigned long min;
4493	unsigned long *pivots;
4494	struct maple_enode *enode;
4495	struct maple_node *tmp;
4496	int level = 0;
4497	unsigned char node_end;
4498	enum maple_type mt;
4499	void __rcu **slots;
4500
4501	if (mas->max >= max)
4502		goto overflow;
4503
4504	min = mas->max + 1;
4505	level = 0;
4506	do {
4507		if (ma_is_root(node))
4508			goto overflow;
4509
4510		/* Walk up. */
4511		if (unlikely(mas_ascend(mas)))
4512			return 1;
4513
4514		level++;
4515		node = mas_mn(mas);
4516		mt = mte_node_type(mas->node);
4517		pivots = ma_pivots(node, mt);
4518		node_end = ma_data_end(node, mt, pivots, mas->max);
4519		if (unlikely(ma_dead_node(node)))
4520			return 1;
4521
4522	} while (unlikely(mas->offset == node_end));
4523
4524	slots = ma_slots(node, mt);
4525	mas->offset++;
4526	enode = mas_slot(mas, slots, mas->offset);
4527	if (unlikely(ma_dead_node(node)))
4528		return 1;
4529
4530	if (level > 1)
4531		mas->offset = 0;
4532
4533	while (unlikely(level > 1)) {
4534		level--;
4535		mas->node = enode;
4536		node = mas_mn(mas);
4537		mt = mte_node_type(mas->node);
4538		slots = ma_slots(node, mt);
4539		enode = mas_slot(mas, slots, 0);
4540		if (unlikely(ma_dead_node(node)))
4541			return 1;
4542	}
4543
4544	if (!mas->offset)
4545		pivots = ma_pivots(node, mt);
4546
4547	mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4548	tmp = mte_to_node(enode);
4549	mt = mte_node_type(enode);
4550	pivots = ma_pivots(tmp, mt);
4551	mas->end = ma_data_end(tmp, mt, pivots, mas->max);
4552	if (unlikely(ma_dead_node(node)))
4553		return 1;
4554
4555	mas->node = enode;
4556	mas->min = min;
4557	return 0;
4558
4559overflow:
4560	if (unlikely(ma_dead_node(node)))
4561		return 1;
4562
4563	mas->status = ma_overflow;
4564	return 0;
4565}
4566
4567/*
4568 * mas_next_slot() - Get the entry in the next slot
4569 *
4570 * @mas: The maple state
4571 * @max: The maximum starting range
4572 * @empty: Can be empty
4575 *
4576 * Return: The entry in the next slot which is possibly NULL
4577 */
4578static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4579{
4580	void __rcu **slots;
4581	unsigned long *pivots;
4582	unsigned long pivot;
4583	enum maple_type type;
4584	struct maple_node *node;
4585	unsigned long save_point = mas->last;
4586	void *entry;
4587
4588retry:
4589	node = mas_mn(mas);
4590	type = mte_node_type(mas->node);
4591	pivots = ma_pivots(node, type);
4592	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4593		goto retry;
4594
4595	if (mas->max >= max) {
4596		if (likely(mas->offset < mas->end))
4597			pivot = pivots[mas->offset];
4598		else
4599			pivot = mas->max;
4600
4601		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4602			goto retry;
4603
4604		if (pivot >= max) { /* Was at the limit, next will extend beyond */
4605			mas->status = ma_overflow;
4606			return NULL;
4607		}
4608	}
4609
4610	if (likely(mas->offset < mas->end)) {
4611		mas->index = pivots[mas->offset] + 1;
4612again:
4613		mas->offset++;
4614		if (likely(mas->offset < mas->end))
4615			mas->last = pivots[mas->offset];
4616		else
4617			mas->last = mas->max;
4618	} else  {
4619		if (mas->last >= max) {
4620			mas->status = ma_overflow;
4621			return NULL;
4622		}
4623
4624		if (mas_next_node(mas, node, max)) {
4625			mas_rewalk(mas, save_point);
4626			goto retry;
4627		}
4628
4629		if (WARN_ON_ONCE(mas_is_overflow(mas)))
4630			return NULL;
4631
4632		mas->offset = 0;
4633		mas->index = mas->min;
4634		node = mas_mn(mas);
4635		type = mte_node_type(mas->node);
4636		pivots = ma_pivots(node, type);
4637		mas->last = pivots[0];
4638	}
4639
4640	slots = ma_slots(node, type);
4641	entry = mt_slot(mas->tree, slots, mas->offset);
4642	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4643		goto retry;
4644
4645	if (entry)
4646		return entry;
4647
4649	if (!empty) {
4650		if (mas->last >= max) {
4651			mas->status = ma_overflow;
4652			return NULL;
4653		}
4654
4655		mas->index = mas->last + 1;
4656		goto again;
4657	}
4658
4659	return entry;
4660}
4661
4662/*
4663 * mas_next_entry() - Internal function to get the next entry.
4664 * @mas: The maple state
4665 * @limit: The maximum range start.
4666 *
4667 * Set the @mas->node to the next entry and the range_start to
4668 * the beginning value for the entry.  Does not check beyond @limit.
4669 * Sets @mas->index and @mas->last to the range; they are not updated on
4670 * overflow.
4671 * Restarts on dead nodes.
4672 *
4673 * Return: the next entry or %NULL.
4674 */
4675static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4676{
4677	if (mas->last >= limit) {
4678		mas->status = ma_overflow;
4679		return NULL;
4680	}
4681
4682	return mas_next_slot(mas, limit, false);
4683}
4684
4685/*
4686 * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4687 * highest gap address of a given size in a given node and descend.
4688 * @mas: The maple state
4689 * @size: The needed size.
4690 *
4691 * Return: True if found in a leaf, false otherwise.
4693 */
4694static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4695		unsigned long *gap_min, unsigned long *gap_max)
4696{
4697	enum maple_type type = mte_node_type(mas->node);
4698	struct maple_node *node = mas_mn(mas);
4699	unsigned long *pivots, *gaps;
4700	void __rcu **slots;
4701	unsigned long gap = 0;
4702	unsigned long max, min;
4703	unsigned char offset;
4704
4705	if (unlikely(mas_is_err(mas)))
4706		return true;
4707
4708	if (ma_is_dense(type)) {
4709		/* dense nodes. */
4710		mas->offset = (unsigned char)(mas->index - mas->min);
4711		return true;
4712	}
4713
4714	pivots = ma_pivots(node, type);
4715	slots = ma_slots(node, type);
4716	gaps = ma_gaps(node, type);
4717	offset = mas->offset;
4718	min = mas_safe_min(mas, pivots, offset);
4719	/* Skip out of bounds. */
4720	while (mas->last < min)
4721		min = mas_safe_min(mas, pivots, --offset);
4722
4723	max = mas_safe_pivot(mas, pivots, offset, type);
4724	while (mas->index <= max) {
4725		gap = 0;
4726		if (gaps)
4727			gap = gaps[offset];
4728		else if (!mas_slot(mas, slots, offset))
4729			gap = max - min + 1;
4730
4731		if (gap) {
4732			if ((size <= gap) && (size <= mas->last - min + 1))
4733				break;
4734
4735			if (!gaps) {
4736				/* Skip the next slot, it cannot be a gap. */
4737				if (offset < 2)
4738					goto ascend;
4739
4740				offset -= 2;
4741				max = pivots[offset];
4742				min = mas_safe_min(mas, pivots, offset);
4743				continue;
4744			}
4745		}
4746
4747		if (!offset)
4748			goto ascend;
4749
4750		offset--;
4751		max = min - 1;
4752		min = mas_safe_min(mas, pivots, offset);
4753	}
4754
4755	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4756		goto no_space;
4757
4758	if (unlikely(ma_is_leaf(type))) {
4759		mas->offset = offset;
4760		*gap_min = min;
4761		*gap_max = min + gap - 1;
4762		return true;
4763	}
4764
4765	/* descend, only happens under lock. */
4766	mas->node = mas_slot(mas, slots, offset);
4767	mas->min = min;
4768	mas->max = max;
4769	mas->offset = mas_data_end(mas);
4770	return false;
4771
4772ascend:
4773	if (!mte_is_root(mas->node))
4774		return false;
4775
4776no_space:
4777	mas_set_err(mas, -EBUSY);
4778	return false;
4779}
4780
4781static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4782{
4783	enum maple_type type = mte_node_type(mas->node);
4784	unsigned long pivot, min, gap = 0;
4785	unsigned char offset, data_end;
4786	unsigned long *gaps, *pivots;
4787	void __rcu **slots;
4788	struct maple_node *node;
4789	bool found = false;
4790
4791	if (ma_is_dense(type)) {
4792		mas->offset = (unsigned char)(mas->index - mas->min);
4793		return true;
4794	}
4795
4796	node = mas_mn(mas);
4797	pivots = ma_pivots(node, type);
4798	slots = ma_slots(node, type);
4799	gaps = ma_gaps(node, type);
4800	offset = mas->offset;
4801	min = mas_safe_min(mas, pivots, offset);
4802	data_end = ma_data_end(node, type, pivots, mas->max);
4803	for (; offset <= data_end; offset++) {
4804		pivot = mas_safe_pivot(mas, pivots, offset, type);
4805
4806		/* Not within lower bounds */
4807		if (mas->index > pivot)
4808			goto next_slot;
4809
4810		if (gaps)
4811			gap = gaps[offset];
4812		else if (!mas_slot(mas, slots, offset))
4813			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4814		else
4815			goto next_slot;
4816
4817		if (gap >= size) {
4818			if (ma_is_leaf(type)) {
4819				found = true;
4820				goto done;
4821			}
4822			if (mas->index <= pivot) {
4823				mas->node = mas_slot(mas, slots, offset);
4824				mas->min = min;
4825				mas->max = pivot;
4826				offset = 0;
4827				break;
4828			}
4829		}
4830next_slot:
4831		min = pivot + 1;
4832		if (mas->last <= pivot) {
4833			mas_set_err(mas, -EBUSY);
4834			return true;
4835		}
4836	}
4837
4838	if (mte_is_root(mas->node))
4839		found = true;
4840done:
4841	mas->offset = offset;
4842	return found;
4843}
4844
4845/**
4846 * mas_walk() - Search for @mas->index in the tree.
4847 * @mas: The maple state.
4848 *
4849 * mas->index and mas->last will be set to the range if there is a value.  If
4850 * mas->status is ma_none, it is reset to ma_start.
4851 *
4852 * Return: the entry at the location or %NULL.
4853 */
4854void *mas_walk(struct ma_state *mas)
4855{
4856	void *entry;
4857
4858	if (!mas_is_active(mas) || !mas_is_start(mas))
4859		mas->status = ma_start;
4860retry:
4861	entry = mas_state_walk(mas);
4862	if (mas_is_start(mas)) {
4863		goto retry;
4864	} else if (mas_is_none(mas)) {
4865		mas->index = 0;
4866		mas->last = ULONG_MAX;
4867	} else if (mas_is_ptr(mas)) {
4868		if (!mas->index) {
4869			mas->last = 0;
4870			return entry;
4871		}
4872
4873		mas->index = 1;
4874		mas->last = ULONG_MAX;
4875		mas->status = ma_none;
4876		return NULL;
4877	}
4878
4879	return entry;
4880}
4881EXPORT_SYMBOL_GPL(mas_walk);
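
/*
 * Minimal usage sketch for mas_walk(); my_tree and its contents are
 * hypothetical:
 *
 *	MA_STATE(mas, &my_tree, 42, 42);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);
 *	rcu_read_unlock();
 *
 * On return, mas.index and mas.last span the range that was found.
 */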
4882
4883static inline bool mas_rewind_node(struct ma_state *mas)
4884{
4885	unsigned char slot;
4886
4887	do {
4888		if (mte_is_root(mas->node)) {
4889			slot = mas->offset;
4890			if (!slot)
4891				return false;
4892		} else {
4893			mas_ascend(mas);
4894			slot = mas->offset;
4895		}
4896	} while (!slot);
4897
4898	mas->offset = --slot;
4899	return true;
4900}
4901
4902/*
4903 * mas_skip_node() - Internal function.  Skip over a node.
4904 * @mas: The maple state.
4905 *
4906 * Return: true if there is another node, false otherwise.
4907 */
4908static inline bool mas_skip_node(struct ma_state *mas)
4909{
4910	if (mas_is_err(mas))
4911		return false;
4912
4913	do {
4914		if (mte_is_root(mas->node)) {
4915			if (mas->offset >= mas_data_end(mas)) {
4916				mas_set_err(mas, -EBUSY);
4917				return false;
4918			}
4919		} else {
4920			mas_ascend(mas);
4921		}
4922	} while (mas->offset >= mas_data_end(mas));
4923
4924	mas->offset++;
4925	return true;
4926}
4927
4928/*
4929 * mas_awalk() - Allocation walk.  Search from low address to high for a gap of
4930 * @size.
4931 * @mas: The maple state
4932 * @size: The size of the gap required
4933 *
4934 * Search between @mas->index and @mas->last for a gap of @size.
4935 */
4936static inline void mas_awalk(struct ma_state *mas, unsigned long size)
4937{
4938	struct maple_enode *last = NULL;
4939
4940	/*
4941	 * There are 4 options:
4942	 * go to child (descend)
4943	 * go back to parent (ascend)
4944	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
4945	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
4946	 */
4947	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
4948		if (last == mas->node)
4949			mas_skip_node(mas);
4950		else
4951			last = mas->node;
4952	}
4953}
4954
4955/*
4956 * mas_sparse_area() - Internal function.  Return upper or lower limit when
4957 * searching for a gap in an empty tree.
4958 * @mas: The maple state
4959 * @min: the minimum range
4960 * @max: The maximum range
4961 * @size: The size of the gap
4962 * @fwd: Searching forward or back
4963 */
4964static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
4965				unsigned long max, unsigned long size, bool fwd)
4966{
4967	if (!unlikely(mas_is_none(mas)) && min == 0) {
4968		min++;
4969		/*
4970		 * min has been increased, so recheck that a gap of @size
4971		 * still fits between min and max.
4972		 */
4973		if (min > max || max - min + 1 < size)
4974			return -EBUSY;
4975	}
4976	/* mas_is_ptr */
4977
4978	if (fwd) {
4979		mas->index = min;
4980		mas->last = min + size - 1;
4981	} else {
4982		mas->last = max;
4983		mas->index = max - size + 1;
4984	}
4985	return 0;
4986}
4987
4988/*
4989 * mas_empty_area() - Get the lowest address within the range that is
4990 * sufficient for the size requested.
4991 * @mas: The maple state
4992 * @min: The lowest value of the range
4993 * @max: The highest value of the range
4994 * @size: The size needed
4995 */
4996int mas_empty_area(struct ma_state *mas, unsigned long min,
4997		unsigned long max, unsigned long size)
4998{
4999	unsigned char offset;
5000	unsigned long *pivots;
5001	enum maple_type mt;
5002	struct maple_node *node;
5003
5004	if (min > max)
5005		return -EINVAL;
5006
5007	if (size == 0 || max - min < size - 1)
5008		return -EINVAL;
5009
5010	if (mas_is_start(mas))
5011		mas_start(mas);
5012	else if (mas->offset >= 2)
5013		mas->offset -= 2;
5014	else if (!mas_skip_node(mas))
5015		return -EBUSY;
5016
5017	/* Empty set */
5018	if (mas_is_none(mas) || mas_is_ptr(mas))
5019		return mas_sparse_area(mas, min, max, size, true);
5020
5021	/* The start of the window can only be within these values */
5022	mas->index = min;
5023	mas->last = max;
5024	mas_awalk(mas, size);
5025
5026	if (unlikely(mas_is_err(mas)))
5027		return xa_err(mas->node);
5028
5029	offset = mas->offset;
5030	if (unlikely(offset == MAPLE_NODE_SLOTS))
5031		return -EBUSY;
5032
5033	node = mas_mn(mas);
5034	mt = mte_node_type(mas->node);
5035	pivots = ma_pivots(node, mt);
5036	min = mas_safe_min(mas, pivots, offset);
5037	if (mas->index < min)
5038		mas->index = min;
5039	mas->last = mas->index + size - 1;
5040	mas->end = ma_data_end(node, mt, pivots, mas->max);
5041	return 0;
5042}
5043EXPORT_SYMBOL_GPL(mas_empty_area);
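
/*
 * Usage sketch (my_tree and ptr are hypothetical): find the lowest gap
 * of 16 free indices between 0 and 1000, then claim it:
 *
 *	MA_STATE(mas, &my_tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, 1000, 16))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	mas_unlock(&mas);
 *
 * On success, mas.index and mas.last describe the gap, so the store
 * above fills exactly the returned range.
 */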
5044
5045/*
5046 * mas_empty_area_rev() - Get the highest address within the range that is
5047 * sufficient for the size requested.
5048 * @mas: The maple state
5049 * @min: The lowest value of the range
5050 * @max: The highest value of the range
5051 * @size: The size needed
5052 */
5053int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5054		unsigned long max, unsigned long size)
5055{
5056	struct maple_enode *last = mas->node;
5057
5058	if (min > max)
5059		return -EINVAL;
5060
5061	if (size == 0 || max - min < size - 1)
5062		return -EINVAL;
5063
5064	if (mas_is_start(mas)) {
5065		mas_start(mas);
5066		mas->offset = mas_data_end(mas);
5067	} else if (mas->offset >= 2) {
5068		mas->offset -= 2;
5069	} else if (!mas_rewind_node(mas)) {
5070		return -EBUSY;
5071	}
5072
5073	/* Empty set. */
5074	if (mas_is_none(mas) || mas_is_ptr(mas))
5075		return mas_sparse_area(mas, min, max, size, false);
5076
5077	/* The start of the window can only be within these values. */
5078	mas->index = min;
5079	mas->last = max;
5080
5081	while (!mas_rev_awalk(mas, size, &min, &max)) {
5082		if (last == mas->node) {
5083			if (!mas_rewind_node(mas))
5084				return -EBUSY;
5085		} else {
5086			last = mas->node;
5087		}
5088	}
5089
5090	if (mas_is_err(mas))
5091		return xa_err(mas->node);
5092
5093	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5094		return -EBUSY;
5095
5096	/* Trim the upper limit to the max. */
5097	if (max < mas->last)
5098		mas->last = max;
5099
5100	mas->index = mas->last - size + 1;
5101	mas->end = mas_data_end(mas);
5102	return 0;
5103}
5104EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5105
5106/*
5107 * mte_dead_leaves() - Mark all leaves of a node as dead.
5108 * @mas: The maple state
5109 * @enode: The encoded maple node
5110 * @mt: The maple tree
5111 * @slots: Pointer to the slot array
5112 * Must hold the write lock.
5113 *
5114 * Return: The number of leaves marked as dead.
5115 */
5116static inline
5117unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5118			      void __rcu **slots)
5119{
5120	struct maple_node *node;
5121	enum maple_type type;
5122	void *entry;
5123	int offset;
5124
5125	for (offset = 0; offset < mt_slot_count(enode); offset++) {
5126		entry = mt_slot(mt, slots, offset);
5127		type = mte_node_type(entry);
5128		node = mte_to_node(entry);
5129		/* Use both node and type to catch LE & BE metadata */
5130		if (!node || !type)
5131			break;
5132
5133		mte_set_node_dead(entry);
5134		node->type = type;
5135		rcu_assign_pointer(slots[offset], node);
5136	}
5137
5138	return offset;
5139}
5140
5141/**
5142 * mte_dead_walk() - Walk down a dead tree to just before the leaves
5143 * @enode: The maple encoded node
5144 * @offset: The starting offset
5145 *
5146 * Note: This can only be used from the RCU callback context.
5147 */
5148static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5149{
5150	struct maple_node *node, *next;
5151	void __rcu **slots = NULL;
5152
5153	next = mte_to_node(*enode);
5154	do {
5155		*enode = ma_enode_ptr(next);
5156		node = mte_to_node(*enode);
5157		slots = ma_slots(node, node->type);
5158		next = rcu_dereference_protected(slots[offset],
5159					lock_is_held(&rcu_callback_map));
5160		offset = 0;
5161	} while (!ma_is_leaf(next->type));
5162
5163	return slots;
5164}
5165
5166/**
5167 * mt_free_walk() - Walk & free a tree in the RCU callback context
5168 * @head: The RCU head that's within the node.
5169 *
5170 * Note: This can only be used from the RCU callback context.
5171 */
5172static void mt_free_walk(struct rcu_head *head)
5173{
5174	void __rcu **slots;
5175	struct maple_node *node, *start;
5176	struct maple_enode *enode;
5177	unsigned char offset;
5178	enum maple_type type;
5179
5180	node = container_of(head, struct maple_node, rcu);
5181
5182	if (ma_is_leaf(node->type))
5183		goto free_leaf;
5184
5185	start = node;
5186	enode = mt_mk_node(node, node->type);
5187	slots = mte_dead_walk(&enode, 0);
5188	node = mte_to_node(enode);
5189	do {
5190		mt_free_bulk(node->slot_len, slots);
5191		offset = node->parent_slot + 1;
5192		enode = node->piv_parent;
5193		if (mte_to_node(enode) == node)
5194			goto free_leaf;
5195
5196		type = mte_node_type(enode);
5197		slots = ma_slots(mte_to_node(enode), type);
5198		if ((offset < mt_slots[type]) &&
5199		    rcu_dereference_protected(slots[offset],
5200					      lock_is_held(&rcu_callback_map)))
5201			slots = mte_dead_walk(&enode, offset);
5202		node = mte_to_node(enode);
5203	} while ((node != start) || (node->slot_len < offset));
5204
5205	slots = ma_slots(node, node->type);
5206	mt_free_bulk(node->slot_len, slots);
5207
5208free_leaf:
5209	mt_free_rcu(&node->rcu);
5210}
5211
5212static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5213	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5214{
5215	struct maple_node *node;
5216	struct maple_enode *next = *enode;
5217	void __rcu **slots = NULL;
5218	enum maple_type type;
5219	unsigned char next_offset = 0;
5220
5221	do {
5222		*enode = next;
5223		node = mte_to_node(*enode);
5224		type = mte_node_type(*enode);
5225		slots = ma_slots(node, type);
5226		next = mt_slot_locked(mt, slots, next_offset);
5227		if (mte_dead_node(next))
5228			next = mt_slot_locked(mt, slots, ++next_offset);
5229
5230		mte_set_node_dead(*enode);
5231		node->type = type;
5232		node->piv_parent = prev;
5233		node->parent_slot = offset;
5234		offset = next_offset;
5235		next_offset = 0;
5236		prev = *enode;
5237	} while (!mte_is_leaf(next));
5238
5239	return slots;
5240}
5241
5242static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5243			    bool free)
5244{
5245	void __rcu **slots;
5246	struct maple_node *node = mte_to_node(enode);
5247	struct maple_enode *start;
5248
5249	if (mte_is_leaf(enode)) {
5250		node->type = mte_node_type(enode);
5251		goto free_leaf;
5252	}
5253
5254	start = enode;
5255	slots = mte_destroy_descend(&enode, mt, start, 0);
5256	node = mte_to_node(enode); /* Updated in the above call. */
5257	do {
5258		enum maple_type type;
5259		unsigned char offset;
5260		struct maple_enode *parent, *tmp;
5261
5262		node->slot_len = mte_dead_leaves(enode, mt, slots);
5263		if (free)
5264			mt_free_bulk(node->slot_len, slots);
5265		offset = node->parent_slot + 1;
5266		enode = node->piv_parent;
5267		if (mte_to_node(enode) == node)
5268			goto free_leaf;
5269
5270		type = mte_node_type(enode);
5271		slots = ma_slots(mte_to_node(enode), type);
5272		if (offset >= mt_slots[type])
5273			goto next;
5274
5275		tmp = mt_slot_locked(mt, slots, offset);
5276		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5277			parent = enode;
5278			enode = tmp;
5279			slots = mte_destroy_descend(&enode, mt, parent, offset);
5280		}
5281next:
5282		node = mte_to_node(enode);
5283	} while (start != enode);
5284
5285	node = mte_to_node(enode);
5286	node->slot_len = mte_dead_leaves(enode, mt, slots);
5287	if (free)
5288		mt_free_bulk(node->slot_len, slots);
5289
5290free_leaf:
5291	if (free)
5292		mt_free_rcu(&node->rcu);
5293	else
5294		mt_clear_meta(mt, node, node->type);
5295}
5296
5297/*
5298 * mte_destroy_walk() - Free a tree or sub-tree.
5299 * @enode: the encoded maple node (maple_enode) to start
5300 * @mt: the tree to free - needed for node types.
5301 *
5302 * Must hold the write lock.
5303 */
5304static inline void mte_destroy_walk(struct maple_enode *enode,
5305				    struct maple_tree *mt)
5306{
5307	struct maple_node *node = mte_to_node(enode);
5308
5309	if (mt_in_rcu(mt)) {
5310		mt_destroy_walk(enode, mt, false);
5311		call_rcu(&node->rcu, mt_free_walk);
5312	} else {
5313		mt_destroy_walk(enode, mt, true);
5314	}
5315}
5316
5317static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5318{
5319	if (!mas_is_active(wr_mas->mas)) {
5320		if (mas_is_start(wr_mas->mas))
5321			return;
5322
5323		if (unlikely(mas_is_paused(wr_mas->mas)))
5324			goto reset;
5325
5326		if (unlikely(mas_is_none(wr_mas->mas)))
5327			goto reset;
5328
5329		if (unlikely(mas_is_overflow(wr_mas->mas)))
5330			goto reset;
5331
5332		if (unlikely(mas_is_underflow(wr_mas->mas)))
5333			goto reset;
5334	}
5335
5336	/*
5337	 * A less strict version of mas_is_span_wr() where we allow spanning
5338	 * writes within this node.  This is to stop partial walks in
5339	 * mas_preallocate() from being reset.
5340	 */
5341	if (wr_mas->mas->last > wr_mas->mas->max)
5342		goto reset;
5343
5344	if (wr_mas->entry)
5345		return;
5346
5347	if (mte_is_leaf(wr_mas->mas->node) &&
5348	    wr_mas->mas->last == wr_mas->mas->max)
5349		goto reset;
5350
5351	return;
5352
5353reset:
5354	mas_reset(wr_mas->mas);
5355}
5356
5357/* Interface */
5358
5359/**
5360 * mas_store() - Store an @entry.
5361 * @mas: The maple state.
5362 * @entry: The entry to store.
5363 *
5364 * The @mas->index and @mas->last are used to set the range for the @entry.
5365 * Note: The @mas should have pre-allocated entries to ensure there is memory to
5366 * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5367 *
5368 * Return: the first entry between mas->index and mas->last or %NULL.
5369 */
5370void *mas_store(struct ma_state *mas, void *entry)
5371{
5372	MA_WR_STATE(wr_mas, mas, entry);
5373
5374	trace_ma_write(__func__, mas, 0, entry);
5375#ifdef CONFIG_DEBUG_MAPLE_TREE
5376	if (MAS_WARN_ON(mas, mas->index > mas->last))
5377		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5378
5379	if (mas->index > mas->last) {
5380		mas_set_err(mas, -EINVAL);
5381		return NULL;
5382	}
5383
5384#endif
5385
5386	/*
5387	 * Storing is the same operation as insert with the added caveat that it
5388	 * can overwrite entries.  Although this seems simple enough, one may
5389	 * want to examine what happens if a single store operation was to
5390	 * overwrite multiple entries within a self-balancing B-Tree.
5391	 */
5392	mas_wr_store_setup(&wr_mas);
5393	mas_wr_store_entry(&wr_mas);
5394	return wr_mas.content;
5395}
5396EXPORT_SYMBOL_GPL(mas_store);
5397
5398/**
5399 * mas_store_gfp() - Store a value into the tree.
5400 * @mas: The maple state
5401 * @entry: The entry to store
5402 * @gfp: The GFP_FLAGS to use for allocations if necessary.
5403 *
5404 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5405 * be allocated.
5406 */
5407int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5408{
5409	MA_WR_STATE(wr_mas, mas, entry);
5410
5411	mas_wr_store_setup(&wr_mas);
5412	trace_ma_write(__func__, mas, 0, entry);
5413retry:
5414	mas_wr_store_entry(&wr_mas);
5415	if (unlikely(mas_nomem(mas, gfp)))
5416		goto retry;
5417
5418	if (unlikely(mas_is_err(mas)))
5419		return xa_err(mas->node);
5420
5421	return 0;
5422}
5423EXPORT_SYMBOL_GPL(mas_store_gfp);
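
/*
 * Usage sketch (my_tree is hypothetical): store a value for the range
 * 10-20, letting the function allocate nodes as needed (the lock is
 * dropped internally around sleeping allocations):
 *
 *	MA_STATE(mas, &my_tree, 10, 20);
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_store_gfp(&mas, xa_mk_value(1), GFP_KERNEL);
 *	mas_unlock(&mas);
 *
 * ret is 0 on success, -EINVAL or -ENOMEM on failure.
 */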
5424
5425/**
5426 * mas_store_prealloc() - Store a value into the tree using memory
5427 * preallocated in the maple state.
5428 * @mas: The maple state
5429 * @entry: The entry to store.
5430 */
5431void mas_store_prealloc(struct ma_state *mas, void *entry)
5432{
5433	MA_WR_STATE(wr_mas, mas, entry);
5434
5435	mas_wr_store_setup(&wr_mas);
5436	trace_ma_write(__func__, mas, 0, entry);
5437	mas_wr_store_entry(&wr_mas);
5438	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5439	mas_destroy(mas);
5440}
5441EXPORT_SYMBOL_GPL(mas_store_prealloc);
5442
5443/**
5444 * mas_preallocate() - Preallocate enough nodes for a store operation
5445 * @mas: The maple state
5446 * @entry: The entry that will be stored
5447 * @gfp: The GFP_FLAGS to use for allocations.
5448 *
5449 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5450 */
5451int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5452{
5453	MA_WR_STATE(wr_mas, mas, entry);
5454	unsigned char node_size;
5455	int request = 1;
5456	int ret;
5457
5459	if (unlikely(!mas->index && mas->last == ULONG_MAX))
5460		goto ask_now;
5461
5462	mas_wr_store_setup(&wr_mas);
5463	wr_mas.content = mas_start(mas);
5464	/* Root expand */
5465	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5466		goto ask_now;
5467
5468	if (unlikely(!mas_wr_walk(&wr_mas))) {
5469		/* Spanning store, use worst case for now */
5470		request = 1 + mas_mt_height(mas) * 3;
5471		goto ask_now;
5472	}
5473
5474	/* At this point, we are at the leaf node that needs to be altered. */
5475	/* Exact fit, no nodes needed. */
5476	if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5477		return 0;
5478
5479	mas_wr_end_piv(&wr_mas);
5480	node_size = mas_wr_new_end(&wr_mas);
5481
5482	/* Slot store, does not require additional nodes */
5483	if (node_size == mas->end) {
5484		/* reuse node */
5485		if (!mt_in_rcu(mas->tree))
5486			return 0;
5487		/* shifting boundary */
5488		if (wr_mas.offset_end - mas->offset == 1)
5489			return 0;
5490	}
5491
5492	if (node_size >= mt_slots[wr_mas.type]) {
5493		/* Split, worst case for now. */
5494		request = 1 + mas_mt_height(mas) * 2;
5495		goto ask_now;
5496	}
5497
5498	/* New root needs a single node */
5499	if (unlikely(mte_is_root(mas->node)))
5500		goto ask_now;
5501
5502	/* Potential spanning rebalance collapsing a node, use worst-case */
5503	if (node_size - 1 <= mt_min_slots[wr_mas.type])
5504		request = mas_mt_height(mas) * 2 - 1;
5505
5506	/* node store, slot store needs one node */
5507ask_now:
5508	mas_node_count_gfp(mas, request, gfp);
5509	mas->mas_flags |= MA_STATE_PREALLOC;
5510	if (likely(!mas_is_err(mas)))
5511		return 0;
5512
5513	mas_set_alloc_req(mas, 0);
5514	ret = xa_err(mas->node);
5515	mas_reset(mas);
5516	mas_destroy(mas);
5517	mas_reset(mas);
5518	return ret;
5519}
5520EXPORT_SYMBOL_GPL(mas_preallocate);
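
/*
 * Sketch of the preallocation pattern (identifiers hypothetical); the
 * caller is assumed to hold the tree's write lock in a context where
 * GFP_KERNEL may sleep, e.g. an external lock with MT_FLAGS_LOCK_EXTERN:
 *
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	... work that must not fail after this point ...
 *	mas_store_prealloc(&mas, entry);
 *
 * mas_store_prealloc() cannot fail and releases any unused nodes via
 * mas_destroy().
 */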
5521
5522/*
5523 * mas_destroy() - destroy a maple state.
5524 * @mas: The maple state
5525 *
5526 * Upon completion, check the left-most node and rebalance against the node to
5527 * the right if necessary.  Frees any allocated nodes associated with this maple
5528 * state.
5529 */
5530void mas_destroy(struct ma_state *mas)
5531{
5532	struct maple_alloc *node;
5533	unsigned long total;
5534
5535	/*
5536	 * When using mas_for_each() to insert an expected number of elements,
5537	 * it is possible that the number inserted is less than the expected
5538	 * number.  To fix an invalid final node, a check is performed here to
5539	 * rebalance the previous node with the final node.
5540	 */
5541	if (mas->mas_flags & MA_STATE_REBALANCE) {
5542		unsigned char end;
5543
5544		mas_start(mas);
5545		mtree_range_walk(mas);
5546		end = mas->end + 1;
5547		if (end < mt_min_slot_count(mas->node) - 1)
5548			mas_destroy_rebalance(mas, end);
5549
5550		mas->mas_flags &= ~MA_STATE_REBALANCE;
5551	}
5552	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5553
5554	total = mas_allocated(mas);
5555	while (total) {
5556		node = mas->alloc;
5557		mas->alloc = node->slot[0];
5558		if (node->node_count > 1) {
5559			size_t count = node->node_count - 1;
5560
5561			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5562			total -= count;
5563		}
5564		mt_free_one(ma_mnode_ptr(node));
5565		total--;
5566	}
5567
5568	mas->alloc = NULL;
5569}
5570EXPORT_SYMBOL_GPL(mas_destroy);
5571
5572/*
5573 * mas_expected_entries() - Set the expected number of entries that will be inserted.
5574 * @mas: The maple state
5575 * @nr_entries: The number of expected entries.
5576 *
5577 * This will attempt to pre-allocate enough nodes to store the expected number
5578 * of entries.  The allocations will occur using the bulk allocator interface
5579 * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5580 * to ensure any unused nodes are freed.
5581 *
5582 * Return: 0 on success, -ENOMEM if memory could not be allocated.
5583 */
5584int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5585{
5586	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5587	struct maple_enode *enode = mas->node;
5588	int nr_nodes;
5589	int ret;
5590
5591	/*
5592	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5593	 * forking a process and duplicating the VMAs from one tree to a new
5594	 * tree.  When such a situation arises, it is known that the new tree is
5595	 * not going to be used until the entire tree is populated.  For
5596	 * performance reasons, it is best to use a bulk load with RCU disabled.
5597	 * This allows for optimistic splitting that favours the left and reuse
5598	 * of nodes during the operation.
5599	 */
5600
5601	/* Optimize splitting for bulk insert in-order */
5602	mas->mas_flags |= MA_STATE_BULK;
5603
5604	/*
5605	 * Avoid overflow, assume a gap between each entry and a trailing null.
5606	 * If this is wrong, it just means allocation can happen during
5607	 * insertion of entries.
5608	 */
5609	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5610	if (!mt_is_alloc(mas->tree))
5611		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5612
5613	/* Leaves; reduce slots to keep space for expansion */
5614	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5615	/* Internal nodes */
5616	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5617	/* Add working room for split (2 nodes) + new parents */
5618	mas_node_count_gfp(mas, nr_nodes + 3, GFP_KERNEL);
5619
5620	/* Detect if allocations run out */
5621	mas->mas_flags |= MA_STATE_PREALLOC;
5622
5623	if (!mas_is_err(mas))
5624		return 0;
5625
5626	ret = xa_err(mas->node);
5627	mas->node = enode;
5628	mas_destroy(mas);
5629	return ret;
5631}
5632EXPORT_SYMBOL_GPL(mas_expected_entries);
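
/*
 * Bulk-load sketch (identifiers hypothetical; the caller holds a
 * sleepable write lock since GFP_KERNEL is used internally): populate a
 * fresh, non-RCU tree in order, then let mas_destroy() rebalance the
 * final node and free unused allocations:
 *
 *	MA_STATE(mas, &new_tree, 0, 0);
 *
 *	if (mas_expected_entries(&mas, nr))
 *		return -ENOMEM;
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, start[i], end[i]);
 *		mas_store(&mas, entry[i]);
 *	}
 *	mas_destroy(&mas);
 */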
5633
5634static bool mas_next_setup(struct ma_state *mas, unsigned long max,
5635		void **entry)
5636{
5637	bool was_none = mas_is_none(mas);
5638
5639	if (unlikely(mas->last >= max)) {
5640		mas->status = ma_overflow;
5641		return true;
5642	}
5643
5644	switch (mas->status) {
5645	case ma_active:
5646		return false;
5647	case ma_none:
5648		fallthrough;
5649	case ma_pause:
5650		mas->status = ma_start;
5651		fallthrough;
5652	case ma_start:
5653		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5654		break;
5655	case ma_overflow:
5656		/* Overflowed before, but the max changed */
5657		mas->status = ma_active;
5658		break;
5659	case ma_underflow:
5660		/* The user expects the mas to be one before where it is */
5661		mas->status = ma_active;
5662		*entry = mas_walk(mas);
5663		if (*entry)
5664			return true;
5665		break;
5666	case ma_root:
5667		break;
5668	case ma_error:
5669		return true;
5670	}
5671
5672	if (likely(mas_is_active(mas))) /* Fast path */
5673		return false;
5674
5675	if (mas_is_ptr(mas)) {
5676		*entry = NULL;
5677		if (was_none && mas->index == 0) {
5678			mas->index = mas->last = 0;
5679			return true;
5680		}
5681		mas->index = 1;
5682		mas->last = ULONG_MAX;
5683		mas->status = ma_none;
5684		return true;
5685	}
5686
5687	if (mas_is_none(mas))
5688		return true;
5689
5690	return false;
5691}
5692
5693/**
5694 * mas_next() - Get the next entry.
5695 * @mas: The maple state
5696 * @max: The maximum index to check.
5697 *
5698 * Returns the next entry after @mas->index.
5699 * Must hold rcu_read_lock or the write lock.
5700 * Can return the zero entry.
5701 *
5702 * Return: The next entry or %NULL
5703 */
5704void *mas_next(struct ma_state *mas, unsigned long max)
5705{
5706	void *entry = NULL;
5707
5708	if (mas_next_setup(mas, max, &entry))
5709		return entry;
5710
5711	/* Retries on dead nodes handled by mas_next_slot */
5712	return mas_next_slot(mas, max, false);
5713}
5714EXPORT_SYMBOL_GPL(mas_next);
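
/*
 * Example (an illustrative sketch): iterating forward with mas_next() under
 * the RCU read lock.  Entries strictly after @start are returned; @mt,
 * @start and process_entry() are hypothetical placeholders.
 *
 *	MA_STATE(mas, &mt, start, start);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		process_entry(entry);
 *	rcu_read_unlock();
 */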
5715
5716/**
5717 * mas_next_range() - Advance the maple state to the next range
5718 * @mas: The maple state
5719 * @max: The maximum index to check.
5720 *
5721 * Sets @mas->index and @mas->last to the range.
5722 * Must hold rcu_read_lock or the write lock.
5723 * Can return the zero entry.
5724 *
5725 * Return: The next entry or %NULL
5726 */
5727void *mas_next_range(struct ma_state *mas, unsigned long max)
5728{
5729	void *entry = NULL;
5730
5731	if (mas_next_setup(mas, max, &entry))
5732		return entry;
5733
5734	/* Retries on dead nodes handled by mas_next_slot */
5735	return mas_next_slot(mas, max, true);
5736}
5737EXPORT_SYMBOL_GPL(mas_next_range);
5738
5739/**
5740 * mt_next() - get the next value in the maple tree
5741 * @mt: The maple tree
5742 * @index: The start index
5743 * @max: The maximum index to check
5744 *
5745 * Takes RCU read lock internally to protect the search, which does not
5746 * protect the returned pointer after dropping RCU read lock.
5747 * See also: Documentation/core-api/maple_tree.rst
5748 *
5749 * Return: The entry higher than @index or %NULL if nothing is found.
5750 */
5751void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5752{
5753	void *entry = NULL;
5754	MA_STATE(mas, mt, index, index);
5755
5756	rcu_read_lock();
5757	entry = mas_next(&mas, max);
5758	rcu_read_unlock();
5759	return entry;
5760}
5761EXPORT_SYMBOL_GPL(mt_next);
5762
5763static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
5764{
5765	if (unlikely(mas->index <= min)) {
5766		mas->status = ma_underflow;
5767		return true;
5768	}
5769
5770	switch (mas->status) {
5771	case ma_active:
5772		return false;
5773	case ma_start:
5774		break;
5775	case ma_none:
5776		fallthrough;
5777	case ma_pause:
5778		mas->status = ma_start;
5779		break;
5780	case ma_underflow:
5781		/* underflowed before but the min changed */
5782		mas->status = ma_active;
5783		break;
5784	case ma_overflow:
5785		/* User expects mas to be one after where it is */
5786		mas->status = ma_active;
5787		*entry = mas_walk(mas);
5788		if (*entry)
5789			return true;
5790		break;
5791	case ma_root:
5792		break;
5793	case ma_error:
5794		return true;
5795	}
5796
5797	if (mas_is_start(mas))
5798		mas_walk(mas);
5799
5800	if (unlikely(mas_is_ptr(mas))) {
5801		if (!mas->index) {
5802			mas->status = ma_none;
5803			return true;
5804		}
5805		mas->index = mas->last = 0;
5806		*entry = mas_root(mas);
5807		return true;
5808	}
5809
5810	if (mas_is_none(mas)) {
5811		if (mas->index) {
5812			/* Walked to out-of-range pointer? */
5813			mas->index = mas->last = 0;
5814			mas->status = ma_root;
5815			*entry = mas_root(mas);
5816			return true;
5817		}
5818		return true;
5819	}
5820
5821	return false;
5822}
5823
5824/**
5825 * mas_prev() - Get the previous entry
5826 * @mas: The maple state
5827 * @min: The minimum value to check.
5828 *
5829 * Must hold rcu_read_lock or the write lock.
5830 * Will reset mas to ma_start if the status is ma_none.  Will stop at nodes
5831 * that are not searchable.
5832 *
5833 * Return: the previous value or %NULL.
5834 */
5835void *mas_prev(struct ma_state *mas, unsigned long min)
5836{
5837	void *entry = NULL;
5838
5839	if (mas_prev_setup(mas, min, &entry))
5840		return entry;
5841
5842	return mas_prev_slot(mas, min, false);
5843}
5844EXPORT_SYMBOL_GPL(mas_prev);
5845
5846/**
5847 * mas_prev_range() - Advance to the previous range
5848 * @mas: The maple state
5849 * @min: The minimum value to check.
5850 *
5851 * Sets @mas->index and @mas->last to the range.
5852 * Must hold rcu_read_lock or the write lock.
5853 * Will reset mas to ma_start if the status is ma_none.  Will stop at nodes
5854 * that are not searchable.
5855 *
5856 * Return: the previous value or %NULL.
5857 */
5858void *mas_prev_range(struct ma_state *mas, unsigned long min)
5859{
5860	void *entry = NULL;
5861
5862	if (mas_prev_setup(mas, min, &entry))
5863		return entry;
5864
5865	return mas_prev_slot(mas, min, true);
5866}
5867EXPORT_SYMBOL_GPL(mas_prev_range);
5868
5869/**
5870 * mt_prev() - get the previous value in the maple tree
5871 * @mt: The maple tree
5872 * @index: The start index
5873 * @min: The minimum index to check
5874 *
5875 * Takes RCU read lock internally to protect the search, which does not
5876 * protect the returned pointer after dropping RCU read lock.
5877 * See also: Documentation/core-api/maple_tree.rst
5878 *
5879 * Return: The entry before @index or %NULL if nothing is found.
5880 */
5881void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5882{
5883	void *entry = NULL;
5884	MA_STATE(mas, mt, index, index);
5885
5886	rcu_read_lock();
5887	entry = mas_prev(&mas, min);
5888	rcu_read_unlock();
5889	return entry;
5890}
5891EXPORT_SYMBOL_GPL(mt_prev);
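
/*
 * Example (an illustrative sketch): walking a tree in reverse with
 * mas_prev(), starting above the range of interest.  @mt and
 * process_entry() are hypothetical placeholders.
 *
 *	MA_STATE(mas, &mt, ULONG_MAX, ULONG_MAX);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_prev(&mas, 0)) != NULL)
 *		process_entry(entry);
 *	rcu_read_unlock();
 */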
5892
5893/**
5894 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5895 * @mas: The maple state to pause
5896 *
5897 * Some users need to pause a walk and drop the lock they're holding in
5898 * order to yield to a higher priority thread or carry out an operation
5899 * on an entry.  Those users should call this function before they drop
5900 * the lock.  It resets the @mas to be suitable for the next iteration
5901 * of the loop after the user has reacquired the lock.  If most entries
5902 * found during a walk require you to call mas_pause(), the mt_for_each()
5903 * iterator may be more appropriate.
5905 */
5906void mas_pause(struct ma_state *mas)
5907{
5908	mas->status = ma_pause;
5909	mas->node = NULL;
5910}
5911EXPORT_SYMBOL_GPL(mas_pause);
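
/*
 * Example (an illustrative sketch): yielding the CPU during a long walk.
 * @mt and process_entry() are hypothetical placeholders.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		process_entry(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */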
5912
5913/**
5914 * mas_find_setup() - Internal function to set up mas_find*().
5915 * @mas: The maple state
5916 * @max: The maximum index
5917 * @entry: Pointer to the entry
5918 *
5919 * Return: true if entry is the answer, false otherwise.
5920 */
5921static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
5922{
5923	switch (mas->status) {
5924	case ma_active:
5925		if (mas->last < max)
5926			return false;
5927		return true;
5928	case ma_start:
5929		break;
5930	case ma_pause:
5931		if (unlikely(mas->last >= max))
5932			return true;
5933
5934		mas->index = ++mas->last;
5935		mas->status = ma_start;
5936		break;
5937	case ma_none:
5938		if (unlikely(mas->last >= max))
5939			return true;
5940
5941		mas->index = mas->last;
5942		mas->status = ma_start;
5943		break;
5944	case ma_underflow:
5945		/* mas points at the entry just before; it could not go lower */
5946		if (unlikely(mas->index >= max)) {
5947			mas->status = ma_overflow;
5948			return true;
5949		}
5950
5951		mas->status = ma_active;
5952		*entry = mas_walk(mas);
5953		if (*entry)
5954			return true;
5955		break;
5956	case ma_overflow:
5957		if (unlikely(mas->last >= max))
5958			return true;
5959
5960		mas->status = ma_active;
5961		*entry = mas_walk(mas);
5962		if (*entry)
5963			return true;
5964		break;
5965	case ma_root:
5966		break;
5967	case ma_error:
5968		return true;
5969	}
5970
5971	if (mas_is_start(mas)) {
5972		/* First run or continue */
5973		if (mas->index > max)
5974			return true;
5975
5976		*entry = mas_walk(mas);
5977		if (*entry)
5978			return true;
5979
5980	}
5981
5982	if (unlikely(mas_is_ptr(mas)))
5983		goto ptr_out_of_range;
5984
5985	if (unlikely(mas_is_none(mas)))
5986		return true;
5987
5988	if (mas->index == max)
5989		return true;
5990
5991	return false;
5992
5993ptr_out_of_range:
5994	mas->status = ma_none;
5995	mas->index = 1;
5996	mas->last = ULONG_MAX;
5997	return true;
5998}
5999
6000/**
6001 * mas_find() - On the first call, find the entry at or after mas->index up to
6002 * %max.  Otherwise, find the entry after mas->index.
6003 * @mas: The maple state
6004 * @max: The maximum value to check.
6005 *
6006 * Must hold rcu_read_lock or the write lock.
6007 * If an entry exists, last and index are updated accordingly.
6008 * May set @mas->status to ma_overflow.
6009 *
6010 * Return: The entry or %NULL.
6011 */
6012void *mas_find(struct ma_state *mas, unsigned long max)
6013{
6014	void *entry = NULL;
6015
6016	if (mas_find_setup(mas, max, &entry))
6017		return entry;
6018
6019	/* Retries on dead nodes handled by mas_next_slot */
6020	entry = mas_next_slot(mas, max, false);
6021	/* Ignore overflow */
6022	mas->status = ma_active;
6023	return entry;
6024}
6025EXPORT_SYMBOL_GPL(mas_find);
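
/*
 * Example (an illustrative sketch): mas_find() is what the mas_for_each()
 * iterator macro expands to.  An open-coded equivalent, where @mt and
 * process_entry() are hypothetical placeholders:
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		process_entry(entry);
 *	rcu_read_unlock();
 *
 * Unlike mas_next(), the first call can return the entry at mas->index
 * itself.
 */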
6026
6027/**
6028 * mas_find_range() - On the first call, find the entry at or after
6029 * mas->index up to %max.  Otherwise, advance to the next slot after mas->index.
6030 * @mas: The maple state
6031 * @max: The maximum value to check.
6032 *
6033 * Must hold rcu_read_lock or the write lock.
6034 * If an entry exists, last and index are updated accordingly.
6035 * May set @mas->status to ma_overflow.
6036 *
6037 * Return: The entry or %NULL.
6038 */
6039void *mas_find_range(struct ma_state *mas, unsigned long max)
6040{
6041	void *entry = NULL;
6042
6043	if (mas_find_setup(mas, max, &entry))
6044		return entry;
6045
6046	/* Retries on dead nodes handled by mas_next_slot */
6047	return mas_next_slot(mas, max, true);
6048}
6049EXPORT_SYMBOL_GPL(mas_find_range);
6050
6051/**
6052 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6053 * @mas: The maple state
6054 * @min: The minimum index
6055 * @entry: Pointer to the entry
6056 *
6057 * Return: true if entry is the answer, false otherwise.
6058 */
6059static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6060		void **entry)
6061{
6063	switch (mas->status) {
6064	case ma_active:
6065		goto active;
6066	case ma_start:
6067		break;
6068	case ma_pause:
6069		if (unlikely(mas->index <= min)) {
6070			mas->status = ma_underflow;
6071			return true;
6072		}
6073		mas->last = --mas->index;
6074		mas->status = ma_start;
6075		break;
6076	case ma_none:
6077		if (mas->index <= min)
6078			goto none;
6079
6080		mas->last = mas->index;
6081		mas->status = ma_start;
6082		break;
6083	case ma_overflow: /* user expects the mas to be one after where it is */
6084		if (unlikely(mas->index <= min)) {
6085			mas->status = ma_underflow;
6086			return true;
6087		}
6088
6089		mas->status = ma_active;
6090		break;
6091	case ma_underflow: /* user expects the mas to be one before where it is */
6092		if (unlikely(mas->index <= min))
6093			return true;
6094
6095		mas->status = ma_active;
6096		break;
6097	case ma_root:
6098		break;
6099	case ma_error:
6100		return true;
6101	}
6102
6103	if (mas_is_start(mas)) {
6104		/* First run or continue */
6105		if (mas->index < min)
6106			return true;
6107
6108		*entry = mas_walk(mas);
6109		if (*entry)
6110			return true;
6111	}
6112
6113	if (unlikely(mas_is_ptr(mas)))
6114		goto none;
6115
6116	if (unlikely(mas_is_none(mas))) {
6117		/*
6118		 * Walked to the location, and there was nothing so the previous
6119		 * location is 0.
6120		 */
6121		mas->last = mas->index = 0;
6122		mas->status = ma_root;
6123		*entry = mas_root(mas);
6124		return true;
6125	}
6126
6127active:
6128	if (mas->index < min)
6129		return true;
6130
6131	return false;
6132
6133none:
6134	mas->status = ma_none;
6135	return true;
6136}
6137
6138/**
6139 * mas_find_rev() - On the first call, find the first non-null entry at or below
6140 * mas->index down to %min.  Otherwise find the first non-null entry below
6141 * mas->index down to %min.
6142 * @mas: The maple state
6143 * @min: The minimum value to check.
6144 *
6145 * Must hold rcu_read_lock or the write lock.
6146 * If an entry exists, last and index are updated accordingly.
6147 * May set @mas->status to ma_underflow.
6148 *
6149 * Return: The entry or %NULL.
6150 */
6151void *mas_find_rev(struct ma_state *mas, unsigned long min)
6152{
6153	void *entry = NULL;
6154
6155	if (mas_find_rev_setup(mas, min, &entry))
6156		return entry;
6157
6158	/* Retries on dead nodes handled by mas_prev_slot */
6159	return mas_prev_slot(mas, min, false);
6161}
6162EXPORT_SYMBOL_GPL(mas_find_rev);
6163
6164/**
6165 * mas_find_range_rev() - On the first call, find the first non-null entry at or
6166 * below mas->index down to %min.  Otherwise advance to the previous slot after
6167 * mas->index down to %min.
6168 * @mas: The maple state
6169 * @min: The minimum value to check.
6170 *
6171 * Must hold rcu_read_lock or the write lock.
6172 * If an entry exists, last and index are updated accordingly.
6173 * May set @mas->status to ma_underflow.
6174 *
6175 * Return: The entry or %NULL.
6176 */
6177void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6178{
6179	void *entry = NULL;
6180
6181	if (mas_find_rev_setup(mas, min, &entry))
6182		return entry;
6183
6184	/* Retries on dead nodes handled by mas_prev_slot */
6185	return mas_prev_slot(mas, min, true);
6186}
6187EXPORT_SYMBOL_GPL(mas_find_range_rev);
6188
6189/**
6190 * mas_erase() - Find the range in which index resides and erase the entire
6191 * range.
6192 * @mas: The maple state
6193 *
6194 * Must hold the write lock.
6195 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6196 * erases that range.
6197 *
6198 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6199 */
6200void *mas_erase(struct ma_state *mas)
6201{
6202	void *entry;
6203	MA_WR_STATE(wr_mas, mas, NULL);
6204
6205	if (!mas_is_active(mas) || !mas_is_start(mas))
6206		mas->status = ma_start;
6207
6208	/* Retry unnecessary when holding the write lock. */
6209	entry = mas_state_walk(mas);
6210	if (!entry)
6211		return NULL;
6212
6213write_retry:
6214	/* Must reset to ensure spanning writes of last slot are detected */
6215	mas_reset(mas);
6216	mas_wr_store_setup(&wr_mas);
6217	mas_wr_store_entry(&wr_mas);
6218	if (mas_nomem(mas, GFP_KERNEL))
6219		goto write_retry;
6220
6221	return entry;
6222}
6223EXPORT_SYMBOL_GPL(mas_erase);
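
/*
 * Example (an illustrative sketch): erasing under the write lock and using
 * the range mas_erase() reports back.  @mt and @index are hypothetical
 * placeholders.
 *
 *	MA_STATE(mas, &mt, index, index);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);
 *	if (old)
 *		pr_debug("erased %p from [%lx, %lx]\n", old, mas.index,
 *			 mas.last);
 */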
6224
6225/**
6226 * mas_nomem() - Check if there was an error allocating and do the allocation
6227 * if necessary.  If there are allocations, then free them.
6228 * @mas: The maple state
6229 * @gfp: The GFP_FLAGS to use for allocations
6230 * Return: true on allocation, false otherwise.
6231 */
6232bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6233	__must_hold(mas->tree->ma_lock)
6234{
6235	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6236		mas_destroy(mas);
6237		return false;
6238	}
6239
6240	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6241		mtree_unlock(mas->tree);
6242		mas_alloc_nodes(mas, gfp);
6243		mtree_lock(mas->tree);
6244	} else {
6245		mas_alloc_nodes(mas, gfp);
6246	}
6247
6248	if (!mas_allocated(mas))
6249		return false;
6250
6251	mas->status = ma_start;
6252	return true;
6253}
6254
6255void __init maple_tree_init(void)
6256{
6257	maple_node_cache = kmem_cache_create("maple_node",
6258			sizeof(struct maple_node), sizeof(struct maple_node),
6259			SLAB_PANIC, NULL);
6260}
6261
6262/**
6263 * mtree_load() - Load a value stored in a maple tree
6264 * @mt: The maple tree
6265 * @index: The index to load
6266 *
6267 * Return: the entry or %NULL
6268 */
6269void *mtree_load(struct maple_tree *mt, unsigned long index)
6270{
6271	MA_STATE(mas, mt, index, index);
6272	void *entry;
6273
6274	trace_ma_read(__func__, &mas);
6275	rcu_read_lock();
6276retry:
6277	entry = mas_start(&mas);
6278	if (unlikely(mas_is_none(&mas)))
6279		goto unlock;
6280
6281	if (unlikely(mas_is_ptr(&mas))) {
6282		if (index)
6283			entry = NULL;
6284
6285		goto unlock;
6286	}
6287
6288	entry = mtree_lookup_walk(&mas);
6289	if (!entry && unlikely(mas_is_start(&mas)))
6290		goto retry;
6291unlock:
6292	rcu_read_unlock();
6293	if (xa_is_zero(entry))
6294		return NULL;
6295
6296	return entry;
6297}
6298EXPORT_SYMBOL(mtree_load);
6299
6300/**
6301 * mtree_store_range() - Store an entry at a given range.
6302 * @mt: The maple tree
6303 * @index: The start of the range
6304 * @last: The end of the range
6305 * @entry: The entry to store
6306 * @gfp: The GFP_FLAGS to use for allocations
6307 *
6308 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6309 * be allocated.
6310 */
6311int mtree_store_range(struct maple_tree *mt, unsigned long index,
6312		unsigned long last, void *entry, gfp_t gfp)
6313{
6314	MA_STATE(mas, mt, index, last);
6315	MA_WR_STATE(wr_mas, &mas, entry);
6316
6317	trace_ma_write(__func__, &mas, 0, entry);
6318	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6319		return -EINVAL;
6320
6321	if (index > last)
6322		return -EINVAL;
6323
6324	mtree_lock(mt);
6325retry:
6326	mas_wr_store_entry(&wr_mas);
6327	if (mas_nomem(&mas, gfp))
6328		goto retry;
6329
6330	mtree_unlock(mt);
6331	if (mas_is_err(&mas))
6332		return xa_err(mas.node);
6333
6334	return 0;
6335}
6336EXPORT_SYMBOL(mtree_store_range);
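
/*
 * Example (an illustrative sketch): storing one entry over a range and
 * reading it back from the middle of that range.  @some_ptr is a
 * hypothetical placeholder.
 *
 *	DEFINE_MTREE(mt);
 *	int ret;
 *
 *	ret = mtree_store_range(&mt, 0x1000, 0x1fff, some_ptr, GFP_KERNEL);
 *	if (!ret)
 *		WARN_ON(mtree_load(&mt, 0x15ff) != some_ptr);
 */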
6337
6338/**
6339 * mtree_store() - Store an entry at a given index.
6340 * @mt: The maple tree
6341 * @index: The index to store the value
6342 * @entry: The entry to store
6343 * @gfp: The GFP_FLAGS to use for allocations
6344 *
6345 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6346 * be allocated.
6347 */
6348int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6349		 gfp_t gfp)
6350{
6351	return mtree_store_range(mt, index, index, entry, gfp);
6352}
6353EXPORT_SYMBOL(mtree_store);
6354
6355/**
6356 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6357 * @mt: The maple tree
6358 * @first: The start of the range
6359 * @last: The end of the range
6360 * @entry: The entry to store
6361 * @gfp: The GFP_FLAGS to use for allocations.
6362 *
6363 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6364 * request, -ENOMEM if memory could not be allocated.
6365 */
6366int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6367		unsigned long last, void *entry, gfp_t gfp)
6368{
6369	MA_STATE(ms, mt, first, last);
6370
6371	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6372		return -EINVAL;
6373
6374	if (first > last)
6375		return -EINVAL;
6376
6377	mtree_lock(mt);
6378retry:
6379	mas_insert(&ms, entry);
6380	if (mas_nomem(&ms, gfp))
6381		goto retry;
6382
6383	mtree_unlock(mt);
6384	if (mas_is_err(&ms))
6385		return xa_err(ms.node);
6386
6387	return 0;
6388}
6389EXPORT_SYMBOL(mtree_insert_range);
6390
6391/**
6392 * mtree_insert() - Insert an entry at a given index if there is no value.
6393 * @mt: The maple tree
6394 * @index: The index to store the value
6395 * @entry: The entry to store
6396 * @gfp: The GFP_FLAGS to use for allocations.
6397 *
6398 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6399 * request, -ENOMEM if memory could not be allocated.
6400 */
6401int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6402		 gfp_t gfp)
6403{
6404	return mtree_insert_range(mt, index, index, entry, gfp);
6405}
6406EXPORT_SYMBOL(mtree_insert);
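
/*
 * Example (an illustrative sketch): unlike mtree_store(), insert fails if
 * any part of the range is already occupied.  @mt, @first and @second are
 * hypothetical placeholders.
 *
 *	int ret;
 *
 *	ret = mtree_insert(&mt, 5, first, GFP_KERNEL);	 ret == 0
 *	ret = mtree_insert(&mt, 5, second, GFP_KERNEL);	 ret == -EEXIST
 *	ret = mtree_store(&mt, 5, second, GFP_KERNEL);	 ret == 0, overwrites
 */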
6407
6408int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6409		void *entry, unsigned long size, unsigned long min,
6410		unsigned long max, gfp_t gfp)
6411{
6412	int ret = 0;
6413
6414	MA_STATE(mas, mt, 0, 0);
6415	if (!mt_is_alloc(mt))
6416		return -EINVAL;
6417
6418	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6419		return -EINVAL;
6420
6421	mtree_lock(mt);
6422retry:
6423	ret = mas_empty_area(&mas, min, max, size);
6424	if (ret)
6425		goto unlock;
6426
6427	mas_insert(&mas, entry);
6428	/*
6429	 * mas_nomem() may release the lock, causing the allocated area
6430	 * to be unavailable, so try to allocate a free area again.
6431	 */
6432	if (mas_nomem(&mas, gfp))
6433		goto retry;
6434
6435	if (mas_is_err(&mas))
6436		ret = xa_err(mas.node);
6437	else
6438		*startp = mas.index;
6439
6440unlock:
6441	mtree_unlock(mt);
6442	return ret;
6443}
6444EXPORT_SYMBOL(mtree_alloc_range);
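
/*
 * Example (an illustrative sketch): carving a free range out of an
 * allocation tree.  The tree must be created with MT_FLAGS_ALLOC_RANGE;
 * @some_ptr is a hypothetical placeholder.
 *
 *	struct maple_tree mt = MTREE_INIT(mt, MT_FLAGS_ALLOC_RANGE);
 *	unsigned long start;
 *	int ret;
 *
 *	ret = mtree_alloc_range(&mt, &start, some_ptr, 0x100, 0, 0xffff,
 *				GFP_KERNEL);
 *	if (!ret)
 *		pr_debug("got [%lx, %lx]\n", start, start + 0xff);
 */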
6445
6446int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6447		void *entry, unsigned long size, unsigned long min,
6448		unsigned long max, gfp_t gfp)
6449{
6450	int ret = 0;
6451
6452	MA_STATE(mas, mt, 0, 0);
6453	if (!mt_is_alloc(mt))
6454		return -EINVAL;
6455
6456	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6457		return -EINVAL;
6458
6459	mtree_lock(mt);
6460retry:
6461	ret = mas_empty_area_rev(&mas, min, max, size);
6462	if (ret)
6463		goto unlock;
6464
6465	mas_insert(&mas, entry);
6466	/*
6467	 * mas_nomem() may release the lock, causing the allocated area
6468	 * to be unavailable, so try to allocate a free area again.
6469	 */
6470	if (mas_nomem(&mas, gfp))
6471		goto retry;
6472
6473	if (mas_is_err(&mas))
6474		ret = xa_err(mas.node);
6475	else
6476		*startp = mas.index;
6477
6478unlock:
6479	mtree_unlock(mt);
6480	return ret;
6481}
6482EXPORT_SYMBOL(mtree_alloc_rrange);
6483
6484/**
6485 * mtree_erase() - Find an index and erase the entire range.
6486 * @mt: The maple tree
6487 * @index: The index to erase
6488 *
6489 * Erasing is the same as a walk to an entry then a store of a NULL to that
6490 * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6491 *
6492 * Return: The entry stored at the @index or %NULL
6493 */
6494void *mtree_erase(struct maple_tree *mt, unsigned long index)
6495{
6496	void *entry = NULL;
6497
6498	MA_STATE(mas, mt, index, index);
6499	trace_ma_op(__func__, &mas);
6500
6501	mtree_lock(mt);
6502	entry = mas_erase(&mas);
6503	mtree_unlock(mt);
6504
6505	return entry;
6506}
6507EXPORT_SYMBOL(mtree_erase);
6508
6509/*
6510 * mas_dup_free() - Free an incomplete duplication of a tree.
6511 * @mas: The maple state of an incomplete tree.
6512 *
6513 * The parameter @mas->node passed in indicates that the allocation failed on
6514 * this node. This function frees all nodes starting from @mas->node in the
6515 * reverse order of mas_dup_build(). There is no need to hold the source tree
6516 * lock at this time.
6517 */
6518static void mas_dup_free(struct ma_state *mas)
6519{
6520	struct maple_node *node;
6521	enum maple_type type;
6522	void __rcu **slots;
6523	unsigned char count, i;
6524
6525	/* Maybe the first node allocation failed. */
6526	if (mas_is_none(mas))
6527		return;
6528
6529	while (!mte_is_root(mas->node)) {
6530		mas_ascend(mas);
6531		if (mas->offset) {
6532			mas->offset--;
6533			do {
6534				mas_descend(mas);
6535				mas->offset = mas_data_end(mas);
6536			} while (!mte_is_leaf(mas->node));
6537
6538			mas_ascend(mas);
6539		}
6540
6541		node = mte_to_node(mas->node);
6542		type = mte_node_type(mas->node);
6543		slots = ma_slots(node, type);
6544		count = mas_data_end(mas) + 1;
6545		for (i = 0; i < count; i++)
6546			((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
6547		mt_free_bulk(count, slots);
6548	}
6549
6550	node = mte_to_node(mas->node);
6551	mt_free_one(node);
6552}
6553
6554/*
6555 * mas_copy_node() - Copy a maple node and replace the parent.
6556 * @mas: The maple state of source tree.
6557 * @new_mas: The maple state of new tree.
6558 * @parent: The parent of the new node.
6559 *
6560 * Copy @mas->node to @new_mas->node, set @parent to be the parent of
6561 * @new_mas->node. If memory allocation fails, @mas is set to -ENOMEM.
6562 */
6563static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
6564		struct maple_pnode *parent)
6565{
6566	struct maple_node *node = mte_to_node(mas->node);
6567	struct maple_node *new_node = mte_to_node(new_mas->node);
6568	unsigned long val;
6569
6570	/* Copy the node completely. */
6571	memcpy(new_node, node, sizeof(struct maple_node));
6572	/* Update the parent node pointer. */
6573	val = (unsigned long)node->parent & MAPLE_NODE_MASK;
6574	new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
6575}
6576
6577/*
6578 * mas_dup_alloc() - Allocate child nodes for a maple node.
6579 * @mas: The maple state of source tree.
6580 * @new_mas: The maple state of new tree.
6581 * @gfp: The GFP_FLAGS to use for allocations.
6582 *
6583 * This function allocates child nodes for @new_mas->node during the duplication
6584 * process. If memory allocation fails, @mas is set to -ENOMEM.
6585 */
6586static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
6587		gfp_t gfp)
6588{
6589	struct maple_node *node = mte_to_node(mas->node);
6590	struct maple_node *new_node = mte_to_node(new_mas->node);
6591	enum maple_type type;
6592	unsigned char request, count, i;
6593	void __rcu **slots;
6594	void __rcu **new_slots;
6595	unsigned long val;
6596
6597	/* Allocate memory for child nodes. */
6598	type = mte_node_type(mas->node);
6599	new_slots = ma_slots(new_node, type);
6600	request = mas_data_end(mas) + 1;
6601	count = mt_alloc_bulk(gfp, request, (void **)new_slots);
6602	if (unlikely(count < request)) {
6603		memset(new_slots, 0, request * sizeof(void *));
6604		mas_set_err(mas, -ENOMEM);
6605		return;
6606	}
6607
6608	/* Restore node type information in slots. */
6609	slots = ma_slots(node, type);
6610	for (i = 0; i < count; i++) {
6611		val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
6612		val &= MAPLE_NODE_MASK;
6613		((unsigned long *)new_slots)[i] |= val;
6614	}
6615}
6616
6617/*
6618 * mas_dup_build() - Build a new maple tree from a source tree
6619 * @mas: The maple state of the source tree; must be in the ma_start state.
6620 * @new_mas: The maple state of the new tree; must be in the ma_start state.
6621 * @gfp: The GFP_FLAGS to use for allocations.
6622 *
6623 * This function builds a new tree in DFS preorder. If the memory allocation
6624 * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
6625 * last node. mas_dup_free() will free the incomplete duplication of a tree.
6626 *
6627 * Note that the attributes of the two trees need to be exactly the same, and the
6628 * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
6629 */
6630static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
6631		gfp_t gfp)
6632{
6633	struct maple_node *node;
6634	struct maple_pnode *parent = NULL;
6635	struct maple_enode *root;
6636	enum maple_type type;
6637
6638	if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
6639	    unlikely(!mtree_empty(new_mas->tree))) {
6640		mas_set_err(mas, -EINVAL);
6641		return;
6642	}
6643
6644	root = mas_start(mas);
6645	if (mas_is_ptr(mas) || mas_is_none(mas))
6646		goto set_new_tree;
6647
6648	node = mt_alloc_one(gfp);
6649	if (!node) {
6650		new_mas->status = ma_none;
6651		mas_set_err(mas, -ENOMEM);
6652		return;
6653	}
6654
6655	type = mte_node_type(mas->node);
6656	root = mt_mk_node(node, type);
6657	new_mas->node = root;
6658	new_mas->min = 0;
6659	new_mas->max = ULONG_MAX;
6660	root = mte_mk_root(root);
6661	while (1) {
6662		mas_copy_node(mas, new_mas, parent);
6663		if (!mte_is_leaf(mas->node)) {
6664			/* Only allocate child nodes for non-leaf nodes. */
6665			mas_dup_alloc(mas, new_mas, gfp);
6666			if (unlikely(mas_is_err(mas)))
6667				return;
6668		} else {
6669			/*
6670			 * This is the last leaf node and duplication is
6671			 * completed.
6672			 */
6673			if (mas->max == ULONG_MAX)
6674				goto done;
6675
6676			/* This is not the last leaf node and needs to go up. */
6677			do {
6678				mas_ascend(mas);
6679				mas_ascend(new_mas);
6680			} while (mas->offset == mas_data_end(mas));
6681
6682			/* Move to the next subtree. */
6683			mas->offset++;
6684			new_mas->offset++;
6685		}
6686
6687		mas_descend(mas);
6688		parent = ma_parent_ptr(mte_to_node(new_mas->node));
6689		mas_descend(new_mas);
6690		mas->offset = 0;
6691		new_mas->offset = 0;
6692	}
6693done:
6694	/* Specially handle the parent of the root node. */
6695	mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
6696set_new_tree:
6697	/* Make them the same height */
6698	new_mas->tree->ma_flags = mas->tree->ma_flags;
6699	rcu_assign_pointer(new_mas->tree->ma_root, root);
6700}
6701
6702/**
6703 * __mt_dup(): Duplicate an entire maple tree
6704 * @mt: The source maple tree
6705 * @new: The new maple tree
6706 * @gfp: The GFP_FLAGS to use for allocations
6707 *
6708 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6709 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6710 * new child nodes for non-leaf nodes. The new node is exactly the same as the
6711 * source node except for all the addresses stored in it. It will be faster than
6712 * traversing all elements in the source tree and inserting them one by one into
6713 * the new tree.
6714 * The user needs to ensure that the attributes of the source tree and the new
6715 * tree are the same, and the new tree needs to be an empty tree, otherwise
6716 * -EINVAL will be returned.
6717 * Note that the user needs to manually lock the source tree and the new tree.
6718 *
6719 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6720 * the attributes of the two trees are different or the new tree is not an empty
6721 * tree.
6722 */
6723int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6724{
6725	int ret = 0;
6726	MA_STATE(mas, mt, 0, 0);
6727	MA_STATE(new_mas, new, 0, 0);
6728
6729	mas_dup_build(&mas, &new_mas, gfp);
6730	if (unlikely(mas_is_err(&mas))) {
6731		ret = xa_err(mas.node);
6732		if (ret == -ENOMEM)
6733			mas_dup_free(&new_mas);
6734	}
6735
6736	return ret;
6737}
6738EXPORT_SYMBOL(__mt_dup);
6739
6740/**
6741 * mtree_dup(): Duplicate an entire maple tree
6742 * @mt: The source maple tree
6743 * @new: The new maple tree
6744 * @gfp: The GFP_FLAGS to use for allocations
6745 *
6746 * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
6747 * traversal. It uses memcpy() to copy nodes in the source tree and allocates
6748 * new child nodes for non-leaf nodes. The new node is exactly the same as the
6749 * source node except for all the addresses stored in it. It will be faster than
6750 * traversing all elements in the source tree and inserting them one by one into
6751 * the new tree.
6752 * The user needs to ensure that the attributes of the source tree and the new
6753 * tree are the same, and the new tree needs to be an empty tree, otherwise
6754 * -EINVAL will be returned.
6755 *
6756 * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
6757 * the attributes of the two trees are different or the new tree is not an empty
6758 * tree.
6759 */
6760int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
6761{
6762	int ret = 0;
6763	MA_STATE(mas, mt, 0, 0);
6764	MA_STATE(new_mas, new, 0, 0);
6765
6766	mas_lock(&new_mas);
6767	mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
6768	mas_dup_build(&mas, &new_mas, gfp);
6769	mas_unlock(&mas);
6770	if (unlikely(mas_is_err(&mas))) {
6771		ret = xa_err(mas.node);
6772		if (ret == -ENOMEM)
6773			mas_dup_free(&new_mas);
6774	}
6775
6776	mas_unlock(&new_mas);
6777	return ret;
6778}
6779EXPORT_SYMBOL(mtree_dup);
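
/*
 * Example (an illustrative sketch): duplicating a populated tree into a
 * freshly initialized, empty destination.  @src is a hypothetical source
 * tree; the destination flags must match the source's.
 *
 *	struct maple_tree dst;
 *	int ret;
 *
 *	mt_init_flags(&dst, MT_FLAGS_ALLOC_RANGE);
 *	ret = mtree_dup(&src, &dst, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */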
6780
6781/**
6782 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6783 * @mt: The maple tree
6784 *
6785 * Note: Does not handle locking.
6786 */
6787void __mt_destroy(struct maple_tree *mt)
6788{
6789	void *root = mt_root_locked(mt);
6790
6791	rcu_assign_pointer(mt->ma_root, NULL);
6792	if (xa_is_node(root))
6793		mte_destroy_walk(root, mt);
6794
6795	mt->ma_flags = mt_attr(mt);
6796}
6797EXPORT_SYMBOL_GPL(__mt_destroy);
6798
6799/**
6800 * mtree_destroy() - Destroy a maple tree
6801 * @mt: The maple tree
6802 *
6803 * Frees all resources used by the tree.  Handles locking.
6804 */
6805void mtree_destroy(struct maple_tree *mt)
6806{
6807	mtree_lock(mt);
6808	__mt_destroy(mt);
6809	mtree_unlock(mt);
6810}
6811EXPORT_SYMBOL(mtree_destroy);
6812
6813/**
6814 * mt_find() - Search from the start up until an entry is found.
6815 * @mt: The maple tree
6816 * @index: Pointer which contains the start location of the search
6817 * @max: The maximum value of the search range
6818 *
6819 * Takes RCU read lock internally to protect the search, which does not
6820 * protect the returned pointer after dropping RCU read lock.
6821 * See also: Documentation/core-api/maple_tree.rst
6822 *
6823 * If an entry is found, @index is updated to point to the next possible
6824 * entry, independent of whether the found entry occupies a single index
6825 * or a range of indices.
6826 *
6827 * Return: The entry at or after the @index or %NULL
6828 */
6829void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6830{
6831	MA_STATE(mas, mt, *index, *index);
6832	void *entry;
6833#ifdef CONFIG_DEBUG_MAPLE_TREE
6834	unsigned long copy = *index;
6835#endif
6836
6837	trace_ma_read(__func__, &mas);
6838
6839	if ((*index) > max)
6840		return NULL;
6841
6842	rcu_read_lock();
6843retry:
6844	entry = mas_state_walk(&mas);
6845	if (mas_is_start(&mas))
6846		goto retry;
6847
6848	if (unlikely(xa_is_zero(entry)))
6849		entry = NULL;
6850
6851	if (entry)
6852		goto unlock;
6853
6854	while (mas_is_active(&mas) && (mas.last < max)) {
6855		entry = mas_next_entry(&mas, max);
6856		if (likely(entry && !xa_is_zero(entry)))
6857			break;
6858	}
6859
6860	if (unlikely(xa_is_zero(entry)))
6861		entry = NULL;
6862unlock:
6863	rcu_read_unlock();
6864	if (likely(entry)) {
6865		*index = mas.last + 1;
6866#ifdef CONFIG_DEBUG_MAPLE_TREE
6867		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6868			pr_err("index not increased! %lx <= %lx\n",
6869			       *index, copy);
6870#endif
6871	}
6872
6873	return entry;
6874}
6875EXPORT_SYMBOL(mt_find);
6876
6877/**
6878 * mt_find_after() - Search from the start up until an entry is found.
6879 * @mt: The maple tree
6880 * @index: Pointer which contains the start location of the search
6881 * @max: The maximum value to check
6882 *
6883 * Same as mt_find() except that it checks @index for 0 before
6884 * searching. If @index == 0, the search is aborted. This covers a wrap
6885 * around of @index to 0 in an iterator loop.
6886 *
6887 * Return: The entry at or after the @index or %NULL
6888 */
6889void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6890		    unsigned long max)
6891{
6892	if (!(*index))
6893		return NULL;
6894
6895	return mt_find(mt, index, max);
6896}
6897EXPORT_SYMBOL(mt_find_after);
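
/*
 * Example (an illustrative sketch): mt_find() and mt_find_after() are the
 * building blocks of the mt_for_each() iterator.  An open-coded equivalent,
 * with hypothetical @mt and process_entry():
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = mt_find(&mt, &index, ULONG_MAX); entry;
 *	     entry = mt_find_after(&mt, &index, ULONG_MAX))
 *		process_entry(entry);
 */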
6898
6899#ifdef CONFIG_DEBUG_MAPLE_TREE
6900atomic_t maple_tree_tests_run;
6901EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6902atomic_t maple_tree_tests_passed;
6903EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6904
6905#ifndef __KERNEL__
6906extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6907void mt_set_non_kernel(unsigned int val)
6908{
6909	kmem_cache_set_non_kernel(maple_node_cache, val);
6910}
6911
6912extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6913unsigned long mt_get_alloc_size(void)
6914{
6915	return kmem_cache_get_alloc(maple_node_cache);
6916}
6917
6918extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6919void mt_zero_nr_tallocated(void)
6920{
6921	kmem_cache_zero_nr_tallocated(maple_node_cache);
6922}
6923
6924extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6925unsigned int mt_nr_tallocated(void)
6926{
6927	return kmem_cache_nr_tallocated(maple_node_cache);
6928}
6929
6930extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6931unsigned int mt_nr_allocated(void)
6932{
6933	return kmem_cache_nr_allocated(maple_node_cache);
6934}
6935
6936void mt_cache_shrink(void)
6937{
6938}
6939#else
6940/*
6941 * mt_cache_shrink() - For testing, don't use this.
6942 *
6943 * Certain testcases can trigger an OOM when combined with other memory
6944 * debugging configuration options.  This function is used to reduce the
6945 * possibility of an out of memory event due to kmem_cache objects remaining
6946 * around for longer than usual.
6947 */
6948void mt_cache_shrink(void)
6949{
6950	kmem_cache_shrink(maple_node_cache);
6952}
6953EXPORT_SYMBOL_GPL(mt_cache_shrink);
6954
6955#endif /* not defined __KERNEL__ */
6956/*
6957 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6958 * @mas: The maple state
6959 * @offset: The offset into the slot array to fetch.
6960 *
6961 * Return: The entry stored at @offset.
6962 */
6963static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6964		unsigned char offset)
6965{
6966	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6967			offset);
6968}
6969
6970/* Depth first search, post-order */
6971static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6972{
6974	struct maple_enode *p, *mn = mas->node;
6975	unsigned long p_min, p_max;
6976
6977	mas_next_node(mas, mas_mn(mas), max);
6978	if (!mas_is_overflow(mas))
6979		return;
6980
6981	if (mte_is_root(mn))
6982		return;
6983
6984	mas->node = mn;
6985	mas_ascend(mas);
6986	do {
6987		p = mas->node;
6988		p_min = mas->min;
6989		p_max = mas->max;
6990		mas_prev_node(mas, 0);
6991	} while (!mas_is_underflow(mas));
6992
6993	mas->node = p;
6994	mas->max = p_max;
6995	mas->min = p_min;
6996}
6997
6998/* Tree validations */
6999static void mt_dump_node(const struct maple_tree *mt, void *entry,
7000		unsigned long min, unsigned long max, unsigned int depth,
7001		enum mt_dump_format format);
7002static void mt_dump_range(unsigned long min, unsigned long max,
7003			  unsigned int depth, enum mt_dump_format format)
7004{
7005	static const char spaces[] = "                                ";
7006
7007	switch (format) {
7008	case mt_dump_hex:
7009		if (min == max)
7010			pr_info("%.*s%lx: ", depth * 2, spaces, min);
7011		else
7012			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
7013		break;
7014	case mt_dump_dec:
7015		if (min == max)
7016			pr_info("%.*s%lu: ", depth * 2, spaces, min);
7017		else
7018			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
7019	}
7020}
7021
7022static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
7023			  unsigned int depth, enum mt_dump_format format)
7024{
7025	mt_dump_range(min, max, depth, format);
7026
7027	if (xa_is_value(entry))
7028		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
7029				xa_to_value(entry), entry);
7030	else if (xa_is_zero(entry))
7031		pr_cont("zero (%ld)\n", xa_to_internal(entry));
7032	else if (mt_is_reserved(entry))
7033		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
7034	else
7035		pr_cont("%p\n", entry);
7036}
7037
7038static void mt_dump_range64(const struct maple_tree *mt, void *entry,
7039		unsigned long min, unsigned long max, unsigned int depth,
7040		enum mt_dump_format format)
7041{
7042	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
7043	bool leaf = mte_is_leaf(entry);
7044	unsigned long first = min;
7045	int i;
7046
7047	pr_cont(" contents: ");
7048	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
7049		switch (format) {
7050		case mt_dump_hex:
7051			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7052			break;
7053		case mt_dump_dec:
7054			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7055		}
7056	}
7057	pr_cont("%p\n", node->slot[i]);
7058	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
7059		unsigned long last = max;
7060
7061		if (i < (MAPLE_RANGE64_SLOTS - 1))
7062			last = node->pivot[i];
7063		else if (!node->slot[i] && max != mt_node_max(entry))
7064			break;
7065		if (last == 0 && i > 0)
7066			break;
7067		if (leaf)
7068			mt_dump_entry(mt_slot(mt, node->slot, i),
7069					first, last, depth + 1, format);
7070		else if (node->slot[i])
7071			mt_dump_node(mt, mt_slot(mt, node->slot, i),
7072					first, last, depth + 1, format);
7073
7074		if (last == max)
7075			break;
7076		if (last > max) {
7077			switch (format) {
7078			case mt_dump_hex:
7079				pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
7080					node, last, max, i);
7081				break;
7082			case mt_dump_dec:
7083				pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7084					node, last, max, i);
7085			}
7086		}
7087		first = last + 1;
7088	}
7089}
7090
7091static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
7092	unsigned long min, unsigned long max, unsigned int depth,
7093	enum mt_dump_format format)
7094{
7095	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
7096	bool leaf = mte_is_leaf(entry);
7097	unsigned long first = min;
7098	int i;
7099
7100	pr_cont(" contents: ");
7101	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7102		switch (format) {
7103		case mt_dump_hex:
7104			pr_cont("%lx ", node->gap[i]);
7105			break;
7106		case mt_dump_dec:
7107			pr_cont("%lu ", node->gap[i]);
7108		}
7109	}
7110	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
7111	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) {
7112		switch (format) {
7113		case mt_dump_hex:
7114			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
7115			break;
7116		case mt_dump_dec:
7117			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
7118		}
7119	}
7120	pr_cont("%p\n", node->slot[i]);
7121	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
7122		unsigned long last = max;
7123
7124		if (i < (MAPLE_ARANGE64_SLOTS - 1))
7125			last = node->pivot[i];
7126		else if (!node->slot[i])
7127			break;
7128		if (last == 0 && i > 0)
7129			break;
7130		if (leaf)
7131			mt_dump_entry(mt_slot(mt, node->slot, i),
7132					first, last, depth + 1, format);
7133		else if (node->slot[i])
7134			mt_dump_node(mt, mt_slot(mt, node->slot, i),
7135					first, last, depth + 1, format);
7136
7137		if (last == max)
7138			break;
7139		if (last > max) {
7140			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
7141					node, last, max, i);
7142			break;
7143		}
7144		first = last + 1;
7145	}
7146}
7147
7148static void mt_dump_node(const struct maple_tree *mt, void *entry,
7149		unsigned long min, unsigned long max, unsigned int depth,
7150		enum mt_dump_format format)
7151{
7152	struct maple_node *node = mte_to_node(entry);
7153	unsigned int type = mte_node_type(entry);
7154	unsigned int i;
7155
7156	mt_dump_range(min, max, depth, format);
7157
7158	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
7159			node ? node->parent : NULL);
7160	switch (type) {
7161	case maple_dense:
7162		pr_cont("\n");
7163		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
7164			if (min + i > max)
7165				pr_cont("OUT OF RANGE: ");
7166			mt_dump_entry(mt_slot(mt, node->slot, i),
7167					min + i, min + i, depth, format);
7168		}
7169		break;
7170	case maple_leaf_64:
7171	case maple_range_64:
7172		mt_dump_range64(mt, entry, min, max, depth, format);
7173		break;
7174	case maple_arange_64:
7175		mt_dump_arange64(mt, entry, min, max, depth, format);
7176		break;
7177
7178	default:
7179		pr_cont(" UNKNOWN TYPE\n");
7180	}
7181}
7182
7183void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
7184{
7185	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
7186
7187	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
7188		 mt, mt->ma_flags, mt_height(mt), entry);
7189	if (!xa_is_node(entry))
7190		mt_dump_entry(entry, 0, 0, 0, format);
7191	else if (entry)
7192		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
7193}
7194EXPORT_SYMBOL_GPL(mt_dump);
7195
7196/*
7197 * Calculate the maximum gap in a node and check if that's what is reported in
7198 * the parent (unless root).
7199 */
7200static void mas_validate_gaps(struct ma_state *mas)
7201{
7202	struct maple_enode *mte = mas->node;
7203	struct maple_node *p_mn, *node = mte_to_node(mte);
7204	enum maple_type mt = mte_node_type(mas->node);
7205	unsigned long gap = 0, max_gap = 0;
7206	unsigned long p_end, p_start = mas->min;
7207	unsigned char p_slot, offset;
7208	unsigned long *gaps = NULL;
7209	unsigned long *pivots = ma_pivots(node, mt);
7210	unsigned int i;
7211
7212	if (ma_is_dense(mt)) {
7213		for (i = 0; i < mt_slot_count(mte); i++) {
7214			if (mas_get_slot(mas, i)) {
7215				if (gap > max_gap)
7216					max_gap = gap;
7217				gap = 0;
7218				continue;
7219			}
7220			gap++;
7221		}
7222		goto counted;
7223	}
7224
7225	gaps = ma_gaps(node, mt);
7226	for (i = 0; i < mt_slot_count(mte); i++) {
7227		p_end = mas_safe_pivot(mas, pivots, i, mt);
7228
7229		if (!gaps) {
7230			if (!mas_get_slot(mas, i))
7231				gap = p_end - p_start + 1;
7232		} else {
7233			void *entry = mas_get_slot(mas, i);
7234
7235			gap = gaps[i];
7236			MT_BUG_ON(mas->tree, !entry);
7237
7238			if (gap > p_end - p_start + 1) {
7239				pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7240				       mas_mn(mas), i, gap, p_end, p_start,
7241				       p_end - p_start + 1);
7242				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
7243			}
7244		}
7245
7246		if (gap > max_gap)
7247			max_gap = gap;
7248
7249		p_start = p_end + 1;
7250		if (p_end >= mas->max)
7251			break;
7252	}
7253
7254counted:
7255	if (mt == maple_arange_64) {
7256		MT_BUG_ON(mas->tree, !gaps);
7257		offset = ma_meta_gap(node);
7258		if (offset > i) {
7259			pr_err("gap offset %p[%u] is invalid\n", node, offset);
7260			MT_BUG_ON(mas->tree, 1);
7261		}
7262
7263		if (gaps[offset] != max_gap) {
7264			pr_err("gap %p[%u] is not the largest gap %lu\n",
7265			       node, offset, max_gap);
7266			MT_BUG_ON(mas->tree, 1);
7267		}
7268
7269		for (i++ ; i < mt_slot_count(mte); i++) {
7270			if (gaps[i] != 0) {
7271				pr_err("gap %p[%u] beyond node limit != 0\n",
7272				       node, i);
7273				MT_BUG_ON(mas->tree, 1);
7274			}
7275		}
7276	}
7277
7278	if (mte_is_root(mte))
7279		return;
7280
7281	p_slot = mte_parent_slot(mas->node);
7282	p_mn = mte_parent(mte);
7283	MT_BUG_ON(mas->tree, max_gap > mas->max);
7284	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7285		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7286		mt_dump(mas->tree, mt_dump_hex);
7287		MT_BUG_ON(mas->tree, 1);
7288	}
7289}
7290
7291static void mas_validate_parent_slot(struct ma_state *mas)
7292{
7293	struct maple_node *parent;
7294	struct maple_enode *node;
7295	enum maple_type p_type;
7296	unsigned char p_slot;
7297	void __rcu **slots;
7298	int i;
7299
7300	if (mte_is_root(mas->node))
7301		return;
7302
7303	p_slot = mte_parent_slot(mas->node);
7304	p_type = mas_parent_type(mas, mas->node);
7305	parent = mte_parent(mas->node);
7306	slots = ma_slots(parent, p_type);
7307	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7308
7309	/* Check prev/next parent slot for duplicate node entry */
7310
7311	for (i = 0; i < mt_slots[p_type]; i++) {
7312		node = mas_slot(mas, slots, i);
7313		if (i == p_slot) {
7314			if (node != mas->node)
7315				pr_err("parent %p[%u] does not have %p\n",
7316					parent, i, mas_mn(mas));
7317			MT_BUG_ON(mas->tree, node != mas->node);
7318		} else if (node == mas->node) {
7319			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7320			       mas_mn(mas), parent, i, p_slot);
7321			MT_BUG_ON(mas->tree, node == mas->node);
7322		}
7323	}
7324}
7325
7326static void mas_validate_child_slot(struct ma_state *mas)
7327{
7328	enum maple_type type = mte_node_type(mas->node);
7329	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7330	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7331	struct maple_enode *child;
7332	unsigned char i;
7333
7334	if (mte_is_leaf(mas->node))
7335		return;
7336
7337	for (i = 0; i < mt_slots[type]; i++) {
7338		child = mas_slot(mas, slots, i);
7339
7340		if (!child) {
7341			pr_err("Non-leaf node lacks child at %p[%u]\n",
7342			       mas_mn(mas), i);
7343			MT_BUG_ON(mas->tree, 1);
7344		}
7345
7346		if (mte_parent_slot(child) != i) {
7347			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7348			       mas_mn(mas), i, mte_to_node(child),
7349			       mte_parent_slot(child));
7350			MT_BUG_ON(mas->tree, 1);
7351		}
7352
7353		if (mte_parent(child) != mte_to_node(mas->node)) {
7354			pr_err("child %p has parent %p not %p\n",
7355			       mte_to_node(child), mte_parent(child),
7356			       mte_to_node(mas->node));
7357			MT_BUG_ON(mas->tree, 1);
7358		}
7359
7360		if (i < mt_pivots[type] && pivots[i] == mas->max)
7361			break;
7362	}
7363}
7364
7365/*
7366 * Validate all pivots are within mas->min and mas->max, check metadata ends
7367 * where the maximum ends and ensure there are no slots or pivots set outside of
7368 * the end of the data.
7369 */
7370static void mas_validate_limits(struct ma_state *mas)
7371{
7372	int i;
7373	unsigned long prev_piv = 0;
7374	enum maple_type type = mte_node_type(mas->node);
7375	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7376	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7377
7378	for (i = 0; i < mt_slots[type]; i++) {
7379		unsigned long piv;
7380
7381		piv = mas_safe_pivot(mas, pivots, i, type);
7382
7383		if (!piv && (i != 0)) {
7384			pr_err("Missing node limit pivot at %p[%u]",
7385			       mas_mn(mas), i);
7386			MAS_WARN_ON(mas, 1);
7387		}
7388
7389		if (prev_piv > piv) {
7390			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7391				mas_mn(mas), i, piv, prev_piv);
7392			MAS_WARN_ON(mas, piv < prev_piv);
7393		}
7394
7395		if (piv < mas->min) {
7396			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7397				piv, mas->min);
7398			MAS_WARN_ON(mas, piv < mas->min);
7399		}
7400		if (piv > mas->max) {
7401			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7402				piv, mas->max);
7403			MAS_WARN_ON(mas, piv > mas->max);
7404		}
7405		prev_piv = piv;
7406		if (piv == mas->max)
7407			break;
7408	}
7409
7410	if (mas_data_end(mas) != i) {
7411		pr_err("node %p: data_end %u != the last slot offset %u\n",
7412		       mas_mn(mas), mas_data_end(mas), i);
7413		MT_BUG_ON(mas->tree, 1);
7414	}
7415
7416	for (i += 1; i < mt_slots[type]; i++) {
7417		void *entry = mas_slot(mas, slots, i);
7418
7419		if (entry && (i != mt_slots[type] - 1)) {
7420			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7421			       i, entry);
7422			MT_BUG_ON(mas->tree, entry != NULL);
7423		}
7424
7425		if (i < mt_pivots[type]) {
7426			unsigned long piv = pivots[i];
7427
7428			if (!piv)
7429				continue;
7430
7431			pr_err("%p[%u] should not have piv %lu\n",
7432			       mas_mn(mas), i, piv);
7433			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7434		}
7435	}
7436}
7437
7438static void mt_validate_nulls(struct maple_tree *mt)
7439{
7440	void *entry, *last = (void *)1;
7441	unsigned char offset = 0;
7442	void __rcu **slots;
7443	MA_STATE(mas, mt, 0, 0);
7444
7445	mas_start(&mas);
7446	if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
7447		return;
7448
7449	while (!mte_is_leaf(mas.node))
7450		mas_descend(&mas);
7451
7452	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7453	do {
7454		entry = mas_slot(&mas, slots, offset);
7455		if (!last && !entry) {
7456			pr_err("Sequential nulls end at %p[%u]\n",
7457				mas_mn(&mas), offset);
7458		}
7459		MT_BUG_ON(mt, !last && !entry);
7460		last = entry;
7461		if (offset == mas_data_end(&mas)) {
7462			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7463			if (mas_is_overflow(&mas))
7464				return;
7465			offset = 0;
7466			slots = ma_slots(mte_to_node(mas.node),
7467					 mte_node_type(mas.node));
7468		} else {
7469			offset++;
7470		}
7471
7472	} while (!mas_is_overflow(&mas));
7473}
7474
7475/*
7476 * validate a maple tree by checking:
7477 * 1. The limits (pivots are within mas->min to mas->max)
7478 * 2. The gap is correctly set in the parents
7479 */
7480void mt_validate(struct maple_tree *mt)
7481{
7482	unsigned char end;
7483
7484	MA_STATE(mas, mt, 0, 0);
7485	rcu_read_lock();
7486	mas_start(&mas);
7487	if (!mas_is_active(&mas))
7488		goto done;
7489
7490	while (!mte_is_leaf(mas.node))
7491		mas_descend(&mas);
7492
7493	while (!mas_is_overflow(&mas)) {
7494		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7495		end = mas_data_end(&mas);
7496		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
7497				(mas.max != ULONG_MAX))) {
7498			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
7499		}
7500
7501		mas_validate_parent_slot(&mas);
7502		mas_validate_limits(&mas);
7503		mas_validate_child_slot(&mas);
7504		if (mt_is_alloc(mt))
7505			mas_validate_gaps(&mas);
7506		mas_dfs_postorder(&mas, ULONG_MAX);
7507	}
7508	mt_validate_nulls(mt);
7509done:
7510	rcu_read_unlock();
7512}
7513EXPORT_SYMBOL_GPL(mt_validate);
7514
7515void mas_dump(const struct ma_state *mas)
7516{
7517	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7518	switch (mas->status) {
7519	case ma_active:
7520		pr_err("(ma_active)");
7521		break;
7522	case ma_none:
7523		pr_err("(ma_none)");
7524		break;
7525	case ma_root:
7526		pr_err("(ma_root)");
7527		break;
7528	case ma_start:
7529		pr_err("(ma_start) ");
7530		break;
7531	case ma_pause:
7532		pr_err("(ma_pause) ");
7533		break;
7534	case ma_overflow:
7535		pr_err("(ma_overflow) ");
7536		break;
7537	case ma_underflow:
7538		pr_err("(ma_underflow) ");
7539		break;
7540	case ma_error:
7541		pr_err("(ma_error) ");
7542		break;
7543	}
7544
7545	pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
7546	       mas->index, mas->last);
7547	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7548	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7549	if (mas->index > mas->last)
7550		pr_err("Check index & last\n");
7551}
7552EXPORT_SYMBOL_GPL(mas_dump);
7553
7554void mas_wr_dump(const struct ma_wr_state *wr_mas)
7555{
7556	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7557	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7558	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7559	       wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
7560	       wr_mas->end_piv);
7561}
7562EXPORT_SYMBOL_GPL(mas_wr_dump);
7563
7564#endif /* CONFIG_DEBUG_MAPLE_TREE */