xref: /openbmc/linux/lib/maple_tree.c (revision c7a806d9)
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Maple Tree implementation
4  * Copyright (c) 2018-2022 Oracle Corporation
5  * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6  *	    Matthew Wilcox <willy@infradead.org>
7  */
8 
9 /*
10  * DOC: Interesting implementation details of the Maple Tree
11  *
12  * Each node type has a number of slots for entries and a number of slots for
13  * pivots.  In the case of dense nodes, the pivots are implied by the position
14  * and are simply the slot index + the minimum of the node.
15  *
16  * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
17  * indicate that the tree is specifying ranges.  Pivots may appear in the
18  * subtree with an entry attached to the value, whereas keys are unique to a
19  * specific position of a B-tree.  Pivot values are inclusive of the slot with
20  * the same index.
21  *
22  *
23  * The following illustrates the layout of a range64 node's slots and pivots.
24  *
25  *
26  *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
27  *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
28  *           │   │   │   │     │    │    │    │    └─ Implied maximum
29  *           │   │   │   │     │    │    │    └─ Pivot 14
30  *           │   │   │   │     │    │    └─ Pivot 13
31  *           │   │   │   │     │    └─ Pivot 12
32  *           │   │   │   │     └─ Pivot 11
33  *           │   │   │   └─ Pivot 2
34  *           │   │   └─ Pivot 1
35  *           │   └─ Pivot 0
36  *           └─  Implied minimum
37  *
38  * Slot contents:
39  *  Internal (non-leaf) nodes contain pointers to other nodes.
40  *  Leaf nodes contain entries.
41  *
42  * The location of interest is often referred to as an offset.  All offsets have
43  * a slot, but the last offset has an implied pivot from the node above (or
44  * ULONG_MAX for the root node).
45  *
46  * Ranges complicate certain write activities.  When modifying any of
47  * the B-tree variants, it is known that one entry will either be added or
48  * deleted.  When modifying the Maple Tree, one store operation may overwrite
49  * the entire data set, or one half of the tree, or the middle half of the tree.
50  *
51  */
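
/*
 * Example (illustrative sketch, not part of the original documentation): a
 * root maple_leaf_64 holding entry A over 10-15 and entry B over 20-20 could
 * lay out as:
 *
 *  Slots:  | NULL |  A |  NULL |  B | NULL | ...
 *  Pivots: |   9  | 15 |   19  | 20 |   0  | ...
 *
 * With the implied minimum of 0, slot 0 covers 0-9, slot 1 covers 10-15, and
 * so on.  The zero pivot at offset 4 marks the end of the data; the entry in
 * that final slot runs to the implied maximum (ULONG_MAX at the root).
 */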
52 
53 
54 #include <linux/maple_tree.h>
55 #include <linux/xarray.h>
56 #include <linux/types.h>
57 #include <linux/export.h>
58 #include <linux/slab.h>
59 #include <linux/limits.h>
60 #include <asm/barrier.h>
61 
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/maple_tree.h>
64 
65 #define MA_ROOT_PARENT 1
66 
67 /*
68  * Maple state flags
69  * * MA_STATE_BULK		- Bulk insert mode
70  * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
71  * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
72  */
73 #define MA_STATE_BULK		1
74 #define MA_STATE_REBALANCE	2
75 #define MA_STATE_PREALLOC	4
76 
77 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
78 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
79 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
80 static struct kmem_cache *maple_node_cache;
81 
82 #ifdef CONFIG_DEBUG_MAPLE_TREE
83 static const unsigned long mt_max[] = {
84 	[maple_dense]		= MAPLE_NODE_SLOTS,
85 	[maple_leaf_64]		= ULONG_MAX,
86 	[maple_range_64]	= ULONG_MAX,
87 	[maple_arange_64]	= ULONG_MAX,
88 };
89 #define mt_node_max(x) mt_max[mte_node_type(x)]
90 #endif
91 
92 static const unsigned char mt_slots[] = {
93 	[maple_dense]		= MAPLE_NODE_SLOTS,
94 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
95 	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
96 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
97 };
98 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
99 
100 static const unsigned char mt_pivots[] = {
101 	[maple_dense]		= 0,
102 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
103 	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
104 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
105 };
106 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
107 
108 static const unsigned char mt_min_slots[] = {
109 	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
110 	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
111 	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
112 	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
113 };
114 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
115 
116 #define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
117 #define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)
118 
119 struct maple_big_node {
120 	struct maple_pnode *parent;
121 	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
122 	union {
123 		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
124 		struct {
125 			unsigned long padding[MAPLE_BIG_NODE_GAPS];
126 			unsigned long gap[MAPLE_BIG_NODE_GAPS];
127 		};
128 	};
129 	unsigned char b_end;
130 	enum maple_type type;
131 };
132 
133 /*
134  * The maple_subtree_state is used to build a tree to replace a segment of an
135  * existing tree in a more atomic way.  Any walkers of the older tree will hit a
136  * dead node and restart on updates.
137  */
138 struct maple_subtree_state {
139 	struct ma_state *orig_l;	/* Original left side of subtree */
140 	struct ma_state *orig_r;	/* Original right side of subtree */
141 	struct ma_state *l;		/* New left side of subtree */
142 	struct ma_state *m;		/* New middle of subtree (rare) */
143 	struct ma_state *r;		/* New right side of subtree */
144 	struct ma_topiary *free;	/* nodes to be freed */
145 	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
146 	struct maple_big_node *bn;
147 };
148 
149 /* Functions */
150 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
151 {
152 	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
153 }
154 
155 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
156 {
157 	return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
158 				     nodes);
159 }
160 
161 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
162 {
163 	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
164 }
165 
166 static void mt_free_rcu(struct rcu_head *head)
167 {
168 	struct maple_node *node = container_of(head, struct maple_node, rcu);
169 
170 	kmem_cache_free(maple_node_cache, node);
171 }
172 
173 /*
174  * ma_free_rcu() - Use rcu callback to free a maple node
175  * @node: The node to free
176  *
177  * The maple tree uses the parent pointer to indicate this node is no longer in
178  * use and will be freed.
179  */
180 static void ma_free_rcu(struct maple_node *node)
181 {
182 	node->parent = ma_parent_ptr(node);
183 	call_rcu(&node->rcu, mt_free_rcu);
184 }
185 
186 
187 static void mas_set_height(struct ma_state *mas)
188 {
189 	unsigned int new_flags = mas->tree->ma_flags;
190 
191 	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
192 	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
193 	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
194 	mas->tree->ma_flags = new_flags;
195 }
196 
197 static unsigned int mas_mt_height(struct ma_state *mas)
198 {
199 	return mt_height(mas->tree);
200 }
201 
202 static inline enum maple_type mte_node_type(const struct maple_enode *entry)
203 {
204 	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
205 		MAPLE_NODE_TYPE_MASK;
206 }
207 
208 static inline bool ma_is_dense(const enum maple_type type)
209 {
210 	return type < maple_leaf_64;
211 }
212 
213 static inline bool ma_is_leaf(const enum maple_type type)
214 {
215 	return type < maple_range_64;
216 }
217 
218 static inline bool mte_is_leaf(const struct maple_enode *entry)
219 {
220 	return ma_is_leaf(mte_node_type(entry));
221 }
222 
223 /*
224  * We also reserve values with the bottom two bits set to '10', which are
225  * below 4096.
226  */
227 static inline bool mt_is_reserved(const void *entry)
228 {
229 	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
230 		xa_is_internal(entry);
231 }
232 
233 static inline void mas_set_err(struct ma_state *mas, long err)
234 {
235 	mas->node = MA_ERROR(err);
236 }
237 
238 static inline bool mas_is_ptr(struct ma_state *mas)
239 {
240 	return mas->node == MAS_ROOT;
241 }
242 
243 static inline bool mas_is_start(struct ma_state *mas)
244 {
245 	return mas->node == MAS_START;
246 }
247 
248 bool mas_is_err(struct ma_state *mas)
249 {
250 	return xa_is_err(mas->node);
251 }
252 
253 static inline bool mas_searchable(struct ma_state *mas)
254 {
255 	if (mas_is_none(mas))
256 		return false;
257 
258 	if (mas_is_ptr(mas))
259 		return false;
260 
261 	return true;
262 }
263 
264 static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
265 {
266 	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
267 }
268 
269 /*
270  * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
271  * @entry: The maple encoded node
272  *
273  * Return: a maple topiary pointer
274  */
275 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
276 {
277 	return (struct maple_topiary *)
278 		((unsigned long)entry & ~MAPLE_NODE_MASK);
279 }
280 
281 /*
282  * mas_mn() - Get the maple state node.
283  * @mas: The maple state
284  *
285  * Return: the maple node (not encoded - bare pointer).
286  */
287 static inline struct maple_node *mas_mn(const struct ma_state *mas)
288 {
289 	return mte_to_node(mas->node);
290 }
291 
292 /*
293  * mte_set_node_dead() - Set a maple encoded node as dead.
294  * @mn: The maple encoded node.
295  */
296 static inline void mte_set_node_dead(struct maple_enode *mn)
297 {
298 	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
299 	smp_wmb(); /* Needed for RCU */
300 }
301 
302 /* Bit 1 indicates the root is a node */
303 #define MAPLE_ROOT_NODE			0x02
304 /* maple_type stored bit 3-6 */
305 #define MAPLE_ENODE_TYPE_SHIFT		0x03
306 /* Bit 2 means a NULL somewhere below */
307 #define MAPLE_ENODE_NULL		0x04
308 
309 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
310 					     enum maple_type type)
311 {
312 	return (void *)((unsigned long)node |
313 			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
314 }
315 
316 static inline void *mte_mk_root(const struct maple_enode *node)
317 {
318 	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
319 }
320 
321 static inline void *mte_safe_root(const struct maple_enode *node)
322 {
323 	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
324 }
325 
326 static inline void *mte_set_full(const struct maple_enode *node)
327 {
328 	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
329 }
330 
331 static inline void *mte_clear_full(const struct maple_enode *node)
332 {
333 	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
334 }
335 
336 static inline bool mte_has_null(const struct maple_enode *node)
337 {
338 	return (unsigned long)node & MAPLE_ENODE_NULL;
339 }
340 
341 static inline bool ma_is_root(struct maple_node *node)
342 {
343 	return ((unsigned long)node->parent & MA_ROOT_PARENT);
344 }
345 
346 static inline bool mte_is_root(const struct maple_enode *node)
347 {
348 	return ma_is_root(mte_to_node(node));
349 }
350 
351 static inline bool mas_is_root_limits(const struct ma_state *mas)
352 {
353 	return !mas->min && mas->max == ULONG_MAX;
354 }
355 
356 static inline bool mt_is_alloc(struct maple_tree *mt)
357 {
358 	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
359 }
360 
361 /*
362  * The Parent Pointer
363  * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
364  * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
365  * bit values need an extra bit to store the offset.  This extra bit comes from
366  * a reuse of the last bit in the node type.  This is possible by using bit 1 to
367  * indicate if bit 2 is part of the type or the slot.
368  *
369  * Node types:
370  *  0x??1 = Root
371  *  0x?00 = 16 bit nodes
372  *  0x010 = 32 bit nodes
373  *  0x110 = 64 bit nodes
374  *
375  * Slot size and alignment
376  *  0b??1 : Root
377  *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
378  *  0b010 : 32 bit values, type in 0-2, slot in 3-7
379  *  0b110 : 64 bit values, type in 0-2, slot in 3-7
380  */
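
/*
 * Worked example (illustrative sketch): a child stored in slot 5 of a
 * maple_range_64 parent at the 256B-aligned address p encodes its parent
 * pointer as:
 *
 *	p | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	  == p | 0x28 | 0x06 == p | 0x2e
 *
 * Decoding: bit 1 is set (MAPLE_PARENT_NOT_RANGE16), so the slot is
 * (0x2e & MAPLE_PARENT_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT == 5, and the
 * bare parent node is recovered by clearing MAPLE_NODE_MASK.
 */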
381 
382 #define MAPLE_PARENT_ROOT		0x01
383 
384 #define MAPLE_PARENT_SLOT_SHIFT		0x03
385 #define MAPLE_PARENT_SLOT_MASK		0xF8
386 
387 #define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
388 #define MAPLE_PARENT_16B_SLOT_MASK	0xFC
389 
390 #define MAPLE_PARENT_RANGE64		0x06
391 #define MAPLE_PARENT_RANGE32		0x04
392 #define MAPLE_PARENT_NOT_RANGE16	0x02
393 
394 /*
395  * mte_parent_shift() - Get the parent shift for the slot storage.
396  * @parent: The parent pointer cast as an unsigned long
397  * Return: The shift into that pointer to the start of the slot number
398  */
399 static inline unsigned long mte_parent_shift(unsigned long parent)
400 {
401 	/* Note bit 1 == 0 means 16B */
402 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
403 		return MAPLE_PARENT_SLOT_SHIFT;
404 
405 	return MAPLE_PARENT_16B_SLOT_SHIFT;
406 }
407 
408 /*
409  * mte_parent_slot_mask() - Get the slot mask for the parent.
410  * @parent: The parent pointer cast as an unsigned long.
411  * Return: The slot mask for that parent.
412  */
413 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
414 {
415 	/* Note bit 1 == 0 means 16B */
416 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
417 		return MAPLE_PARENT_SLOT_MASK;
418 
419 	return MAPLE_PARENT_16B_SLOT_MASK;
420 }
421 
422 /*
423  * mte_parent_enum() - Return the maple_type of the parent from the stored
424  * parent type.
425  * @p_enode: The maple_enode whose parent type is to be decoded
426  * @mt: The maple tree, used to tell maple_range_64 from maple_arange_64
427  * Return: The node->parent maple_type
428  */
429 static inline
430 enum maple_type mte_parent_enum(struct maple_enode *p_enode,
431 				struct maple_tree *mt)
432 {
433 	unsigned long p_type;
434 
435 	p_type = (unsigned long)p_enode;
436 	if (p_type & MAPLE_PARENT_ROOT)
437 		return 0; /* Validated in the caller. */
438 
439 	p_type &= MAPLE_NODE_MASK;
440 	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
441 
442 	switch (p_type) {
443 	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
444 		if (mt_is_alloc(mt))
445 			return maple_arange_64;
446 		return maple_range_64;
447 	}
448 
449 	return 0;
450 }
451 
452 static inline
453 enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
454 {
455 	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
456 }
457 
458 /*
459  * mte_set_parent() - Set the parent node and encode the slot
460  * @enode: The encoded maple node.
461  * @parent: The encoded maple node that is the parent of @enode.
462  * @slot: The slot that @enode resides in @parent.
463  *
464  * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
465  * parent type.
466  */
467 static inline
468 void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
469 		    unsigned char slot)
470 {
471 	unsigned long val = (unsigned long) parent;
472 	unsigned long shift;
473 	unsigned long type;
474 	enum maple_type p_type = mte_node_type(parent);
475 
476 	BUG_ON(p_type == maple_dense);
477 	BUG_ON(p_type == maple_leaf_64);
478 
479 	switch (p_type) {
480 	case maple_range_64:
481 	case maple_arange_64:
482 		shift = MAPLE_PARENT_SLOT_SHIFT;
483 		type = MAPLE_PARENT_RANGE64;
484 		break;
485 	default:
486 	case maple_dense:
487 	case maple_leaf_64:
488 		shift = type = 0;
489 		break;
490 	}
491 
492 	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
493 	val |= (slot << shift) | type;
494 	mte_to_node(enode)->parent = ma_parent_ptr(val);
495 }
496 
497 /*
498  * mte_parent_slot() - get the parent slot of @enode.
499  * @enode: The encoded maple node.
500  *
501  * Return: The slot in the parent node where @enode resides.
502  */
503 static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
504 {
505 	unsigned long val = (unsigned long) mte_to_node(enode)->parent;
506 
507 	/* Root. */
508 	if (val & 1)
509 		return 0;
510 
511 	/*
512 	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
513 	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
514 	 */
515 	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
516 }
517 
518 /*
519  * mte_parent() - Get the parent of @enode.
520  * @enode: The encoded maple node.
521  *
522  * Return: The parent maple node.
523  */
524 static inline struct maple_node *mte_parent(const struct maple_enode *enode)
525 {
526 	return (void *)((unsigned long)
527 			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
528 }
529 
530 /*
531  * ma_dead_node() - check if the @node is dead.
532  * @node: The maple node
533  *
534  * Return: true if dead, false otherwise.
535  */
536 static inline bool ma_dead_node(const struct maple_node *node)
537 {
538 	struct maple_node *parent = (void *)((unsigned long)
539 					     node->parent & ~MAPLE_NODE_MASK);
540 
541 	return (parent == node);
542 }
543 /*
544  * mte_dead_node() - check if the @enode is dead.
545  * @enode: The encoded maple node
546  *
547  * Return: true if dead, false otherwise.
548  */
549 static inline bool mte_dead_node(const struct maple_enode *enode)
550 {
551 	struct maple_node *parent, *node;
552 
553 	node = mte_to_node(enode);
554 	parent = mte_parent(enode);
555 	return (parent == node);
556 }
557 
558 /*
559  * mas_allocated() - Get the number of nodes allocated in a maple state.
560  * @mas: The maple state
561  *
562  * The ma_state alloc member is overloaded to hold a pointer to the first
563  * allocated node or to the number of requested nodes to allocate.  If bit 0 is
564  * set, then the alloc contains the number of requested nodes.  If there is an
565  * allocated node, then the total allocated nodes is in that node.
566  *
567  * Return: The total number of nodes allocated
568  */
569 static inline unsigned long mas_allocated(const struct ma_state *mas)
570 {
571 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
572 		return 0;
573 
574 	return mas->alloc->total;
575 }
576 
577 /*
578  * mas_set_alloc_req() - Set the requested number of allocations.
579  * @mas: the maple state
580  * @count: the number of allocations.
581  *
582  * The requested number of allocations is either in the first allocated node,
583  * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
584  * no allocated node.  Set the request either in the node or do the necessary
585  * encoding to store in @mas->alloc directly.
586  */
587 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
588 {
589 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
590 		if (!count)
591 			mas->alloc = NULL;
592 		else
593 			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
594 		return;
595 	}
596 
597 	mas->alloc->request_count = count;
598 }
599 
600 /*
601  * mas_alloc_req() - get the requested number of allocations.
602  * @mas: The maple state
603  *
604  * The alloc count is either stored directly in @mas, or in
605  * @mas->alloc->request_count if there is at least one node allocated.  Decode
606  * the request count if it's stored directly in @mas->alloc.
607  *
608  * Return: The allocation request count.
609  */
610 static inline unsigned int mas_alloc_req(const struct ma_state *mas)
611 {
612 	if ((unsigned long)mas->alloc & 0x1)
613 		return (unsigned long)(mas->alloc) >> 1;
614 	else if (mas->alloc)
615 		return mas->alloc->request_count;
616 	return 0;
617 }
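
/*
 * Example of the encoding above (illustrative sketch): with no nodes
 * allocated, requesting 3 nodes stores mas->alloc = (3 << 1) | 1 == 0x7.
 * mas_allocated() sees bit 0 set and reports 0 nodes, while mas_alloc_req()
 * decodes 0x7 >> 1 == 3.  Once a node is allocated, the pending request
 * count lives in mas->alloc->request_count instead.
 */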
618 
619 /*
620  * ma_pivots() - Get a pointer to the maple node pivots.
621  * @node - the maple node
622  * @type - the node type
623  *
624  * Return: A pointer to the maple node pivots
625  */
626 static inline unsigned long *ma_pivots(struct maple_node *node,
627 					   enum maple_type type)
628 {
629 	switch (type) {
630 	case maple_arange_64:
631 		return node->ma64.pivot;
632 	case maple_range_64:
633 	case maple_leaf_64:
634 		return node->mr64.pivot;
635 	case maple_dense:
636 		return NULL;
637 	}
638 	return NULL;
639 }
640 
641 /*
642  * ma_gaps() - Get a pointer to the maple node gaps.
643  * @node - the maple node
644  * @type - the node type
645  *
646  * Return: A pointer to the maple node gaps
647  */
648 static inline unsigned long *ma_gaps(struct maple_node *node,
649 				     enum maple_type type)
650 {
651 	switch (type) {
652 	case maple_arange_64:
653 		return node->ma64.gap;
654 	case maple_range_64:
655 	case maple_leaf_64:
656 	case maple_dense:
657 		return NULL;
658 	}
659 	return NULL;
660 }
661 
662 /*
663  * mte_pivot() - Get the pivot at @piv of the maple encoded node.
664  * @mn: The maple encoded node.
665  * @piv: The pivot.
666  *
667  * Return: the pivot at @piv of @mn.
668  */
669 static inline unsigned long mte_pivot(const struct maple_enode *mn,
670 				 unsigned char piv)
671 {
672 	struct maple_node *node = mte_to_node(mn);
673 	enum maple_type type = mte_node_type(mn);
674 	if (piv >= mt_pivots[type]) {
675 		WARN_ON(1);
676 		return 0;
677 	}
678 	switch (type) {
679 	case maple_arange_64:
680 		return node->ma64.pivot[piv];
681 	case maple_range_64:
682 	case maple_leaf_64:
683 		return node->mr64.pivot[piv];
684 	case maple_dense:
685 		return 0;
686 	}
687 	return 0;
688 }
689 
690 /*
691  * mas_safe_pivot() - get the pivot at @piv or mas->max.
692  * @mas: The maple state
693  * @pivots: The pointer to the maple node pivots
694  * @piv: The pivot to fetch
695  * @type: The maple node type
696  *
697  * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
698  * otherwise.
699  */
700 static inline unsigned long
701 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
702 	       unsigned char piv, enum maple_type type)
703 {
704 	if (piv >= mt_pivots[type])
705 		return mas->max;
706 
707 	return pivots[piv];
708 }
709 
710 /*
711  * mas_safe_min() - Return the minimum for a given offset.
712  * @mas: The maple state
713  * @pivots: The pointer to the maple node pivots
714  * @offset: The offset into the pivot array
715  *
716  * Return: The minimum range value that is contained in @offset.
717  */
718 static inline unsigned long
719 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
720 {
721 	if (likely(offset))
722 		return pivots[offset - 1] + 1;
723 
724 	return mas->min;
725 }
726 
727 /*
728  * mas_logical_pivot() - Get the logical pivot of a given offset.
729  * @mas: The maple state
730  * @pivots: The pointer to the maple node pivots
731  * @offset: The offset into the pivot array
732  * @type: The maple node type
733  *
734  * When there is no value at a pivot (beyond the end of the data), then the
735  * pivot is actually @mas->max.
736  *
737  * Return: the logical pivot of a given @offset.
738  */
739 static inline unsigned long
740 mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
741 		  unsigned char offset, enum maple_type type)
742 {
743 	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
744 
745 	if (likely(lpiv))
746 		return lpiv;
747 
748 	if (likely(offset))
749 		return mas->max;
750 
751 	return lpiv;
752 }
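
/*
 * Example (illustrative sketch): in a maple_range_64 leaf with
 * pivots {5, 9, 0, ...} and mas->max == 20, offsets 0 and 1 yield the stored
 * pivots 5 and 9.  The stored pivot at offset 2 is 0 (unused), so
 * mas_logical_pivot() substitutes mas->max and returns 20, the true end of
 * the range at that offset.
 */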
753 
754 /*
755  * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
756  * @mn: The encoded maple node
757  * @piv: The pivot offset
758  * @val: The value of the pivot
759  */
760 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
761 				unsigned long val)
762 {
763 	struct maple_node *node = mte_to_node(mn);
764 	enum maple_type type = mte_node_type(mn);
765 
766 	BUG_ON(piv >= mt_pivots[type]);
767 	switch (type) {
768 	default:
769 	case maple_range_64:
770 	case maple_leaf_64:
771 		node->mr64.pivot[piv] = val;
772 		break;
773 	case maple_arange_64:
774 		node->ma64.pivot[piv] = val;
775 		break;
776 	case maple_dense:
777 		break;
778 	}
779 
780 }
781 
782 /*
783  * ma_slots() - Get a pointer to the maple node slots.
784  * @mn: The maple node
785  * @mt: The maple node type
786  *
787  * Return: A pointer to the maple node slots
788  */
789 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
790 {
791 	switch (mt) {
792 	default:
793 	case maple_arange_64:
794 		return mn->ma64.slot;
795 	case maple_range_64:
796 	case maple_leaf_64:
797 		return mn->mr64.slot;
798 	case maple_dense:
799 		return mn->slot;
800 	}
801 }
802 
803 static inline bool mt_locked(const struct maple_tree *mt)
804 {
805 	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
806 		lockdep_is_held(&mt->ma_lock);
807 }
808 
809 static inline void *mt_slot(const struct maple_tree *mt,
810 		void __rcu **slots, unsigned char offset)
811 {
812 	return rcu_dereference_check(slots[offset], mt_locked(mt));
813 }
814 
815 /*
816  * mas_slot_locked() - Get the slot value when holding the maple tree lock.
817  * @mas: The maple state
818  * @slots: The pointer to the slots
819  * @offset: The offset into the slots array to fetch
820  *
821  * Return: The entry stored in @slots at the @offset.
822  */
823 static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
824 				       unsigned char offset)
825 {
826 	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
827 }
828 
829 /*
830  * mas_slot() - Get the slot value when not holding the maple tree lock.
831  * @mas: The maple state
832  * @slots: The pointer to the slots
833  * @offset: The offset into the slots array to fetch
834  *
835  * Return: The entry stored in @slots at the @offset
836  */
837 static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
838 			     unsigned char offset)
839 {
840 	return mt_slot(mas->tree, slots, offset);
841 }
842 
843 /*
844  * mas_root() - Get the maple tree root.
845  * @mas: The maple state.
846  *
847  * Return: The pointer to the root of the tree
848  */
849 static inline void *mas_root(struct ma_state *mas)
850 {
851 	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
852 }
853 
854 static inline void *mt_root_locked(struct maple_tree *mt)
855 {
856 	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
857 }
858 
859 /*
860  * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
861  * @mas: The maple state.
862  *
863  * Return: The pointer to the root of the tree
864  */
865 static inline void *mas_root_locked(struct ma_state *mas)
866 {
867 	return mt_root_locked(mas->tree);
868 }
869 
870 static inline struct maple_metadata *ma_meta(struct maple_node *mn,
871 					     enum maple_type mt)
872 {
873 	switch (mt) {
874 	case maple_arange_64:
875 		return &mn->ma64.meta;
876 	default:
877 		return &mn->mr64.meta;
878 	}
879 }
880 
881 /*
882  * ma_set_meta() - Set the metadata information of a node.
883  * @mn: The maple node
884  * @mt: The maple node type
885  * @offset: The offset of the highest sub-gap in this node.
886  * @end: The end of the data in this node.
887  */
888 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
889 			       unsigned char offset, unsigned char end)
890 {
891 	struct maple_metadata *meta = ma_meta(mn, mt);
892 
893 	meta->gap = offset;
894 	meta->end = end;
895 }
896 
897 /*
898  * ma_meta_end() - Get the data end of a node from the metadata
899  * @mn: The maple node
900  * @mt: The maple node type
901  */
902 static inline unsigned char ma_meta_end(struct maple_node *mn,
903 					enum maple_type mt)
904 {
905 	struct maple_metadata *meta = ma_meta(mn, mt);
906 
907 	return meta->end;
908 }
909 
910 /*
911  * ma_meta_gap() - Get the largest gap location of a node from the metadata
912  * @mn: The maple node
913  * @mt: The maple node type
914  */
915 static inline unsigned char ma_meta_gap(struct maple_node *mn,
916 					enum maple_type mt)
917 {
918 	BUG_ON(mt != maple_arange_64);
919 
920 	return mn->ma64.meta.gap;
921 }
922 
923 /*
924  * ma_set_meta_gap() - Set the largest gap location in a node's metadata
925  * @mn: The maple node
926  * @mt: The maple node type
927  * @offset: The location of the largest gap.
928  */
929 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
930 				   unsigned char offset)
931 {
932 
933 	struct maple_metadata *meta = ma_meta(mn, mt);
934 
935 	meta->gap = offset;
936 }
937 
938 /*
939  * mat_add() - Add @dead_enode to the ma_topiary list of dead nodes.
940  * @mat - the ma_topiary, a linked list of dead nodes.
941  * @dead_enode - the node to be marked as dead and added to the tail of the list
942  *
943  * Add the @dead_enode to the linked list in @mat.
944  */
945 static inline void mat_add(struct ma_topiary *mat,
946 			   struct maple_enode *dead_enode)
947 {
948 	mte_set_node_dead(dead_enode);
949 	mte_to_mat(dead_enode)->next = NULL;
950 	if (!mat->tail) {
951 		mat->tail = mat->head = dead_enode;
952 		return;
953 	}
954 
955 	mte_to_mat(mat->tail)->next = dead_enode;
956 	mat->tail = dead_enode;
957 }
958 
959 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
960 static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
961 
962 /*
963  * mas_mat_free() - Free all nodes in a dead list.
964  * @mas - the maple state
965  * @mat - the ma_topiary linked list of dead nodes to free.
966  *
967  * Free walk a dead list.
968  */
969 static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
970 {
971 	struct maple_enode *next;
972 
973 	while (mat->head) {
974 		next = mte_to_mat(mat->head)->next;
975 		mas_free(mas, mat->head);
976 		mat->head = next;
977 	}
978 }
979 
980 /*
981  * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
982  * @mas - the maple state
983  * @mat - the ma_topiary linked list of dead nodes to free.
984  *
985  * Destroy walk a dead list.
986  */
987 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
988 {
989 	struct maple_enode *next;
990 
991 	while (mat->head) {
992 		next = mte_to_mat(mat->head)->next;
993 		mte_destroy_walk(mat->head, mat->mtree);
994 		mat->head = next;
995 	}
996 }
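
/*
 * Usage sketch for the topiary lists above (illustrative; assumes a
 * zero-initialized struct ma_topiary with ->mtree set, as an MA_TOPIARY()
 * style initializer would provide):
 *
 *	struct ma_topiary mat = { .mtree = mas->tree };
 *
 *	mat_add(&mat, old_enode);	- mark dead and queue for freeing
 *	...				- publish the replacement nodes
 *	mas_mat_free(mas, &mat);	- free only the queued nodes
 *
 * mas_mat_destroy() is used instead when whole subtrees hang off the dead
 * nodes and must be walked as well as freed.
 */
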
997 /*
998  * mas_descend() - Descend into the slot stored in the ma_state.
999  * @mas - the maple state.
1000  *
1001  * Note: Not RCU safe, only use in write side or debug code.
1002  */
1003 static inline void mas_descend(struct ma_state *mas)
1004 {
1005 	enum maple_type type;
1006 	unsigned long *pivots;
1007 	struct maple_node *node;
1008 	void __rcu **slots;
1009 
1010 	node = mas_mn(mas);
1011 	type = mte_node_type(mas->node);
1012 	pivots = ma_pivots(node, type);
1013 	slots = ma_slots(node, type);
1014 
1015 	if (mas->offset)
1016 		mas->min = pivots[mas->offset - 1] + 1;
1017 	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1018 	mas->node = mas_slot(mas, slots, mas->offset);
1019 }
1020 
1021 /*
1022  * mte_set_gap() - Set a maple node gap.
1023  * @mn: The encoded maple node
1024  * @gap: The offset of the gap to set
1025  * @val: The gap value
1026  */
1027 static inline void mte_set_gap(const struct maple_enode *mn,
1028 				 unsigned char gap, unsigned long val)
1029 {
1030 	switch (mte_node_type(mn)) {
1031 	default:
1032 		break;
1033 	case maple_arange_64:
1034 		mte_to_node(mn)->ma64.gap[gap] = val;
1035 		break;
1036 	}
1037 }
1038 
1039 /*
1040  * mas_ascend() - Walk up a level of the tree.
1041  * @mas: The maple state
1042  *
1043  * Sets the @mas->max and @mas->min to the correct values when walking up.  This
1044  * may cause several levels of walking up to find the correct min and max.
1045  * May find a dead node which will cause a premature return.
1046  * Return: 1 on dead node, 0 otherwise
1047  */
1048 static int mas_ascend(struct ma_state *mas)
1049 {
1050 	struct maple_enode *p_enode; /* parent enode. */
1051 	struct maple_enode *a_enode; /* ancestor enode. */
1052 	struct maple_node *a_node; /* ancestor node. */
1053 	struct maple_node *p_node; /* parent node. */
1054 	unsigned char a_slot;
1055 	enum maple_type a_type;
1056 	unsigned long min, max;
1057 	unsigned long *pivots;
1058 	unsigned char offset;
1059 	bool set_max = false, set_min = false;
1060 
1061 	a_node = mas_mn(mas);
1062 	if (ma_is_root(a_node)) {
1063 		mas->offset = 0;
1064 		return 0;
1065 	}
1066 
1067 	p_node = mte_parent(mas->node);
1068 	if (unlikely(a_node == p_node))
1069 		return 1;
1070 	a_type = mas_parent_enum(mas, mas->node);
1071 	offset = mte_parent_slot(mas->node);
1072 	a_enode = mt_mk_node(p_node, a_type);
1073 
1074 	/* Check to make sure all parent information is still accurate */
1075 	if (p_node != mte_parent(mas->node))
1076 		return 1;
1077 
1078 	mas->node = a_enode;
1079 	mas->offset = offset;
1080 
1081 	if (mte_is_root(a_enode)) {
1082 		mas->max = ULONG_MAX;
1083 		mas->min = 0;
1084 		return 0;
1085 	}
1086 
1087 	min = 0;
1088 	max = ULONG_MAX;
1089 	do {
1090 		p_enode = a_enode;
1091 		a_type = mas_parent_enum(mas, p_enode);
1092 		a_node = mte_parent(p_enode);
1093 		a_slot = mte_parent_slot(p_enode);
1094 		pivots = ma_pivots(a_node, a_type);
1095 		a_enode = mt_mk_node(a_node, a_type);
1096 
1097 		if (!set_min && a_slot) {
1098 			set_min = true;
1099 			min = pivots[a_slot - 1] + 1;
1100 		}
1101 
1102 		if (!set_max && a_slot < mt_pivots[a_type]) {
1103 			set_max = true;
1104 			max = pivots[a_slot];
1105 		}
1106 
1107 		if (unlikely(ma_dead_node(a_node)))
1108 			return 1;
1109 
1110 		if (unlikely(ma_is_root(a_node)))
1111 			break;
1112 
1113 	} while (!set_min || !set_max);
1114 
1115 	mas->max = max;
1116 	mas->min = min;
1117 	return 0;
1118 }
1119 
1120 /*
1121  * mas_pop_node() - Get a previously allocated maple node from the maple state.
1122  * @mas: The maple state
1123  *
1124  * Return: A pointer to a maple node.
1125  */
1126 static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1127 {
1128 	struct maple_alloc *ret, *node = mas->alloc;
1129 	unsigned long total = mas_allocated(mas);
1130 
1131 	/* nothing or a request pending. */
1132 	if (unlikely(!total))
1133 		return NULL;
1134 
1135 	if (total == 1) {
1136 		/* single allocation in this ma_state */
1137 		mas->alloc = NULL;
1138 		ret = node;
1139 		goto single_node;
1140 	}
1141 
1142 	if (!node->node_count) {
1143 		/* Single allocation in this node. */
1144 		mas->alloc = node->slot[0];
1145 		node->slot[0] = NULL;
1146 		mas->alloc->total = node->total - 1;
1147 		ret = node;
1148 		goto new_head;
1149 	}
1150 
1151 	node->total--;
1152 	ret = node->slot[node->node_count];
1153 	node->slot[node->node_count--] = NULL;
1154 
1155 single_node:
1156 new_head:
1157 	ret->total = 0;
1158 	ret->node_count = 0;
1159 	if (ret->request_count) {
1160 		mas_set_alloc_req(mas, ret->request_count + 1);
1161 		ret->request_count = 0;
1162 	}
1163 	return (struct maple_node *)ret;
1164 }
1165 
1166 /*
1167  * mas_push_node() - Push a node back on the maple state allocation.
1168  * @mas: The maple state
1169  * @used: The used maple node
1170  *
1171  * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
1172  * requested node count as necessary.
1173  */
1174 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1175 {
1176 	struct maple_alloc *reuse = (struct maple_alloc *)used;
1177 	struct maple_alloc *head = mas->alloc;
1178 	unsigned long count;
1179 	unsigned int requested = mas_alloc_req(mas);
1180 
1181 	memset(reuse, 0, sizeof(*reuse));
1182 	count = mas_allocated(mas);
1183 
1184 	if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
1185 		if (head->slot[0])
1186 			head->node_count++;
1187 		head->slot[head->node_count] = reuse;
1188 		head->total++;
1189 		goto done;
1190 	}
1191 
1192 	reuse->total = 1;
1193 	if ((head) && !((unsigned long)head & 0x1)) {
1194 		head->request_count = 0;
1195 		reuse->slot[0] = head;
1196 		reuse->total += head->total;
1197 	}
1198 
1199 	mas->alloc = reuse;
1200 done:
1201 	if (requested > 1)
1202 		mas_set_alloc_req(mas, requested - 1);
1203 }
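
/*
 * Sketch of the resulting allocation list (illustrative): mas->alloc points
 * at a head maple_alloc whose slot[0] may chain to another chunk of spares:
 *
 *	mas->alloc -> { total = N, node_count = 2,
 *			slot = { next_chunk, spare1, spare2, NULL, ... } }
 *
 * mas_pop_node() hands out slot[node_count] (spare2 here) and decrements
 * node_count; mas_push_node() zeroes the node and stacks it back the same
 * way.  When node_count reaches 0, the head itself is handed out and
 * slot[0] becomes the new mas->alloc.
 */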
1204 
1205 /*
1206  * mas_alloc_nodes() - Allocate nodes into a maple state
1207  * @mas: The maple state
1208  * @gfp: The GFP Flags
1209  */
1210 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1211 {
1212 	struct maple_alloc *node;
1213 	unsigned long allocated = mas_allocated(mas);
1214 	unsigned long success = allocated;
1215 	unsigned int requested = mas_alloc_req(mas);
1216 	unsigned int count;
1217 	void **slots = NULL;
1218 	unsigned int max_req = 0;
1219 
1220 	if (!requested)
1221 		return;
1222 
1223 	mas_set_alloc_req(mas, 0);
1224 	if (mas->mas_flags & MA_STATE_PREALLOC) {
1225 		if (allocated)
1226 			return;
1227 		WARN_ON(!allocated);
1228 	}
1229 
1230 	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
1231 		node = (struct maple_alloc *)mt_alloc_one(gfp);
1232 		if (!node)
1233 			goto nomem_one;
1234 
1235 		if (allocated)
1236 			node->slot[0] = mas->alloc;
1237 
1238 		success++;
1239 		mas->alloc = node;
1240 		requested--;
1241 	}
1242 
1243 	node = mas->alloc;
1244 	while (requested) {
1245 		max_req = MAPLE_ALLOC_SLOTS;
1246 		if (node->slot[0]) {
1247 			unsigned int offset = node->node_count + 1;
1248 
1249 			slots = (void **)&node->slot[offset];
1250 			max_req -= offset;
1251 		} else {
1252 			slots = (void **)&node->slot;
1253 		}
1254 
1255 		max_req = min(requested, max_req);
1256 		count = mt_alloc_bulk(gfp, max_req, slots);
1257 		if (!count)
1258 			goto nomem_bulk;
1259 
1260 		node->node_count += count;
1261 		/* zero indexed. */
1262 		if (slots == (void **)&node->slot)
1263 			node->node_count--;
1264 
1265 		success += count;
1266 		node = node->slot[0];
1267 		requested -= count;
1268 	}
1269 	mas->alloc->total = success;
1270 	return;
1271 
1272 nomem_bulk:
1273 	/* Clean up potential freed allocations on bulk failure */
1274 	memset(slots, 0, max_req * sizeof(unsigned long));
1275 nomem_one:
1276 	mas_set_alloc_req(mas, requested);
1277 	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1278 		mas->alloc->total = success;
1279 	mas_set_err(mas, -ENOMEM);
1280 	return;
1281 
1282 }
1283 
1284 /*
1285  * mas_free() - Free an encoded maple node
1286  * @mas: The maple state
1287  * @used: The encoded maple node to free.
1288  *
1289  * Uses rcu free if necessary, pushes @used back on the maple state allocations
1290  * otherwise.
1291  */
1292 static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1293 {
1294 	struct maple_node *tmp = mte_to_node(used);
1295 
1296 	if (mt_in_rcu(mas->tree))
1297 		ma_free_rcu(tmp);
1298 	else
1299 		mas_push_node(mas, tmp);
1300 }
1301 
1302 /*
1303  * mas_node_count_gfp() - Check if enough nodes are allocated and request more
1304  * if there are not enough nodes.
1305  * @mas: The maple state
1306  * @count: The number of nodes needed
1307  * @gfp: the gfp flags
1308  */
1309 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1310 {
1311 	unsigned long allocated = mas_allocated(mas);
1312 
1313 	if (allocated < count) {
1314 		mas_set_alloc_req(mas, count - allocated);
1315 		mas_alloc_nodes(mas, gfp);
1316 	}
1317 }
1318 
1319 /*
1320  * mas_node_count() - Check if enough nodes are allocated and request more if
1321  * there are not enough nodes.
1322  * @mas: The maple state
1323  * @count: The number of nodes needed
1324  *
1325  * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1326  */
1327 static void mas_node_count(struct ma_state *mas, int count)
1328 {
1329 	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
1330 }
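
/*
 * Usage sketch (illustrative; the worst-case count below is an assumption,
 * the real write paths compute their own):
 *
 *	mas_node_count(mas, 1 + mas_mt_height(mas) * 3);
 *	if (unlikely(mas_is_err(mas)))
 *		return 0;	- or retry with stronger gfp flags
 *
 * A failed GFP_NOWAIT attempt leaves -ENOMEM in the state and records the
 * outstanding request so a later allocation can satisfy it.
 */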
1331 
1332 /*
1333  * mas_start() - Sets up maple state for operations.
1334  * @mas: The maple state.
1335  *
1336  * If mas->node == MAS_START, then set the min, max, depth, and offset to
1337  * defaults.
1338  *
1339  * Return:
1340  * - If mas->node is an error or not MAS_START, return NULL.
1341  * - If it's an empty tree:     NULL & mas->node == MAS_NONE
1342  * - If it's a single entry:    The entry & mas->node == MAS_ROOT
1343  * - If it's a tree:            NULL & mas->node == safe root node.
1344  */
1345 static inline struct maple_enode *mas_start(struct ma_state *mas)
1346 {
1347 	if (likely(mas_is_start(mas))) {
1348 		struct maple_enode *root;
1349 
1350 		mas->node = MAS_NONE;
1351 		mas->min = 0;
1352 		mas->max = ULONG_MAX;
1353 		mas->depth = 0;
1354 		mas->offset = 0;
1355 
1356 		root = mas_root(mas);
1357 		/* Tree with nodes */
1358 		if (likely(xa_is_node(root))) {
1359 			mas->depth = 1;
1360 			mas->node = mte_safe_root(root);
1361 			return NULL;
1362 		}
1363 
1364 		/* empty tree */
1365 		if (unlikely(!root)) {
1366 			mas->offset = MAPLE_NODE_SLOTS;
1367 			return NULL;
1368 		}
1369 
1370 		/* Single entry tree */
1371 		mas->node = MAS_ROOT;
1372 		mas->offset = MAPLE_NODE_SLOTS;
1373 
1374 		/* The single root entry only covers index 0. */
1375 		if (mas->index > 0)
1376 			return NULL;
1377 
1378 		return root;
1379 	}
1380 
1381 	return NULL;
1382 }
1383 
1384 /*
1385  * ma_data_end() - Find the end of the data in a node.
1386  * @node: The maple node
1387  * @type: The maple node type
1388  * @pivots: The array of pivots in the node
1389  * @max: The maximum value in the node
1390  *
1391  * Uses metadata to find the end of the data when possible.
1392  * Return: The zero indexed last slot with data (may be null).
1393  */
1394 static inline unsigned char ma_data_end(struct maple_node *node,
1395 					enum maple_type type,
1396 					unsigned long *pivots,
1397 					unsigned long max)
1398 {
1399 	unsigned char offset;
1400 
1401 	if (type == maple_arange_64)
1402 		return ma_meta_end(node, type);
1403 
1404 	offset = mt_pivots[type] - 1;
1405 	if (likely(!pivots[offset]))
1406 		return ma_meta_end(node, type);
1407 
1408 	if (likely(pivots[offset] == max))
1409 		return offset;
1410 
1411 	return mt_pivots[type];
1412 }
1413 
1414 /*
1415  * mas_data_end() - Find the end of the data (slot).
1416  * @mas: the maple state
1417  *
1418  * This method is optimized to check the metadata of a node if the node type
1419  * supports data end metadata.
1420  *
1421  * Return: The zero indexed last slot with data (may be null).
1422  */
1423 static inline unsigned char mas_data_end(struct ma_state *mas)
1424 {
1425 	enum maple_type type;
1426 	struct maple_node *node;
1427 	unsigned char offset;
1428 	unsigned long *pivots;
1429 
1430 	type = mte_node_type(mas->node);
1431 	node = mas_mn(mas);
1432 	if (type == maple_arange_64)
1433 		return ma_meta_end(node, type);
1434 
1435 	pivots = ma_pivots(node, type);
1436 	offset = mt_pivots[type] - 1;
1437 	if (likely(!pivots[offset]))
1438 		return ma_meta_end(node, type);
1439 
1440 	if (likely(pivots[offset] == mas->max))
1441 		return offset;
1442 
1443 	return mt_pivots[type];
1444 }
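
/*
 * Example (illustrative sketch): a maple_range_64 node with mas->max == 100
 * and pivots {10, 20, 0, ..., 0} has its last possible pivot clear, so the
 * end comes from the metadata via ma_meta_end().  If instead every pivot is
 * in use, the node either ends exactly at the last pivot
 * (pivots[mt_pivots[type] - 1] == mas->max) or is full through slot
 * mt_pivots[type].
 */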
1445 
1446 /*
1447  * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1448  * @mas - the maple state
1449  *
1450  * Return: The maximum gap in the leaf.
1451  */
1452 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1453 {
1454 	enum maple_type mt;
1455 	unsigned long pstart, gap, max_gap;
1456 	struct maple_node *mn;
1457 	unsigned long *pivots;
1458 	void __rcu **slots;
1459 	unsigned char i;
1460 	unsigned char max_piv;
1461 
1462 	mt = mte_node_type(mas->node);
1463 	mn = mas_mn(mas);
1464 	slots = ma_slots(mn, mt);
1465 	max_gap = 0;
1466 	if (unlikely(ma_is_dense(mt))) {
1467 		gap = 0;
1468 		for (i = 0; i < mt_slots[mt]; i++) {
1469 			if (slots[i]) {
1470 				if (gap > max_gap)
1471 					max_gap = gap;
1472 				gap = 0;
1473 			} else {
1474 				gap++;
1475 			}
1476 		}
1477 		if (gap > max_gap)
1478 			max_gap = gap;
1479 		return max_gap;
1480 	}
1481 
1482 	/*
1483 	 * Checking the first implied pivot optimizes the loop below, and slot 1
1484 	 * may be skipped if there is a gap in slot 0.
1485 	 */
1486 	pivots = ma_pivots(mn, mt);
1487 	if (likely(!slots[0])) {
1488 		max_gap = pivots[0] - mas->min + 1;
1489 		i = 2;
1490 	} else {
1491 		i = 1;
1492 	}
1493 
1494 	/* reduce max_piv as the special case is checked before the loop */
1495 	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1496 	/*
1497 	 * Check end implied pivot which can only be a gap on the right most
1498 	 * node.
1499 	 */
1500 	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1501 		gap = ULONG_MAX - pivots[max_piv];
1502 		if (gap > max_gap)
1503 			max_gap = gap;
1504 	}
1505 
1506 	for (; i <= max_piv; i++) {
1507 		/* data == no gap. */
1508 		if (likely(slots[i]))
1509 			continue;
1510 
1511 		pstart = pivots[i - 1];
1512 		gap = pivots[i] - pstart;
1513 		if (gap > max_gap)
1514 			max_gap = gap;
1515 
1516 		/* There cannot be two gaps in a row. */
1517 		i++;
1518 	}
1519 	return max_gap;
1520 }
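
/*
 * Worked example (illustrative sketch): a leaf with mas->min == 0,
 * mas->max == 100, slots {A, NULL, B, ...} and pivots {5, 9, 15, 0, ...}.
 * Slot 1 is empty, so its gap is pivots[1] - pivots[0] == 9 - 5 == 4
 * (indices 6-9).  The trailing empty range 16-100 is only counted when this
 * is the right-most node, i.e. mas->max == ULONG_MAX.
 */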
1521 
1522 /*
1523  * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1524  * @node: The maple node
1525  * @gaps: The pointer to the gaps
1526  * @mt: The maple node type
1527  * @off: Pointer to store the offset location of the gap.
1528  *
1529  * Uses the metadata end to scan backwards across set gaps.
1530  *
1531  * Return: The maximum gap value
1532  */
1533 static inline unsigned long
1534 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1535 	    unsigned char *off)
1536 {
1537 	unsigned char offset, i;
1538 	unsigned long max_gap = 0;
1539 
1540 	i = offset = ma_meta_end(node, mt);
1541 	do {
1542 		if (gaps[i] > max_gap) {
1543 			max_gap = gaps[i];
1544 			offset = i;
1545 		}
1546 	} while (i--);
1547 
1548 	*off = offset;
1549 	return max_gap;
1550 }
1551 
1552 /*
1553  * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1554  * @mas: The maple state.
1555  *
1556  * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
1557  *
1558  * Return: The gap value.
1559  */
1560 static inline unsigned long mas_max_gap(struct ma_state *mas)
1561 {
1562 	unsigned long *gaps;
1563 	unsigned char offset;
1564 	enum maple_type mt;
1565 	struct maple_node *node;
1566 
1567 	mt = mte_node_type(mas->node);
1568 	if (ma_is_leaf(mt))
1569 		return mas_leaf_max_gap(mas);
1570 
1571 	node = mas_mn(mas);
1572 	offset = ma_meta_gap(node, mt);
1573 	if (offset == MAPLE_ARANGE64_META_MAX)
1574 		return 0;
1575 
1576 	gaps = ma_gaps(node, mt);
1577 	return gaps[offset];
1578 }
1579 
1580 /*
1581  * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1582  * @mas: The maple state
1583  * @offset: The gap offset in the parent to set
1584  * @new: The new gap value.
1585  *
1586  * Set the parent gap then continue to set the gap upwards, using the metadata
1587  * of the parent to see if it is necessary to check the node above.
1588  */
1589 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1590 		unsigned long new)
1591 {
1592 	unsigned long meta_gap = 0;
1593 	struct maple_node *pnode;
1594 	struct maple_enode *penode;
1595 	unsigned long *pgaps;
1596 	unsigned char meta_offset;
1597 	enum maple_type pmt;
1598 
1599 	pnode = mte_parent(mas->node);
1600 	pmt = mas_parent_enum(mas, mas->node);
1601 	penode = mt_mk_node(pnode, pmt);
1602 	pgaps = ma_gaps(pnode, pmt);
1603 
1604 ascend:
1605 	meta_offset = ma_meta_gap(pnode, pmt);
1606 	if (meta_offset == MAPLE_ARANGE64_META_MAX)
1607 		meta_gap = 0;
1608 	else
1609 		meta_gap = pgaps[meta_offset];
1610 
1611 	pgaps[offset] = new;
1612 
1613 	if (meta_gap == new)
1614 		return;
1615 
1616 	if (offset != meta_offset) {
1617 		if (meta_gap > new)
1618 			return;
1619 
1620 		ma_set_meta_gap(pnode, pmt, offset);
1621 	} else if (new < meta_gap) {
1622 		meta_offset = 15;
1623 		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1624 		ma_set_meta_gap(pnode, pmt, meta_offset);
1625 	}
1626 
1627 	if (ma_is_root(pnode))
1628 		return;
1629 
1630 	/* Go to the parent node. */
1631 	pnode = mte_parent(penode);
1632 	pmt = mas_parent_enum(mas, penode);
1633 	pgaps = ma_gaps(pnode, pmt);
1634 	offset = mte_parent_slot(penode);
1635 	penode = mt_mk_node(pnode, pmt);
1636 	goto ascend;
1637 }
1638 
1639 /*
1640  * mas_update_gap() - Update a node's gaps and propagate up if necessary.
1641  * @mas - the maple state.
1642  */
1643 static inline void mas_update_gap(struct ma_state *mas)
1644 {
1645 	unsigned char pslot;
1646 	unsigned long p_gap;
1647 	unsigned long max_gap;
1648 
1649 	if (!mt_is_alloc(mas->tree))
1650 		return;
1651 
1652 	if (mte_is_root(mas->node))
1653 		return;
1654 
1655 	max_gap = mas_max_gap(mas);
1656 
1657 	pslot = mte_parent_slot(mas->node);
1658 	p_gap = ma_gaps(mte_parent(mas->node),
1659 			mas_parent_enum(mas, mas->node))[pslot];
1660 
1661 	if (p_gap != max_gap)
1662 		mas_parent_gap(mas, pslot, max_gap);
1663 }
1664 
1665 /*
1666  * mas_adopt_children() - Set the parent pointer of all children of @parent
1667  * to @parent with the slot encoded.
1668  * @mas - the maple state (for the tree)
1669  * @parent - the maple encoded node containing the children.
1670  */
1671 static inline void mas_adopt_children(struct ma_state *mas,
1672 		struct maple_enode *parent)
1673 {
1674 	enum maple_type type = mte_node_type(parent);
1675 	struct maple_node *node = mas_mn(mas);
1676 	void __rcu **slots = ma_slots(node, type);
1677 	unsigned long *pivots = ma_pivots(node, type);
1678 	struct maple_enode *child;
1679 	unsigned char offset;
1680 
1681 	offset = ma_data_end(node, type, pivots, mas->max);
1682 	do {
1683 		child = mas_slot_locked(mas, slots, offset);
1684 		mte_set_parent(child, parent, offset);
1685 	} while (offset--);
1686 }
1687 
1688 /*
1689  * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
1690  * parent encoding to locate the maple node in the tree.
1691  * @mas - the ma_state to use for operations.
1692  * @advanced - boolean to adopt the child nodes and free the old node (false) or
1693  * leave the node (true) and handle the adoption and free elsewhere.
1694  */
1695 static inline void mas_replace(struct ma_state *mas, bool advanced)
1696 	__must_hold(mas->tree->lock)
1697 {
1698 	struct maple_node *mn = mas_mn(mas);
1699 	struct maple_enode *old_enode;
1700 	unsigned char offset = 0;
1701 	void __rcu **slots = NULL;
1702 
1703 	if (ma_is_root(mn)) {
1704 		old_enode = mas_root_locked(mas);
1705 	} else {
1706 		offset = mte_parent_slot(mas->node);
1707 		slots = ma_slots(mte_parent(mas->node),
1708 				 mas_parent_enum(mas, mas->node));
1709 		old_enode = mas_slot_locked(mas, slots, offset);
1710 	}
1711 
1712 	if (!advanced && !mte_is_leaf(mas->node))
1713 		mas_adopt_children(mas, mas->node);
1714 
1715 	if (mte_is_root(mas->node)) {
1716 		mn->parent = ma_parent_ptr(
1717 			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
1718 		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1719 		mas_set_height(mas);
1720 	} else {
1721 		rcu_assign_pointer(slots[offset], mas->node);
1722 	}
1723 
1724 	if (!advanced)
1725 		mas_free(mas, old_enode);
1726 }
1727 
1728 /*
1729  * mas_new_child() - Find the new child of a node.
1730  * @mas: the maple state
1731  * @child: the maple state to store the child.
1732  */
1733 static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
1734 	__must_hold(mas->tree->lock)
1735 {
1736 	enum maple_type mt;
1737 	unsigned char offset;
1738 	unsigned char end;
1739 	unsigned long *pivots;
1740 	struct maple_enode *entry;
1741 	struct maple_node *node;
1742 	void __rcu **slots;
1743 
1744 	mt = mte_node_type(mas->node);
1745 	node = mas_mn(mas);
1746 	slots = ma_slots(node, mt);
1747 	pivots = ma_pivots(node, mt);
1748 	end = ma_data_end(node, mt, pivots, mas->max);
1749 	for (offset = mas->offset; offset <= end; offset++) {
1750 		entry = mas_slot_locked(mas, slots, offset);
1751 		if (mte_parent(entry) == node) {
1752 			*child = *mas;
1753 			mas->offset = offset + 1;
1754 			child->offset = offset;
1755 			mas_descend(child);
1756 			child->offset = 0;
1757 			return true;
1758 		}
1759 	}
1760 	return false;
1761 }
1762 
1763 /*
1764  * mab_shift_right() - Shift the data in @b_node right.  Note, does not clean
1765  * out the old data or set @b_node->b_end.
1766  * @b_node: the maple_big_node
1767  * @shift: the shift count
1768  */
1769 static inline void mab_shift_right(struct maple_big_node *b_node,
1770 				 unsigned char shift)
1771 {
1772 	unsigned long size = b_node->b_end * sizeof(unsigned long);
1773 
1774 	memmove(b_node->pivot + shift, b_node->pivot, size);
1775 	memmove(b_node->slot + shift, b_node->slot, size);
1776 	if (b_node->type == maple_arange_64)
1777 		memmove(b_node->gap + shift, b_node->gap, size);
1778 }
1779 
1780 /*
1781  * mab_middle_node() - Check if a middle node is needed (unlikely)
1782  * @b_node: the maple_big_node that contains the data.
1784  * @split: the potential split location
1785  * @slot_count: the size that can be stored in a single node being considered.
1786  *
1787  * Return: true if a middle node is required.
1788  */
1789 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1790 				   unsigned char slot_count)
1791 {
1792 	unsigned char size = b_node->b_end;
1793 
1794 	if (size >= 2 * slot_count)
1795 		return true;
1796 
1797 	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1798 		return true;
1799 
1800 	return false;
1801 }
1802 
1803 /*
1804  * mab_no_null_split() - ensure the split doesn't fall on a NULL
1805  * @b_node: the maple_big_node with the data
1806  * @split: the suggested split location
1807  * @slot_count: the number of slots in the node being considered.
1808  *
1809  * Return: the split location.
1810  */
1811 static inline int mab_no_null_split(struct maple_big_node *b_node,
1812 				    unsigned char split, unsigned char slot_count)
1813 {
1814 	if (!b_node->slot[split]) {
1815 		/*
1816 		 * If the split is less than the max slot && the right side will
1817 		 * still be sufficient, then increment the split on NULL.
1818 		 */
1819 		if ((split < slot_count - 1) &&
1820 		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1821 			split++;
1822 		else
1823 			split--;
1824 	}
1825 	return split;
1826 }
1827 
1828 /*
1829  * mab_calc_split() - Calculate the split location and if there needs to be two
1830  * splits.
1831  * @bn: The maple_big_node with the data
1832  * @mid_split: The second split, if required.  0 otherwise.
1833  *
1834  * Return: The first split location.  The middle split is set in @mid_split.
1835  */
1836 static inline int mab_calc_split(struct ma_state *mas,
1837 	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1838 {
1839 	unsigned char b_end = bn->b_end;
1840 	int split = b_end / 2; /* Assume equal split. */
1841 	unsigned char slot_min, slot_count = mt_slots[bn->type];
1842 
1843 	/*
1844 	 * To support gap tracking, all NULL entries are kept together and a node cannot
1845 	 * end on a NULL entry, with the exception of the left-most leaf.  The
1846 	 * limitation means that the split of a node must be checked for this condition
1847 	 * and be able to put more data in one direction or the other.
1848 	 */
1849 	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1850 		*mid_split = 0;
1851 		split = b_end - mt_min_slots[bn->type];
1852 
1853 		if (!ma_is_leaf(bn->type))
1854 			return split;
1855 
1856 		mas->mas_flags |= MA_STATE_REBALANCE;
1857 		if (!bn->slot[split])
1858 			split--;
1859 		return split;
1860 	}
1861 
1862 	/*
1863 	 * Although extremely rare, it is possible to enter what is known as the 3-way
1864 	 * split scenario.  The 3-way split comes about by means of a store of a range
1865 	 * that overwrites the end and beginning of two full nodes.  The result is a set
1866 	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
1867 	 * also be located in different parent nodes which are also full.  This can
1868 	 * carry upwards all the way to the root in the worst case.
1869 	 */
1870 	if (unlikely(mab_middle_node(bn, split, slot_count))) {
1871 		split = b_end / 3;
1872 		*mid_split = split * 2;
1873 	} else {
1874 		slot_min = mt_min_slots[bn->type];
1875 
1876 		*mid_split = 0;
1877 		/*
1878 		 * Avoid having a range less than the slot count unless it
1879 		 * causes one node to be deficient.
1880 		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
1881 		 */
1882 		while (((bn->pivot[split] - min) < slot_count - 1) &&
1883 		       (split < slot_count - 1) && (b_end - split > slot_min))
1884 			split++;
1885 	}
1886 
1887 	/* Avoid ending a node on a NULL entry */
1888 	split = mab_no_null_split(bn, split, slot_count);
1889 	if (!(*mid_split))
1890 		return split;
1891 
1892 	*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1893 
1894 	return split;
1895 }
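
/*
 * Worked example (illustrative sketch, using the 16 slots shown in the
 * layout diagram at the top of this file): a store that merges two full
 * leaves can leave b_end == 34 entries in @bn.  Since 34 >= 2 * 16,
 * mab_middle_node() demands a 3-way split: split == 34 / 3 == 11 and
 * *mid_split == 22, yielding nodes over offsets 0-11, 12-22 and 23-33,
 * subject to the NULL adjustments of mab_no_null_split().
 */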
1896 
1897 /*
1898  * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1899  * and set @b_node->b_end to the next free slot.
1900  * @mas: The maple state
1901  * @mas_start: The starting slot to copy
1902  * @mas_end: The end slot to copy (inclusively)
1903  * @b_node: The maple_big_node to place the data
1904  * @mab_start: The starting location in maple_big_node to store the data.
1905  */
1906 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1907 			unsigned char mas_end, struct maple_big_node *b_node,
1908 			unsigned char mab_start)
1909 {
1910 	enum maple_type mt;
1911 	struct maple_node *node;
1912 	void __rcu **slots;
1913 	unsigned long *pivots, *gaps;
1914 	int i = mas_start, j = mab_start;
1915 	unsigned char piv_end;
1916 
1917 	node = mas_mn(mas);
1918 	mt = mte_node_type(mas->node);
1919 	pivots = ma_pivots(node, mt);
1920 	if (!i) {
1921 		b_node->pivot[j] = pivots[i++];
1922 		if (unlikely(i > mas_end))
1923 			goto complete;
1924 		j++;
1925 	}
1926 
1927 	piv_end = min(mas_end, mt_pivots[mt]);
1928 	for (; i < piv_end; i++, j++) {
1929 		b_node->pivot[j] = pivots[i];
1930 		if (unlikely(!b_node->pivot[j]))
1931 			break;
1932 
1933 		if (unlikely(mas->max == b_node->pivot[j]))
1934 			goto complete;
1935 	}
1936 
1937 	if (likely(i <= mas_end))
1938 		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1939 
1940 complete:
1941 	b_node->b_end = ++j;
1942 	j -= mab_start;
1943 	slots = ma_slots(node, mt);
1944 	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1945 	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1946 		gaps = ma_gaps(node, mt);
1947 		memcpy(b_node->gap + mab_start, gaps + mas_start,
1948 		       sizeof(unsigned long) * j);
1949 	}
1950 }
1951 
1952 /*
1953  * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1954  * @mas: The maple state
1955  * @node: The maple node
1956  * @pivots: pointer to the maple node pivots
1957  * @mt: The maple type
1958  * @end: The assumed end
1959  *
1960  * Note, end may be incremented within this function but not modified at the
1961  * source.  This is fine since the metadata is the last thing to be stored in a
1962  * node during a write.
1963  */
1964 static inline void mas_leaf_set_meta(struct ma_state *mas,
1965 		struct maple_node *node, unsigned long *pivots,
1966 		enum maple_type mt, unsigned char end)
1967 {
1968 	/* There is no room for metadata already */
1969 	if (mt_pivots[mt] <= end)
1970 		return;
1971 
1972 	if (pivots[end] && pivots[end] < mas->max)
1973 		end++;
1974 
1975 	if (end < mt_slots[mt] - 1)
1976 		ma_set_meta(node, mt, 0, end);
1977 }
1978 
1979 /*
1980  * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
1981  * @b_node: the maple_big_node that has the data
1982  * @mab_start: the start location in @b_node.
1983  * @mab_end: The end location in @b_node (inclusively)
1984  * @mas: The maple state with the maple encoded node.
1985  */
1986 static inline void mab_mas_cp(struct maple_big_node *b_node,
1987 			      unsigned char mab_start, unsigned char mab_end,
1988 			      struct ma_state *mas, bool new_max)
1989 {
1990 	int i, j = 0;
1991 	enum maple_type mt = mte_node_type(mas->node);
1992 	struct maple_node *node = mte_to_node(mas->node);
1993 	void __rcu **slots = ma_slots(node, mt);
1994 	unsigned long *pivots = ma_pivots(node, mt);
1995 	unsigned long *gaps = NULL;
1996 	unsigned char end;
1997 
1998 	if (mab_end - mab_start > mt_pivots[mt])
1999 		mab_end--;
2000 
2001 	if (!pivots[mt_pivots[mt] - 1])
2002 		slots[mt_pivots[mt]] = NULL;
2003 
2004 	i = mab_start;
2005 	do {
2006 		pivots[j++] = b_node->pivot[i++];
2007 	} while (i <= mab_end && likely(b_node->pivot[i]));
2008 
2009 	memcpy(slots, b_node->slot + mab_start,
2010 	       sizeof(void *) * (i - mab_start));
2011 
2012 	if (new_max)
2013 		mas->max = b_node->pivot[i - 1];
2014 
2015 	end = j - 1;
2016 	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2017 		unsigned long max_gap = 0;
2018 		unsigned char offset = 15;
2019 
2020 		gaps = ma_gaps(node, mt);
2021 		do {
2022 			gaps[--j] = b_node->gap[--i];
2023 			if (gaps[j] > max_gap) {
2024 				offset = j;
2025 				max_gap = gaps[j];
2026 			}
2027 		} while (j);
2028 
2029 		ma_set_meta(node, mt, offset, end);
2030 	} else {
2031 		mas_leaf_set_meta(mas, node, pivots, mt, end);
2032 	}
2033 }
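
/*
 * Minimal sketch (not part of the kernel build) of the gap scan in
 * mab_mas_cp() above: while the gaps are copied, the offset of the largest
 * gap is remembered so it can be stored in the node metadata.  Hypothetical
 * standalone version over a plain array.
 */
static inline unsigned char example_max_gap_offset(const unsigned long *gaps,
						   unsigned char count)
{
	unsigned long max_gap = 0;
	unsigned char i, offset = 0;

	for (i = 0; i < count; i++) {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	}
	return offset;	/* recorded via ma_set_meta() in the real code */
}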
2034 
2035 /*
2036  * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2037  * @mas: the maple state with the maple encoded node of the sub-tree.
2038  *
 * Descend through a sub-tree and adopt children that do not have the correct
 * parents set.  Follow the nodes whose parent pointers are already correct, as
 * these are the newly created nodes that must be walked to find any other
 * incorrectly set parents at the lower levels.
2043  */
2044 static inline void mas_descend_adopt(struct ma_state *mas)
2045 {
2046 	struct ma_state list[3], next[3];
2047 	int i, n;
2048 
2049 	/*
	 * At each level there may be up to 3 correct parent pointers, which
	 * indicate the new nodes that need to be walked to find any new nodes at
	 * a lower level.
2052 	 */
2053 
2054 	for (i = 0; i < 3; i++) {
2055 		list[i] = *mas;
2056 		list[i].offset = 0;
2057 		next[i].offset = 0;
2058 	}
2059 	next[0] = *mas;
2060 
2061 	while (!mte_is_leaf(list[0].node)) {
2062 		n = 0;
2063 		for (i = 0; i < 3; i++) {
2064 			if (mas_is_none(&list[i]))
2065 				continue;
2066 
2067 			if (i && list[i-1].node == list[i].node)
2068 				continue;
2069 
2070 			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2071 				n++;
2072 
2073 			mas_adopt_children(&list[i], list[i].node);
2074 		}
2075 
2076 		while (n < 3)
2077 			next[n++].node = MAS_NONE;
2078 
2079 		/* descend by setting the list to the children */
2080 		for (i = 0; i < 3; i++)
2081 			list[i] = next[i];
2082 	}
2083 }
2084 
2085 /*
2086  * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2087  * @mas: The maple state
2088  * @end: The maple node end
2089  * @mt: The maple node type
2090  */
2091 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2092 				      enum maple_type mt)
2093 {
2094 	if (!(mas->mas_flags & MA_STATE_BULK))
2095 		return;
2096 
2097 	if (mte_is_root(mas->node))
2098 		return;
2099 
2100 	if (end > mt_min_slots[mt]) {
2101 		mas->mas_flags &= ~MA_STATE_REBALANCE;
2102 		return;
2103 	}
2104 }
2105 
2106 /*
2107  * mas_store_b_node() - Store an @entry into the b_node while also copying the
2108  * data from a maple encoded node.
2109  * @wr_mas: the maple write state
2110  * @b_node: the maple_big_node to fill with data
2111  * @offset_end: the offset to end copying
 *
 * Set @b_node->b_end to the end of the data stored in @b_node.
2114  */
2115 static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
2116 		struct maple_big_node *b_node, unsigned char offset_end)
2117 {
2118 	unsigned char slot;
2119 	unsigned char b_end;
2120 	/* Possible underflow of piv will wrap back to 0 before use. */
2121 	unsigned long piv;
2122 	struct ma_state *mas = wr_mas->mas;
2123 
2124 	b_node->type = wr_mas->type;
2125 	b_end = 0;
2126 	slot = mas->offset;
2127 	if (slot) {
2128 		/* Copy start data up to insert. */
2129 		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2130 		b_end = b_node->b_end;
2131 		piv = b_node->pivot[b_end - 1];
2132 	} else
2133 		piv = mas->min - 1;
2134 
2135 	if (piv + 1 < mas->index) {
2136 		/* Handle range starting after old range */
2137 		b_node->slot[b_end] = wr_mas->content;
2138 		if (!wr_mas->content)
2139 			b_node->gap[b_end] = mas->index - 1 - piv;
2140 		b_node->pivot[b_end++] = mas->index - 1;
2141 	}
2142 
2143 	/* Store the new entry. */
2144 	mas->offset = b_end;
2145 	b_node->slot[b_end] = wr_mas->entry;
2146 	b_node->pivot[b_end] = mas->last;
2147 
2148 	/* Appended. */
2149 	if (mas->last >= mas->max)
2150 		goto b_end;
2151 
2152 	/* Handle new range ending before old range ends */
2153 	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2154 	if (piv > mas->last) {
2155 		if (piv == ULONG_MAX)
2156 			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2157 
2158 		if (offset_end != slot)
2159 			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2160 							  offset_end);
2161 
2162 		b_node->slot[++b_end] = wr_mas->content;
2163 		if (!wr_mas->content)
2164 			b_node->gap[b_end] = piv - mas->last + 1;
2165 		b_node->pivot[b_end] = piv;
2166 	}
2167 
2168 	slot = offset_end + 1;
2169 	if (slot > wr_mas->node_end)
2170 		goto b_end;
2171 
2172 	/* Copy end data to the end of the node. */
2173 	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2174 	b_node->b_end--;
2175 	return;
2176 
2177 b_end:
2178 	b_node->b_end = b_end;
2179 }
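
/*
 * Worked example (hypothetical values, not part of the kernel build) of the
 * staging performed above.  A leaf holds entries A, B, C with pivots
 * {10, 20, 30} and a store of D over the range 15-25 arrives; the staged
 * big node then contains four slots.
 */
static inline void example_stage_store(unsigned long *pivot, void **slot,
				       void *A, void *B, void *C, void *D)
{
	pivot[0] = 10; slot[0] = A;	/* start data copied unchanged	  */
	pivot[1] = 14; slot[1] = B;	/* old range truncated before 15  */
	pivot[2] = 25; slot[2] = D;	/* the new entry		  */
	pivot[3] = 30; slot[3] = C;	/* tail of the overwritten range  */
}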
2180 
2181 /*
2182  * mas_prev_sibling() - Find the previous node with the same parent.
2183  * @mas: the maple state
2184  *
2185  * Return: True if there is a previous sibling, false otherwise.
2186  */
2187 static inline bool mas_prev_sibling(struct ma_state *mas)
2188 {
2189 	unsigned int p_slot = mte_parent_slot(mas->node);
2190 
2191 	if (mte_is_root(mas->node))
2192 		return false;
2193 
2194 	if (!p_slot)
2195 		return false;
2196 
2197 	mas_ascend(mas);
2198 	mas->offset = p_slot - 1;
2199 	mas_descend(mas);
2200 	return true;
2201 }
2202 
2203 /*
2204  * mas_next_sibling() - Find the next node with the same parent.
2205  * @mas: the maple state
2206  *
2207  * Return: true if there is a next sibling, false otherwise.
2208  */
2209 static inline bool mas_next_sibling(struct ma_state *mas)
2210 {
2211 	MA_STATE(parent, mas->tree, mas->index, mas->last);
2212 
2213 	if (mte_is_root(mas->node))
2214 		return false;
2215 
2216 	parent = *mas;
2217 	mas_ascend(&parent);
2218 	parent.offset = mte_parent_slot(mas->node) + 1;
2219 	if (parent.offset > mas_data_end(&parent))
2220 		return false;
2221 
2222 	*mas = parent;
2223 	mas_descend(mas);
2224 	return true;
2225 }
2226 
2227 /*
2228  * mte_node_or_node() - Return the encoded node or MAS_NONE.
2229  * @enode: The encoded maple node.
2230  *
2231  * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2232  *
2233  * Return: @enode or MAS_NONE
2234  */
2235 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2236 {
2237 	if (enode)
2238 		return enode;
2239 
2240 	return ma_enode_ptr(MAS_NONE);
2241 }
2242 
2243 /*
2244  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2245  * @wr_mas: The maple write state
2246  *
2247  * Uses mas_slot_locked() and does not need to worry about dead nodes.
2248  */
2249 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2250 {
2251 	struct ma_state *mas = wr_mas->mas;
2252 	unsigned char count;
2253 	unsigned char offset;
2254 	unsigned long index, min, max;
2255 
2256 	if (unlikely(ma_is_dense(wr_mas->type))) {
2257 		wr_mas->r_max = wr_mas->r_min = mas->index;
2258 		mas->offset = mas->index = mas->min;
2259 		return;
2260 	}
2261 
2262 	wr_mas->node = mas_mn(wr_mas->mas);
2263 	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2264 	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2265 					       wr_mas->pivots, mas->max);
2266 	offset = mas->offset;
2267 	min = mas_safe_min(mas, wr_mas->pivots, offset);
2268 	if (unlikely(offset == count))
2269 		goto max;
2270 
2271 	max = wr_mas->pivots[offset];
2272 	index = mas->index;
2273 	if (unlikely(index <= max))
2274 		goto done;
2275 
2276 	if (unlikely(!max && offset))
2277 		goto max;
2278 
2279 	min = max + 1;
2280 	while (++offset < count) {
2281 		max = wr_mas->pivots[offset];
2282 		if (index <= max)
2283 			goto done;
2284 		else if (unlikely(!max))
2285 			break;
2286 
2287 		min = max + 1;
2288 	}
2289 
2290 max:
2291 	max = mas->max;
2292 done:
2293 	wr_mas->r_max = max;
2294 	wr_mas->r_min = min;
2295 	wr_mas->offset_end = mas->offset = offset;
2296 }
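
/*
 * Minimal sketch (not part of the kernel build) of the linear pivot scan
 * above: find the first offset whose pivot is at or above @index, which is
 * the slot whose range contains the index.  Hypothetical standalone version
 * over @count used pivots.
 */
static inline unsigned char example_pivot_walk(const unsigned long *pivots,
					       unsigned char count,
					       unsigned long index)
{
	unsigned char offset = 0;

	while (offset < count && pivots[offset] < index)
		offset++;

	return offset;
}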
2297 
2298 /*
2299  * mas_topiary_range() - Add a range of slots to the topiary.
2300  * @mas: The maple state
2301  * @destroy: The topiary to add the slots (usually destroy)
2302  * @start: The starting slot inclusively
2303  * @end: The end slot inclusively
2304  */
2305 static inline void mas_topiary_range(struct ma_state *mas,
2306 	struct ma_topiary *destroy, unsigned char start, unsigned char end)
2307 {
2308 	void __rcu **slots;
2309 	unsigned char offset;
2310 
2311 	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
2312 	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2313 	for (offset = start; offset <= end; offset++) {
2314 		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2315 
2316 		if (mte_dead_node(enode))
2317 			continue;
2318 
2319 		mat_add(destroy, enode);
2320 	}
2321 }
2322 
2323 /*
2324  * mast_topiary() - Add the portions of the tree to the removal list; either to
2325  * be freed or discarded (destroy walk).
2326  * @mast: The maple_subtree_state.
2327  */
2328 static inline void mast_topiary(struct maple_subtree_state *mast)
2329 {
2330 	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2331 	unsigned char r_start, r_end;
2332 	unsigned char l_start, l_end;
2333 	void __rcu **l_slots, **r_slots;
2334 
2335 	wr_mas.type = mte_node_type(mast->orig_l->node);
2336 	mast->orig_l->index = mast->orig_l->last;
2337 	mas_wr_node_walk(&wr_mas);
2338 	l_start = mast->orig_l->offset + 1;
2339 	l_end = mas_data_end(mast->orig_l);
2340 	r_start = 0;
2341 	r_end = mast->orig_r->offset;
2342 
2343 	if (r_end)
2344 		r_end--;
2345 
2346 	l_slots = ma_slots(mas_mn(mast->orig_l),
2347 			   mte_node_type(mast->orig_l->node));
2348 
2349 	r_slots = ma_slots(mas_mn(mast->orig_r),
2350 			   mte_node_type(mast->orig_r->node));
2351 
2352 	if ((l_start < l_end) &&
2353 	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2354 		l_start++;
2355 	}
2356 
2357 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2358 		if (r_end)
2359 			r_end--;
2360 	}
2361 
2362 	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2363 		return;
2364 
2365 	/* At the node where left and right sides meet, add the parts between */
2366 	if (mast->orig_l->node == mast->orig_r->node) {
2367 		return mas_topiary_range(mast->orig_l, mast->destroy,
2368 					     l_start, r_end);
2369 	}
2370 
2371 	/* mast->orig_r is different and consumed. */
2372 	if (mte_is_leaf(mast->orig_r->node))
2373 		return;
2374 
2375 	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

2379 	if (l_start <= l_end)
2380 		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2381 
2382 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2383 		r_start++;
2384 
2385 	if (r_start <= r_end)
2386 		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2387 }
2388 
2389 /*
2390  * mast_rebalance_next() - Rebalance against the next node
2391  * @mast: The maple subtree state
 */
2394 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2395 {
2396 	unsigned char b_end = mast->bn->b_end;
2397 
2398 	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2399 		   mast->bn, b_end);
2400 	mast->orig_r->last = mast->orig_r->max;
2401 }
2402 
2403 /*
2404  * mast_rebalance_prev() - Rebalance against the previous node
2405  * @mast: The maple subtree state
 */
2408 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2409 {
2410 	unsigned char end = mas_data_end(mast->orig_l) + 1;
2411 	unsigned char b_end = mast->bn->b_end;
2412 
2413 	mab_shift_right(mast->bn, end);
2414 	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2415 	mast->l->min = mast->orig_l->min;
2416 	mast->orig_l->index = mast->orig_l->min;
2417 	mast->bn->b_end = end + b_end;
2418 	mast->l->offset += end;
2419 }
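
/*
 * Minimal sketch (not part of the kernel build) of the prepend pattern used
 * above: the staged data is shifted right, then the left sibling's entries
 * are copied in front of it, mirroring the mab_shift_right() and
 * mas_mab_cp() pair.  Hypothetical standalone version over plain arrays.
 */
static inline void example_prepend(unsigned long *dst, unsigned char dst_n,
				   const unsigned long *src,
				   unsigned char src_n)
{
	memmove(dst + src_n, dst, sizeof(*dst) * dst_n);  /* shift right */
	memcpy(dst, src, sizeof(*src) * src_n);	/* copy in the sibling data */
}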
2420 
2421 /*
2422  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right.  Check the nodes to the right, then to the left, at
 * each level upwards until the root is reached.  Free and destroy as needed.
2425  * Data is copied into the @mast->bn.
2426  * @mast: The maple_subtree_state.
2427  */
2428 static inline
2429 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2430 {
2431 	struct ma_state r_tmp = *mast->orig_r;
2432 	struct ma_state l_tmp = *mast->orig_l;
2433 	struct maple_enode *ancestor = NULL;
2434 	unsigned char start, end;
2435 	unsigned char depth = 0;
2436 
2437 	r_tmp = *mast->orig_r;
2438 	l_tmp = *mast->orig_l;
2439 	do {
2440 		mas_ascend(mast->orig_r);
2441 		mas_ascend(mast->orig_l);
2442 		depth++;
2443 		if (!ancestor &&
2444 		    (mast->orig_r->node == mast->orig_l->node)) {
2445 			ancestor = mast->orig_r->node;
2446 			end = mast->orig_r->offset - 1;
2447 			start = mast->orig_l->offset + 1;
2448 		}
2449 
2450 		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2451 			if (!ancestor) {
2452 				ancestor = mast->orig_r->node;
2453 				start = 0;
2454 			}
2455 
2456 			mast->orig_r->offset++;
2457 			do {
2458 				mas_descend(mast->orig_r);
2459 				mast->orig_r->offset = 0;
2460 				depth--;
2461 			} while (depth);
2462 
2463 			mast_rebalance_next(mast);
2464 			do {
2465 				unsigned char l_off = 0;
2466 				struct maple_enode *child = r_tmp.node;
2467 
2468 				mas_ascend(&r_tmp);
2469 				if (ancestor == r_tmp.node)
2470 					l_off = start;
2471 
2472 				if (r_tmp.offset)
2473 					r_tmp.offset--;
2474 
2475 				if (l_off < r_tmp.offset)
2476 					mas_topiary_range(&r_tmp, mast->destroy,
2477 							  l_off, r_tmp.offset);
2478 
2479 				if (l_tmp.node != child)
2480 					mat_add(mast->free, child);
2481 
2482 			} while (r_tmp.node != ancestor);
2483 
2484 			*mast->orig_l = l_tmp;
2485 			return true;
2486 
2487 		} else if (mast->orig_l->offset != 0) {
2488 			if (!ancestor) {
2489 				ancestor = mast->orig_l->node;
2490 				end = mas_data_end(mast->orig_l);
2491 			}
2492 
2493 			mast->orig_l->offset--;
2494 			do {
2495 				mas_descend(mast->orig_l);
2496 				mast->orig_l->offset =
2497 					mas_data_end(mast->orig_l);
2498 				depth--;
2499 			} while (depth);
2500 
2501 			mast_rebalance_prev(mast);
2502 			do {
2503 				unsigned char r_off;
2504 				struct maple_enode *child = l_tmp.node;
2505 
2506 				mas_ascend(&l_tmp);
2507 				if (ancestor == l_tmp.node)
2508 					r_off = end;
2509 				else
2510 					r_off = mas_data_end(&l_tmp);
2511 
2512 				if (l_tmp.offset < r_off)
2513 					l_tmp.offset++;
2514 
2515 				if (l_tmp.offset < r_off)
2516 					mas_topiary_range(&l_tmp, mast->destroy,
2517 							  l_tmp.offset, r_off);
2518 
2519 				if (r_tmp.node != child)
2520 					mat_add(mast->free, child);
2521 
2522 			} while (l_tmp.node != ancestor);
2523 
2524 			*mast->orig_r = r_tmp;
2525 			return true;
2526 		}
2527 	} while (!mte_is_root(mast->orig_r->node));
2528 
2529 	*mast->orig_r = r_tmp;
2530 	*mast->orig_l = l_tmp;
2531 	return false;
2532 }
2533 
2534 /*
2535  * mast_ascend_free() - Add current original maple state nodes to the free list
2536  * and ascend.
2537  * @mast: the maple subtree state.
2538  *
2539  * Ascend the original left and right sides and add the previous nodes to the
2540  * free list.  Set the slots to point to the correct location in the new nodes.
2541  */
2542 static inline void
2543 mast_ascend_free(struct maple_subtree_state *mast)
2544 {
2545 	MA_WR_STATE(wr_mas, mast->orig_r,  NULL);
2546 	struct maple_enode *left = mast->orig_l->node;
2547 	struct maple_enode *right = mast->orig_r->node;
2548 
2549 	mas_ascend(mast->orig_l);
2550 	mas_ascend(mast->orig_r);
2551 	mat_add(mast->free, left);
2552 
2553 	if (left != right)
2554 		mat_add(mast->free, right);
2555 
2556 	mast->orig_r->offset = 0;
2557 	mast->orig_r->index = mast->r->max;
2558 	/* last should be larger than or equal to index */
2559 	if (mast->orig_r->last < mast->orig_r->index)
2560 		mast->orig_r->last = mast->orig_r->index;
2561 	/*
2562 	 * The node may not contain the value so set slot to ensure all
2563 	 * of the nodes contents are freed or destroyed.
2564 	 */
2565 	wr_mas.type = mte_node_type(mast->orig_r->node);
2566 	mas_wr_node_walk(&wr_mas);
2567 	/* Set up the left side of things */
2568 	mast->orig_l->offset = 0;
2569 	mast->orig_l->index = mast->l->min;
2570 	wr_mas.mas = mast->orig_l;
2571 	wr_mas.type = mte_node_type(mast->orig_l->node);
2572 	mas_wr_node_walk(&wr_mas);
2573 
2574 	mast->bn->type = wr_mas.type;
2575 }
2576 
2577 /*
2578  * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2579  * @mas: the maple state with the allocations.
2580  * @b_node: the maple_big_node with the type encoding.
2581  *
2582  * Use the node type from the maple_big_node to allocate a new node from the
2583  * ma_state.  This function exists mainly for code readability.
2584  *
2585  * Return: A new maple encoded node
2586  */
2587 static inline struct maple_enode
2588 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2589 {
2590 	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2591 }
2592 
2593 /*
2594  * mas_mab_to_node() - Set up right and middle nodes
2595  *
2596  * @mas: the maple state that contains the allocations.
2597  * @b_node: the node which contains the data.
2598  * @left: The pointer which will have the left node
2599  * @right: The pointer which may have the right node
2600  * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the data range, passed to the split calculation
2602  *
2603  * Return: the split of left.
2604  */
2605 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2606 	struct maple_big_node *b_node, struct maple_enode **left,
2607 	struct maple_enode **right, struct maple_enode **middle,
2608 	unsigned char *mid_split, unsigned long min)
2609 {
2610 	unsigned char split = 0;
2611 	unsigned char slot_count = mt_slots[b_node->type];
2612 
2613 	*left = mas_new_ma_node(mas, b_node);
2614 	*right = NULL;
2615 	*middle = NULL;
2616 	*mid_split = 0;
2617 
2618 	if (b_node->b_end < slot_count) {
2619 		split = b_node->b_end;
2620 	} else {
2621 		split = mab_calc_split(mas, b_node, mid_split, min);
2622 		*right = mas_new_ma_node(mas, b_node);
2623 	}
2624 
2625 	if (*mid_split)
2626 		*middle = mas_new_ma_node(mas, b_node);
2627 
	return split;
}
2631 
2632 /*
2633  * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2634  * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
2638  */
2639 static inline void mab_set_b_end(struct maple_big_node *b_node,
2640 				 struct ma_state *mas,
2641 				 void *entry)
2642 {
2643 	if (!entry)
2644 		return;
2645 
2646 	b_node->slot[b_node->b_end] = entry;
2647 	if (mt_is_alloc(mas->tree))
2648 		b_node->gap[b_node->b_end] = mas_max_gap(mas);
2649 	b_node->pivot[b_node->b_end++] = mas->max;
2650 }
2651 
2652 /*
2653  * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2654  * of @mas->node to either @left or @right, depending on @slot and @split
2655  *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
2661  */
2662 static inline void mas_set_split_parent(struct ma_state *mas,
2663 					struct maple_enode *left,
2664 					struct maple_enode *right,
2665 					unsigned char *slot, unsigned char split)
2666 {
2667 	if (mas_is_none(mas))
2668 		return;
2669 
2670 	if ((*slot) <= split)
2671 		mte_set_parent(mas->node, left, *slot);
2672 	else if (right)
2673 		mte_set_parent(mas->node, right, (*slot) - split - 1);
2674 
2675 	(*slot)++;
2676 }
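
/*
 * Worked example (not part of the kernel build) of the offset adjustment
 * above: with split = 7, a child at slot 5 keeps offset 5 in @left, while a
 * child at slot 9 becomes offset 9 - 7 - 1 = 1 in @right.  The function
 * name is hypothetical.
 */
static inline unsigned char example_new_parent_slot(unsigned char slot,
						    unsigned char split)
{
	return (slot <= split) ? slot : slot - split - 1;
}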
2677 
2678 /*
2679  * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the mid-split.
 * @slot: The offset
 * @split: The split location.
 * @mid_split: The middle split.
2686  */
2687 static inline void mte_mid_split_check(struct maple_enode **l,
2688 				       struct maple_enode **r,
2689 				       struct maple_enode *right,
2690 				       unsigned char slot,
2691 				       unsigned char *split,
2692 				       unsigned char mid_split)
2693 {
2694 	if (*r == right)
2695 		return;
2696 
2697 	if (slot < mid_split)
2698 		return;
2699 
2700 	*l = *r;
2701 	*r = right;
2702 	*split = mid_split;
2703 }
2704 
2705 /*
2706  * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
2707  * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node
 * @right: the right node
 * @split: the split location.
 * @mid_split: the location of the middle split.
2712  */
2713 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2714 					  struct maple_enode *left,
2715 					  struct maple_enode *middle,
2716 					  struct maple_enode *right,
2717 					  unsigned char split,
2718 					  unsigned char mid_split)
2719 {
2720 	unsigned char slot;
2721 	struct maple_enode *l = left;
2722 	struct maple_enode *r = right;
2723 
2724 	if (mas_is_none(mast->l))
2725 		return;
2726 
2727 	if (middle)
2728 		r = middle;
2729 
2730 	slot = mast->l->offset;
2731 
2732 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2733 	mas_set_split_parent(mast->l, l, r, &slot, split);
2734 
2735 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2736 	mas_set_split_parent(mast->m, l, r, &slot, split);
2737 
2738 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2739 	mas_set_split_parent(mast->r, l, r, &slot, split);
2740 }
2741 
2742 /*
2743  * mas_wmb_replace() - Write memory barrier and replace
2744  * @mas: The maple state
2745  * @free: the maple topiary list of nodes to free
2746  * @destroy: The maple topiary list of nodes to destroy (walk and free)
2747  *
2748  * Updates gap as necessary.
2749  */
2750 static inline void mas_wmb_replace(struct ma_state *mas,
2751 				   struct ma_topiary *free,
2752 				   struct ma_topiary *destroy)
2753 {
2754 	/* All nodes must see old data as dead prior to replacing that data */
2755 	smp_wmb(); /* Needed for RCU */
2756 
2757 	/* Insert the new data in the tree */
2758 	mas_replace(mas, true);
2759 
2760 	if (!mte_is_leaf(mas->node))
2761 		mas_descend_adopt(mas);
2762 
2763 	mas_mat_free(mas, free);
2764 
2765 	if (destroy)
2766 		mas_mat_destroy(mas, destroy);
2767 
2768 	if (mte_is_leaf(mas->node))
2769 		return;
2770 
2771 	mas_update_gap(mas);
2772 }
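
/*
 * Minimal sketch (not part of the kernel build) of the publication order
 * relied upon above: every store initializing a new node must be visible
 * before a pointer to the node is published, so lockless readers can never
 * observe a partially written node.  The same release semantics are what
 * rcu_assign_pointer() provides for a single slot.
 */
static inline void example_publish(void __rcu **slot, struct maple_node *new)
{
	/* ...all writes to *new must happen before this point... */
	rcu_assign_pointer(*slot, new);	/* implies the write barrier */
}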
2773 
2774 /*
2775  * mast_new_root() - Set a new tree root during subtree creation
2776  * @mast: The maple subtree state
2777  * @mas: The maple state
2778  */
2779 static inline void mast_new_root(struct maple_subtree_state *mast,
2780 				 struct ma_state *mas)
2781 {
2782 	mas_mn(mast->l)->parent =
2783 		ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2784 	if (!mte_dead_node(mast->orig_l->node) &&
2785 	    !mte_is_root(mast->orig_l->node)) {
2786 		do {
2787 			mast_ascend_free(mast);
2788 			mast_topiary(mast);
2789 		} while (!mte_is_root(mast->orig_l->node));
2790 	}
2791 	if ((mast->orig_l->node != mas->node) &&
2792 		   (mast->l->depth > mas_mt_height(mas))) {
2793 		mat_add(mast->free, mas->node);
2794 	}
2795 }
2796 
2797 /*
2798  * mast_cp_to_nodes() - Copy data out to nodes.
2799  * @mast: The maple subtree state
2800  * @left: The left encoded maple node
2801  * @middle: The middle encoded maple node
2802  * @right: The right encoded maple node
2803  * @split: The location to split between left and (middle ? middle : right)
2804  * @mid_split: The location to split between middle and right.
2805  */
2806 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2807 	struct maple_enode *left, struct maple_enode *middle,
2808 	struct maple_enode *right, unsigned char split, unsigned char mid_split)
2809 {
2810 	bool new_lmax = true;
2811 
2812 	mast->l->node = mte_node_or_none(left);
2813 	mast->m->node = mte_node_or_none(middle);
2814 	mast->r->node = mte_node_or_none(right);
2815 
2816 	mast->l->min = mast->orig_l->min;
2817 	if (split == mast->bn->b_end) {
2818 		mast->l->max = mast->orig_r->max;
2819 		new_lmax = false;
2820 	}
2821 
2822 	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2823 
2824 	if (middle) {
2825 		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2826 		mast->m->min = mast->bn->pivot[split] + 1;
2827 		split = mid_split;
2828 	}
2829 
2830 	mast->r->max = mast->orig_r->max;
2831 	if (right) {
2832 		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2833 		mast->r->min = mast->bn->pivot[split] + 1;
2834 	}
2835 }
2836 
2837 /*
2838  * mast_combine_cp_left - Copy in the original left side of the tree into the
2839  * combined data set in the maple subtree state big node.
2840  * @mast: The maple subtree state
2841  */
2842 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2843 {
2844 	unsigned char l_slot = mast->orig_l->offset;
2845 
2846 	if (!l_slot)
2847 		return;
2848 
2849 	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2850 }
2851 
2852 /*
 * mast_combine_cp_right() - Copy in the original right side of the tree into the
2854  * combined data set in the maple subtree state big node.
2855  * @mast: The maple subtree state
2856  */
2857 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2858 {
2859 	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2860 		return;
2861 
2862 	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2863 		   mt_slot_count(mast->orig_r->node), mast->bn,
2864 		   mast->bn->b_end);
2865 	mast->orig_r->last = mast->orig_r->max;
2866 }
2867 
2868 /*
 * mast_sufficient() - Check if the maple subtree state has enough data in the big
2870  * node to create at least one sufficient node
2871  * @mast: the maple subtree state
2872  */
2873 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2874 {
2875 	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2876 		return true;
2877 
2878 	return false;
2879 }
2880 
2881 /*
 * mast_overflow() - Check if there is too much data in the subtree state for a
2883  * single node.
2884  * @mast: The maple subtree state
2885  */
2886 static inline bool mast_overflow(struct maple_subtree_state *mast)
2887 {
2888 	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2889 		return true;
2890 
2891 	return false;
2892 }
2893 
2894 static inline void *mtree_range_walk(struct ma_state *mas)
2895 {
2896 	unsigned long *pivots;
2897 	unsigned char offset;
2898 	struct maple_node *node;
2899 	struct maple_enode *next, *last;
2900 	enum maple_type type;
2901 	void __rcu **slots;
2902 	unsigned char end;
2903 	unsigned long max, min;
2904 	unsigned long prev_max, prev_min;
2905 
2906 	next = mas->node;
2907 	min = mas->min;
2908 	max = mas->max;
2909 	do {
2910 		offset = 0;
2911 		last = next;
2912 		node = mte_to_node(next);
2913 		type = mte_node_type(next);
2914 		pivots = ma_pivots(node, type);
2915 		end = ma_data_end(node, type, pivots, max);
2916 		if (unlikely(ma_dead_node(node)))
2917 			goto dead_node;
2918 
2919 		if (pivots[offset] >= mas->index) {
2920 			prev_max = max;
2921 			prev_min = min;
2922 			max = pivots[offset];
2923 			goto next;
2924 		}
2925 
2926 		do {
2927 			offset++;
2928 		} while ((offset < end) && (pivots[offset] < mas->index));
2929 
2930 		prev_min = min;
2931 		min = pivots[offset - 1] + 1;
2932 		prev_max = max;
2933 		if (likely(offset < end && pivots[offset]))
2934 			max = pivots[offset];
2935 
2936 next:
2937 		slots = ma_slots(node, type);
2938 		next = mt_slot(mas->tree, slots, offset);
2939 		if (unlikely(ma_dead_node(node)))
2940 			goto dead_node;
2941 	} while (!ma_is_leaf(type));
2942 
2943 	mas->offset = offset;
2944 	mas->index = min;
2945 	mas->last = max;
2946 	mas->min = prev_min;
2947 	mas->max = prev_max;
2948 	mas->node = last;
2949 	return (void *) next;
2950 
2951 dead_node:
2952 	mas_reset(mas);
2953 	return NULL;
2954 }
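
/*
 * Minimal sketch (not part of the kernel build) of the range tracking in
 * the walk above: the minimum of slot @offset is the previous pivot plus
 * one, or the node minimum for offset 0.  The helper is hypothetical and
 * assumes @node_min is the minimum of the node being walked.
 */
static inline unsigned long example_slot_min(const unsigned long *pivots,
					     unsigned char offset,
					     unsigned long node_min)
{
	return offset ? pivots[offset - 1] + 1 : node_min;
}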
2955 
2956 /*
2957  * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2958  * @mas: The starting maple state
2959  * @mast: The maple_subtree_state, keeps track of 4 maple states.
2960  * @count: The estimated count of iterations needed.
2961  *
2962  * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2963  * is hit.  First @b_node is split into two entries which are inserted into the
2964  * next iteration of the loop.  @b_node is returned populated with the final
2965  * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
2966  * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree.  The update of
2968  * orig_l_mas->last is used in mas_consume to find the slots that will need to
2969  * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
2970  * the new sub-tree in case the sub-tree becomes the full tree.
2971  *
2972  * Return: the number of elements in b_node during the last loop.
2973  */
2974 static int mas_spanning_rebalance(struct ma_state *mas,
2975 		struct maple_subtree_state *mast, unsigned char count)
2976 {
2977 	unsigned char split, mid_split;
2978 	unsigned char slot = 0;
2979 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2980 
2981 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2982 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2983 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2984 	MA_TOPIARY(free, mas->tree);
2985 	MA_TOPIARY(destroy, mas->tree);
2986 
2987 	/*
2988 	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
	 * Rebalancing is done by use of the ``struct ma_topiary``.
2990 	 */
2991 	mast->l = &l_mas;
2992 	mast->m = &m_mas;
2993 	mast->r = &r_mas;
2994 	mast->free = &free;
2995 	mast->destroy = &destroy;
2996 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
2997 
	/* Rebalance if this is not the root and the data is insufficient. */
2999 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3000 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3001 		mast_spanning_rebalance(mast);
3002 
3003 	mast->orig_l->depth = 0;
3004 
3005 	/*
3006 	 * Each level of the tree is examined and balanced, pushing data to the left or
3007 	 * right, or rebalancing against left or right nodes is employed to avoid
3008 	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
3009 	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
3010 	 * will have the incorrect parent pointers and currently be in two trees: the
3011 	 * original tree and the partially new tree.  To remedy the parent pointers in
3012 	 * the old tree, the new data is swapped into the active tree and a walk down
3013 	 * the tree is performed and the parent pointers are updated.
	 * See mas_descend_adopt() for more information.
3015 	 */
3016 	while (count--) {
3017 		mast->bn->b_end--;
3018 		mast->bn->type = mte_node_type(mast->orig_l->node);
3019 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3020 					&mid_split, mast->orig_l->min);
3021 		mast_set_split_parents(mast, left, middle, right, split,
3022 				       mid_split);
3023 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3024 
3025 		/*
3026 		 * Copy data from next level in the tree to mast->bn from next
3027 		 * iteration
3028 		 */
3029 		memset(mast->bn, 0, sizeof(struct maple_big_node));
3030 		mast->bn->type = mte_node_type(left);
3031 		mast->orig_l->depth++;
3032 
3033 		/* Root already stored in l->node. */
3034 		if (mas_is_root_limits(mast->l))
3035 			goto new_root;
3036 
3037 		mast_ascend_free(mast);
3038 		mast_combine_cp_left(mast);
3039 		l_mas.offset = mast->bn->b_end;
3040 		mab_set_b_end(mast->bn, &l_mas, left);
3041 		mab_set_b_end(mast->bn, &m_mas, middle);
3042 		mab_set_b_end(mast->bn, &r_mas, right);
3043 
3044 		/* Copy anything necessary out of the right node. */
3045 		mast_combine_cp_right(mast);
3046 		mast_topiary(mast);
3047 		mast->orig_l->last = mast->orig_l->max;
3048 
3049 		if (mast_sufficient(mast))
3050 			continue;
3051 
3052 		if (mast_overflow(mast))
3053 			continue;
3054 
3055 		/* May be a new root stored in mast->bn */
3056 		if (mas_is_root_limits(mast->orig_l))
3057 			break;
3058 
3059 		mast_spanning_rebalance(mast);
3060 
3061 		/* rebalancing from other nodes may require another loop. */
3062 		if (!count)
3063 			count++;
3064 	}
3065 
3066 	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3067 				mte_node_type(mast->orig_l->node));
3068 	mast->orig_l->depth++;
3069 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3070 	mte_set_parent(left, l_mas.node, slot);
3071 	if (middle)
3072 		mte_set_parent(middle, l_mas.node, ++slot);
3073 
3074 	if (right)
3075 		mte_set_parent(right, l_mas.node, ++slot);
3076 
3077 	if (mas_is_root_limits(mast->l)) {
3078 new_root:
3079 		mast_new_root(mast, mas);
3080 	} else {
3081 		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3082 	}
3083 
3084 	if (!mte_dead_node(mast->orig_l->node))
3085 		mat_add(&free, mast->orig_l->node);
3086 
3087 	mas->depth = mast->orig_l->depth;
3088 	*mast->orig_l = l_mas;
3089 	mte_set_node_dead(mas->node);
3090 
3091 	/* Set up mas for insertion. */
3092 	mast->orig_l->depth = mas->depth;
3093 	mast->orig_l->alloc = mas->alloc;
3094 	*mas = *mast->orig_l;
3095 	mas_wmb_replace(mas, &free, &destroy);
3096 	mtree_range_walk(mas);
3097 	return mast->bn->b_end;
3098 }
3099 
3100 /*
3101  * mas_rebalance() - Rebalance a given node.
3102  * @mas: The maple state
3103  * @b_node: The big maple node.
3104  *
3105  * Rebalance two nodes into a single node or two new nodes that are sufficient.
3106  * Continue upwards until tree is sufficient.
3107  *
3108  * Return: the number of elements in b_node during the last loop.
3109  */
3110 static inline int mas_rebalance(struct ma_state *mas,
3111 				struct maple_big_node *b_node)
3112 {
3113 	char empty_count = mas_mt_height(mas);
3114 	struct maple_subtree_state mast;
3115 	unsigned char shift, b_end = ++b_node->b_end;
3116 
3117 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3118 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3119 
3120 	trace_ma_op(__func__, mas);
3121 
3122 	/*
3123 	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
3124 	 * against the node to the right if it exists, otherwise the node to the
3125 	 * left of this node is rebalanced against this node.  If rebalancing
3126 	 * causes just one node to be produced instead of two, then the parent
3127 	 * is also examined and rebalanced if it is insufficient.  Every level
3128 	 * tries to combine the data in the same way.  If one node contains the
3129 	 * entire range of the tree, then that node is used as a new root node.
3130 	 */
3131 	mas_node_count(mas, 1 + empty_count * 3);
3132 	if (mas_is_err(mas))
3133 		return 0;
3134 
3135 	mast.orig_l = &l_mas;
3136 	mast.orig_r = &r_mas;
3137 	mast.bn = b_node;
3138 	mast.bn->type = mte_node_type(mas->node);
3139 
3140 	l_mas = r_mas = *mas;
3141 
3142 	if (mas_next_sibling(&r_mas)) {
3143 		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3144 		r_mas.last = r_mas.index = r_mas.max;
3145 	} else {
3146 		mas_prev_sibling(&l_mas);
3147 		shift = mas_data_end(&l_mas) + 1;
3148 		mab_shift_right(b_node, shift);
3149 		mas->offset += shift;
3150 		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3151 		b_node->b_end = shift + b_end;
3152 		l_mas.index = l_mas.last = l_mas.min;
3153 	}
3154 
3155 	return mas_spanning_rebalance(mas, &mast, empty_count);
3156 }
3157 
3158 /*
3159  * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3160  * state.
3161  * @mas: The maple state
3162  * @end: The end of the left-most node.
3163  *
3164  * During a mass-insert event (such as forking), it may be necessary to
3165  * rebalance the left-most node when it is not sufficient.
3166  */
3167 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3168 {
3169 	enum maple_type mt = mte_node_type(mas->node);
3170 	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3171 	struct maple_enode *eparent;
3172 	unsigned char offset, tmp, split = mt_slots[mt] / 2;
3173 	void __rcu **l_slots, **slots;
3174 	unsigned long *l_pivs, *pivs, gap;
3175 	bool in_rcu = mt_in_rcu(mas->tree);
3176 
3177 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3178 
3179 	l_mas = *mas;
3180 	mas_prev_sibling(&l_mas);
3181 
3182 	/* set up node. */
3183 	if (in_rcu) {
3184 		/* Allocate for both left and right as well as parent. */
3185 		mas_node_count(mas, 3);
3186 		if (mas_is_err(mas))
3187 			return;
3188 
3189 		newnode = mas_pop_node(mas);
3190 	} else {
3191 		newnode = &reuse;
3192 	}
3193 
3194 	node = mas_mn(mas);
3195 	newnode->parent = node->parent;
3196 	slots = ma_slots(newnode, mt);
3197 	pivs = ma_pivots(newnode, mt);
3198 	left = mas_mn(&l_mas);
3199 	l_slots = ma_slots(left, mt);
3200 	l_pivs = ma_pivots(left, mt);
3201 	if (!l_slots[split])
3202 		split++;
3203 	tmp = mas_data_end(&l_mas) - split;
3204 
3205 	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3206 	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3207 	pivs[tmp] = l_mas.max;
3208 	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3209 	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3210 
3211 	l_mas.max = l_pivs[split];
3212 	mas->min = l_mas.max + 1;
3213 	eparent = mt_mk_node(mte_parent(l_mas.node),
3214 			     mas_parent_enum(&l_mas, l_mas.node));
3215 	tmp += end;
3216 	if (!in_rcu) {
3217 		unsigned char max_p = mt_pivots[mt];
3218 		unsigned char max_s = mt_slots[mt];
3219 
3220 		if (tmp < max_p)
3221 			memset(pivs + tmp, 0,
			       sizeof(unsigned long) * (max_p - tmp));
3223 
3224 		if (tmp < mt_slots[mt])
3225 			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3226 
3227 		memcpy(node, newnode, sizeof(struct maple_node));
3228 		ma_set_meta(node, mt, 0, tmp - 1);
3229 		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3230 			      l_pivs[split]);
3231 
3232 		/* Remove data from l_pivs. */
3233 		tmp = split + 1;
3234 		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3235 		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3236 		ma_set_meta(left, mt, 0, split);
3237 
3238 		goto done;
3239 	}
3240 
3241 	/* RCU requires replacing both l_mas, mas, and parent. */
3242 	mas->node = mt_mk_node(newnode, mt);
3243 	ma_set_meta(newnode, mt, 0, tmp);
3244 
3245 	new_left = mas_pop_node(mas);
3246 	new_left->parent = left->parent;
3247 	mt = mte_node_type(l_mas.node);
3248 	slots = ma_slots(new_left, mt);
3249 	pivs = ma_pivots(new_left, mt);
3250 	memcpy(slots, l_slots, sizeof(void *) * split);
3251 	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3252 	ma_set_meta(new_left, mt, 0, split);
3253 	l_mas.node = mt_mk_node(new_left, mt);
3254 
3255 	/* replace parent. */
3256 	offset = mte_parent_slot(mas->node);
3257 	mt = mas_parent_enum(&l_mas, l_mas.node);
3258 	parent = mas_pop_node(mas);
3259 	slots = ma_slots(parent, mt);
3260 	pivs = ma_pivots(parent, mt);
3261 	memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3262 	rcu_assign_pointer(slots[offset], mas->node);
3263 	rcu_assign_pointer(slots[offset - 1], l_mas.node);
3264 	pivs[offset - 1] = l_mas.max;
3265 	eparent = mt_mk_node(parent, mt);
3266 done:
3267 	gap = mas_leaf_max_gap(mas);
3268 	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3269 	gap = mas_leaf_max_gap(&l_mas);
3270 	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3271 	mas_ascend(mas);
3272 
3273 	if (in_rcu)
3274 		mas_replace(mas, false);
3275 
3276 	mas_update_gap(mas);
3277 }
3278 
3279 /*
3280  * mas_split_final_node() - Split the final node in a subtree operation.
3281  * @mast: the maple subtree state
3282  * @mas: The maple state
3283  * @height: The height of the tree in case it's a new root.
3284  */
3285 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3286 					struct ma_state *mas, int height)
3287 {
3288 	struct maple_enode *ancestor;
3289 
3290 	if (mte_is_root(mas->node)) {
3291 		if (mt_is_alloc(mas->tree))
3292 			mast->bn->type = maple_arange_64;
3293 		else
3294 			mast->bn->type = maple_range_64;
3295 		mas->depth = height;
3296 	}
3297 	/*
3298 	 * Only a single node is used here, could be root.
	 * The big node data should just fit in a single node.
3300 	 */
3301 	ancestor = mas_new_ma_node(mas, mast->bn);
3302 	mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3303 	mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3304 	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3305 
3306 	mast->l->node = ancestor;
3307 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3308 	mas->offset = mast->bn->b_end - 1;
3309 	return true;
3310 }
3311 
3312 /*
3313  * mast_fill_bnode() - Copy data into the big node in the subtree state
3314  * @mast: The maple subtree state
3315  * @mas: the maple state
 * @skip: The number of entries to skip when inserting the new nodes.
3317  */
3318 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3319 					 struct ma_state *mas,
3320 					 unsigned char skip)
3321 {
3322 	bool cp = true;
3323 	struct maple_enode *old = mas->node;
3324 	unsigned char split;
3325 
3326 	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3327 	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3328 	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3329 	mast->bn->b_end = 0;
3330 
3331 	if (mte_is_root(mas->node)) {
3332 		cp = false;
3333 	} else {
3334 		mas_ascend(mas);
3335 		mat_add(mast->free, old);
3336 		mas->offset = mte_parent_slot(mas->node);
3337 	}
3338 
3339 	if (cp && mast->l->offset)
3340 		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3341 
3342 	split = mast->bn->b_end;
3343 	mab_set_b_end(mast->bn, mast->l, mast->l->node);
3344 	mast->r->offset = mast->bn->b_end;
3345 	mab_set_b_end(mast->bn, mast->r, mast->r->node);
3346 	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3347 		cp = false;
3348 
3349 	if (cp)
3350 		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3351 			   mast->bn, mast->bn->b_end);
3352 
3353 	mast->bn->b_end--;
3354 	mast->bn->type = mte_node_type(mas->node);
3355 }
3356 
3357 /*
3358  * mast_split_data() - Split the data in the subtree state big node into regular
3359  * nodes.
3360  * @mast: The maple subtree state
3361  * @mas: The maple state
3362  * @split: The location to split the big node
3363  */
3364 static inline void mast_split_data(struct maple_subtree_state *mast,
3365 	   struct ma_state *mas, unsigned char split)
3366 {
3367 	unsigned char p_slot;
3368 
3369 	mab_mas_cp(mast->bn, 0, split, mast->l, true);
3370 	mte_set_pivot(mast->r->node, 0, mast->r->max);
3371 	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3372 	mast->l->offset = mte_parent_slot(mas->node);
3373 	mast->l->max = mast->bn->pivot[split];
3374 	mast->r->min = mast->l->max + 1;
3375 	if (mte_is_leaf(mas->node))
3376 		return;
3377 
3378 	p_slot = mast->orig_l->offset;
3379 	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3380 			     &p_slot, split);
3381 	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3382 			     &p_slot, split);
3383 }
3384 
3385 /*
3386  * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3387  * data to the right or left node if there is room.
3388  * @mas: The maple state
3389  * @height: The current height of the maple state
3390  * @mast: The maple subtree state
3391  * @left: Push left or not.
3392  *
3393  * Keeping the height of the tree low means faster lookups.
3394  *
3395  * Return: True if pushed, false otherwise.
3396  */
3397 static inline bool mas_push_data(struct ma_state *mas, int height,
3398 				 struct maple_subtree_state *mast, bool left)
3399 {
3400 	unsigned char slot_total = mast->bn->b_end;
3401 	unsigned char end, space, split;
3402 
3403 	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3404 	tmp_mas = *mas;
3405 	tmp_mas.depth = mast->l->depth;
3406 
3407 	if (left && !mas_prev_sibling(&tmp_mas))
3408 		return false;
3409 	else if (!left && !mas_next_sibling(&tmp_mas))
3410 		return false;
3411 
3412 	end = mas_data_end(&tmp_mas);
3413 	slot_total += end;
3414 	space = 2 * mt_slot_count(mas->node) - 2;
3415 	/* -2 instead of -1 to ensure there isn't a triple split */
3416 	if (ma_is_leaf(mast->bn->type))
3417 		space--;
3418 
3419 	if (mas->max == ULONG_MAX)
3420 		space--;
3421 
3422 	if (slot_total >= space)
3423 		return false;
3424 
3425 	/* Get the data; Fill mast->bn */
3426 	mast->bn->b_end++;
3427 	if (left) {
3428 		mab_shift_right(mast->bn, end + 1);
3429 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3430 		mast->bn->b_end = slot_total + 1;
3431 	} else {
3432 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3433 	}
3434 
3435 	/* Configure mast for splitting of mast->bn */
3436 	split = mt_slots[mast->bn->type] - 2;
3437 	if (left) {
3438 		/*  Switch mas to prev node  */
3439 		mat_add(mast->free, mas->node);
3440 		*mas = tmp_mas;
3441 		/* Start using mast->l for the left side. */
3442 		tmp_mas.node = mast->l->node;
3443 		*mast->l = tmp_mas;
3444 	} else {
3445 		mat_add(mast->free, tmp_mas.node);
3446 		tmp_mas.node = mast->r->node;
3447 		*mast->r = tmp_mas;
3448 		split = slot_total - split;
3449 	}
3450 	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3451 	/* Update parent slot for split calculation. */
3452 	if (left)
3453 		mast->orig_l->offset += end + 1;
3454 
3455 	mast_split_data(mast, mas, split);
3456 	mast_fill_bnode(mast, mas, 2);
3457 	mas_split_final_node(mast, mas, height + 1);
3458 	return true;
3459 }
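
/*
 * Worked example (not part of the kernel build) of the capacity check
 * above, ignoring the ULONG_MAX adjustment: with 16-slot nodes, two
 * siblings may hold at most 2 * 16 - 2 = 30 entries after a push (one less
 * for leaves), so a later insert cannot force a triple split.  The slot
 * count is a hypothetical stand-in for mt_slot_count().
 */
static inline bool example_can_push(unsigned char slot_total, bool leaf)
{
	unsigned char space = 2 * 16 - 2;

	if (leaf)
		space--;

	return slot_total < space;
}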
3460 
3461 /*
3462  * mas_split() - Split data that is too big for one node into two.
3463  * @mas: The maple state
3464  * @b_node: The maple big node
3465  * Return: 1 on success, 0 on failure.
3466  */
3467 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
{
	struct maple_subtree_state mast;
3471 	int height = 0;
3472 	unsigned char mid_split, split = 0;
3473 
3474 	/*
3475 	 * Splitting is handled differently from any other B-tree; the Maple
3476 	 * Tree splits upwards.  Splitting up means that the split operation
3477 	 * occurs when the walk of the tree hits the leaves and not on the way
3478 	 * down.  The reason for splitting up is that it is impossible to know
3479 	 * how much space will be needed until the leaf is (or leaves are)
3480 	 * reached.  Since overwriting data is allowed and a range could
3481 	 * overwrite more than one range or result in changing one entry into 3
3482 	 * entries, it is impossible to know if a split is required until the
3483 	 * data is examined.
3484 	 *
3485 	 * Splitting is a balancing act between keeping allocations to a minimum
3486 	 * and avoiding a 'jitter' event where a tree is expanded to make room
3487 	 * for an entry followed by a contraction when the entry is removed.  To
3488 	 * accomplish the balance, there are empty slots remaining in both left
3489 	 * and right nodes after a split.
3490 	 */
3491 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3492 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3493 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3494 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3495 	MA_TOPIARY(mat, mas->tree);
3496 
3497 	trace_ma_op(__func__, mas);
3498 	mas->depth = mas_mt_height(mas);
3499 	/* Allocation failures will happen early. */
3500 	mas_node_count(mas, 1 + mas->depth * 2);
3501 	if (mas_is_err(mas))
3502 		return 0;
3503 
3504 	mast.l = &l_mas;
3505 	mast.r = &r_mas;
3506 	mast.orig_l = &prev_l_mas;
3507 	mast.orig_r = &prev_r_mas;
3508 	mast.free = &mat;
3509 	mast.bn = b_node;
3510 
3511 	while (height++ <= mas->depth) {
3512 		if (mt_slots[b_node->type] > b_node->b_end) {
3513 			mas_split_final_node(&mast, mas, height);
3514 			break;
3515 		}
3516 
3517 		l_mas = r_mas = *mas;
3518 		l_mas.node = mas_new_ma_node(mas, b_node);
3519 		r_mas.node = mas_new_ma_node(mas, b_node);
3520 		/*
3521 		 * Another way that 'jitter' is avoided is to terminate a split up early if the
3522 		 * left or right node has space to spare.  This is referred to as "pushing left"
3523 		 * or "pushing right" and is similar to the B* tree, except the nodes left or
3524 		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3525 		 * is a significant savings.
3526 		 */
3527 		/* Try to push left. */
3528 		if (mas_push_data(mas, height, &mast, true))
3529 			break;
3530 
3531 		/* Try to push right. */
3532 		if (mas_push_data(mas, height, &mast, false))
3533 			break;
3534 
3535 		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3536 		mast_split_data(&mast, mas, split);
3537 		/*
		 * The value is usually correct, but mab_mas_cp() in the call
		 * above can overwrite r->max; set it explicitly here.
3540 		 */
3541 		mast.r->max = mas->max;
3542 		mast_fill_bnode(&mast, mas, 1);
3543 		prev_l_mas = *mast.l;
3544 		prev_r_mas = *mast.r;
3545 	}
3546 
3547 	/* Set the original node as dead */
3548 	mat_add(mast.free, mas->node);
3549 	mas->node = l_mas.node;
3550 	mas_wmb_replace(mas, mast.free, NULL);
3551 	mtree_range_walk(mas);
3552 	return 1;
3553 }
3554 
3555 /*
3556  * mas_reuse_node() - Reuse the node to store the data.
3557  * @wr_mas: The maple write state
3558  * @bn: The maple big node
3559  * @end: The end of the data.
3560  *
3561  * Will always return false in RCU mode.
3562  *
3563  * Return: True if node was reused, false otherwise.
3564  */
3565 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3566 			  struct maple_big_node *bn, unsigned char end)
3567 {
3568 	/* Need to be rcu safe. */
3569 	if (mt_in_rcu(wr_mas->mas->tree))
3570 		return false;
3571 
3572 	if (end > bn->b_end) {
3573 		int clear = mt_slots[wr_mas->type] - bn->b_end;
3574 
3575 		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3576 		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3577 	}
3578 	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3579 	return true;
3580 }
3581 
3582 /*
3583  * mas_commit_b_node() - Commit the big node into the tree.
3584  * @wr_mas: The maple write state
3585  * @b_node: The maple big node
3586  * @end: The end of the data.
3587  */
3588 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas,
3589 			    struct maple_big_node *b_node, unsigned char end)
3590 {
3591 	struct maple_node *node;
3592 	unsigned char b_end = b_node->b_end;
3593 	enum maple_type b_type = b_node->type;
3594 
3595 	if ((b_end < mt_min_slots[b_type]) &&
3596 	    (!mte_is_root(wr_mas->mas->node)) &&
3597 	    (mas_mt_height(wr_mas->mas) > 1))
3598 		return mas_rebalance(wr_mas->mas, b_node);
3599 
3600 	if (b_end >= mt_slots[b_type])
3601 		return mas_split(wr_mas->mas, b_node);
3602 
3603 	if (mas_reuse_node(wr_mas, b_node, end))
3604 		goto reuse_node;
3605 
3606 	mas_node_count(wr_mas->mas, 1);
3607 	if (mas_is_err(wr_mas->mas))
3608 		return 0;
3609 
3610 	node = mas_pop_node(wr_mas->mas);
3611 	node->parent = mas_mn(wr_mas->mas)->parent;
3612 	wr_mas->mas->node = mt_mk_node(node, b_type);
3613 	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3614 	mas_replace(wr_mas->mas, false);
3615 reuse_node:
3616 	mas_update_gap(wr_mas->mas);
3617 	return 1;
3618 }
3619 
3620 /*
3621  * mas_root_expand() - Expand a root to a node
3622  * @mas: The maple state
3623  * @entry: The entry to store into the tree
3624  */
3625 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3626 {
3627 	void *contents = mas_root_locked(mas);
3628 	enum maple_type type = maple_leaf_64;
3629 	struct maple_node *node;
3630 	void __rcu **slots;
3631 	unsigned long *pivots;
3632 	int slot = 0;
3633 
3634 	mas_node_count(mas, 1);
3635 	if (unlikely(mas_is_err(mas)))
3636 		return 0;
3637 
3638 	node = mas_pop_node(mas);
3639 	pivots = ma_pivots(node, type);
3640 	slots = ma_slots(node, type);
3641 	node->parent = ma_parent_ptr(
3642 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3643 	mas->node = mt_mk_node(node, type);
3644 
3645 	if (mas->index) {
3646 		if (contents) {
3647 			rcu_assign_pointer(slots[slot], contents);
3648 			if (likely(mas->index > 1))
3649 				slot++;
3650 		}
3651 		pivots[slot++] = mas->index - 1;
3652 	}
3653 
3654 	rcu_assign_pointer(slots[slot], entry);
3655 	mas->offset = slot;
3656 	pivots[slot] = mas->last;
3657 	if (mas->last != ULONG_MAX)
3658 		slot++;
3659 	mas->depth = 1;
3660 	mas_set_height(mas);
3661 
3662 	/* swap the new root into the tree */
3663 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3664 	ma_set_meta(node, maple_leaf_64, 0, slot);
3665 	return slot;
3666 }
3667 
3668 static inline void mas_store_root(struct ma_state *mas, void *entry)
3669 {
3670 	if (likely((mas->last != 0) || (mas->index != 0)))
3671 		mas_root_expand(mas, entry);
3672 	else if (((unsigned long) (entry) & 3) == 2)
3673 		mas_root_expand(mas, entry);
3674 	else {
3675 		rcu_assign_pointer(mas->tree->ma_root, entry);
3676 		mas->node = MAS_START;
3677 	}
3678 }
3679 
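/*
 * Illustrative sketch (hypothetical tree and pointer): a store over the
 * single index 0 keeps the entry encoded directly in ma_root, while any
 * other range takes the mas_root_expand() path and allocates a
 * maple_leaf_64 node.
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store(&tree, 0, ptr, GFP_KERNEL);	  - entry stays in ma_root
 *	mtree_store(&tree, 5, ptr, GFP_KERNEL);	  - root expands to a node
 */
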
3680 /*
3681  * mas_is_span_wr() - Check if the write needs to be treated as a write that
3682  * spans the node.
3683  * @wr_mas: The maple write state
3687  *
3688  * Spanning writes are writes that start in one node and end in another, or
3689  * writes of a %NULL that will cause the node to end with a %NULL.
3690  *
3691  * Return: True if this is a spanning write, false otherwise.
3692  */
3693 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3694 {
3695 	unsigned long max;
3696 	unsigned long last = wr_mas->mas->last;
3697 	unsigned long piv = wr_mas->r_max;
3698 	enum maple_type type = wr_mas->type;
3699 	void *entry = wr_mas->entry;
3700 
3701 	/* Contained in this pivot */
3702 	if (piv > last)
3703 		return false;
3704 
3705 	max = wr_mas->mas->max;
3706 	if (unlikely(ma_is_leaf(type))) {
3707 		/* Fits in the node, but may span slots. */
3708 		if (last < max)
3709 			return false;
3710 
3711 		/* Writes to the end of the node but not null. */
3712 		if ((last == max) && entry)
3713 			return false;
3714 
3715 		/*
3716 		 * Writing ULONG_MAX is not a spanning write regardless of the
3717 		 * value being written as long as the range fits in the node.
3718 		 */
3719 		if ((last == ULONG_MAX) && (last == max))
3720 			return false;
3721 	} else if (piv == last) {
3722 		if (entry)
3723 			return false;
3724 
3725 		/* Detect spanning store wr walk */
3726 		if (last == ULONG_MAX)
3727 			return false;
3728 	}
3729 
3730 	trace_ma_write(__func__, wr_mas->mas, piv, entry);
3731 
3732 	return true;
3733 }
3734 
3735 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3736 {
3737 	wr_mas->type = mte_node_type(wr_mas->mas->node);
3738 	mas_wr_node_walk(wr_mas);
3739 	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3740 }
3741 
3742 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3743 {
3744 	wr_mas->mas->max = wr_mas->r_max;
3745 	wr_mas->mas->min = wr_mas->r_min;
3746 	wr_mas->mas->node = wr_mas->content;
3747 	wr_mas->mas->offset = 0;
3748 	wr_mas->mas->depth++;
3749 }
3750 /*
3751  * mas_wr_walk() - Walk the tree for a write.
3752  * @wr_mas: The maple write state
3753  *
3754  * Uses mas_slot_locked() and does not need to worry about dead nodes.
3755  *
3756  * Return: True if it's contained in a node, false on spanning write.
3757  */
3758 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3759 {
3760 	struct ma_state *mas = wr_mas->mas;
3761 
3762 	while (true) {
3763 		mas_wr_walk_descend(wr_mas);
3764 		if (unlikely(mas_is_span_wr(wr_mas)))
3765 			return false;
3766 
3767 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3768 						  mas->offset);
3769 		if (ma_is_leaf(wr_mas->type))
3770 			return true;
3771 
3772 		mas_wr_walk_traverse(wr_mas);
3773 	}
3774 
3775 	return true;
3776 }
3777 
3778 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3779 {
3780 	struct ma_state *mas = wr_mas->mas;
3781 
3782 	while (true) {
3783 		mas_wr_walk_descend(wr_mas);
3784 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3785 						  mas->offset);
3786 		if (ma_is_leaf(wr_mas->type))
3787 			return true;
3788 		mas_wr_walk_traverse(wr_mas);
3789 
3790 	}
3791 	return true;
3792 }
3793 /*
3794  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3795  * @l_wr_mas: The left maple write state
3796  * @r_wr_mas: The right maple write state
3797  */
3798 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3799 					    struct ma_wr_state *r_wr_mas)
3800 {
3801 	struct ma_state *r_mas = r_wr_mas->mas;
3802 	struct ma_state *l_mas = l_wr_mas->mas;
3803 	unsigned char l_slot;
3804 
3805 	l_slot = l_mas->offset;
3806 	if (!l_wr_mas->content)
3807 		l_mas->index = l_wr_mas->r_min;
3808 
3809 	if ((l_mas->index == l_wr_mas->r_min) &&
3810 		 (l_slot &&
3811 		  !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3812 		if (l_slot > 1)
3813 			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3814 		else
3815 			l_mas->index = l_mas->min;
3816 
3817 		l_mas->offset = l_slot - 1;
3818 	}
3819 
3820 	if (!r_wr_mas->content) {
3821 		if (r_mas->last < r_wr_mas->r_max)
3822 			r_mas->last = r_wr_mas->r_max;
3823 		r_mas->offset++;
3824 	} else if ((r_mas->last == r_wr_mas->r_max) &&
3825 	    (r_mas->last < r_mas->max) &&
3826 	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3827 		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3828 					     r_wr_mas->type, r_mas->offset + 1);
3829 		r_mas->offset++;
3830 	}
3831 }
3832 
3833 static inline void *mas_state_walk(struct ma_state *mas)
3834 {
3835 	void *entry;
3836 
3837 	entry = mas_start(mas);
3838 	if (mas_is_none(mas))
3839 		return NULL;
3840 
3841 	if (mas_is_ptr(mas))
3842 		return entry;
3843 
3844 	return mtree_range_walk(mas);
3845 }
3846 
3847 /*
3848  * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3849  * to date.
3850  *
3851  * @mas: The maple state.
3852  *
3853  * Note: Leaves mas in undesirable state.
3854  * Note: Leaves @mas in an undesirable state.
3855  */
3856 static inline void *mtree_lookup_walk(struct ma_state *mas)
3857 {
3858 	unsigned long *pivots;
3859 	unsigned char offset;
3860 	struct maple_node *node;
3861 	struct maple_enode *next;
3862 	enum maple_type type;
3863 	void __rcu **slots;
3864 	unsigned char end;
3865 	unsigned long max;
3866 
3867 	next = mas->node;
3868 	max = ULONG_MAX;
3869 	do {
3870 		offset = 0;
3871 		node = mte_to_node(next);
3872 		type = mte_node_type(next);
3873 		pivots = ma_pivots(node, type);
3874 		end = ma_data_end(node, type, pivots, max);
3875 		if (unlikely(ma_dead_node(node)))
3876 			goto dead_node;
3877 
3878 		if (pivots[offset] >= mas->index)
3879 			goto next;
3880 
3881 		do {
3882 			offset++;
3883 		} while ((offset < end) && (pivots[offset] < mas->index));
3884 
3885 		if (likely(offset < end))
3886 			max = pivots[offset];
3887 
3888 next:
3889 		slots = ma_slots(node, type);
3890 		next = mt_slot(mas->tree, slots, offset);
3891 		if (unlikely(ma_dead_node(node)))
3892 			goto dead_node;
3893 	} while (!ma_is_leaf(type));
3894 
3895 	return (void *) next;
3896 
3897 dead_node:
3898 	mas_reset(mas);
3899 	return NULL;
3900 }
3901 
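/*
 * This walk backs the public mtree_load() lookup (illustrative call,
 * hypothetical index):
 *
 *	entry = mtree_load(&tree, 12);
 */
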
3902 /*
3903  * mas_new_root() - Create a new root node that only contains the entry passed
3904  * in.
3905  * @mas: The maple state
3906  * @entry: The entry to store.
3907  *
3908  * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
3909  *
3910  * Return: 0 on error, 1 on success.
3911  */
3912 static inline int mas_new_root(struct ma_state *mas, void *entry)
3913 {
3914 	struct maple_enode *root = mas_root_locked(mas);
3915 	enum maple_type type = maple_leaf_64;
3916 	struct maple_node *node;
3917 	void __rcu **slots;
3918 	unsigned long *pivots;
3919 
3920 	if (!entry && !mas->index && mas->last == ULONG_MAX) {
3921 		mas->depth = 0;
3922 		mas_set_height(mas);
3923 		rcu_assign_pointer(mas->tree->ma_root, entry);
3924 		mas->node = MAS_START;
3925 		goto done;
3926 	}
3927 
3928 	mas_node_count(mas, 1);
3929 	if (mas_is_err(mas))
3930 		return 0;
3931 
3932 	node = mas_pop_node(mas);
3933 	pivots = ma_pivots(node, type);
3934 	slots = ma_slots(node, type);
3935 	node->parent = ma_parent_ptr(
3936 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3937 	mas->node = mt_mk_node(node, type);
3938 	rcu_assign_pointer(slots[0], entry);
3939 	pivots[0] = mas->last;
3940 	mas->depth = 1;
3941 	mas_set_height(mas);
3942 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3943 
3944 done:
3945 	if (xa_is_node(root))
3946 		mte_destroy_walk(root, mas->tree);
3947 
3948 	return 1;
3949 }
3950 /*
3951  * mas_wr_spanning_store() - Create a subtree with the store operation completed
3952  * and new nodes where necessary, then place the sub-tree in the actual tree.
3953  * Note that mas is expected to point to the node which caused the store to
3954  * span.
3955  * @wr_mas: The maple write state
3956  *
3957  * Return: 0 on error, positive on success.
3958  */
3959 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3960 {
3961 	struct maple_subtree_state mast;
3962 	struct maple_big_node b_node;
3963 	struct ma_state *mas;
3964 	unsigned char height;
3965 
3966 	/* Left and Right side of spanning store */
3967 	MA_STATE(l_mas, NULL, 0, 0);
3968 	MA_STATE(r_mas, NULL, 0, 0);
3969 
3970 	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3971 	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3972 
3973 	/*
3974 	 * A store operation that spans multiple nodes is called a spanning
3975 	 * store and is handled early in the store call stack by the function
3976 	 * mas_is_span_wr().  When a spanning store is identified, the maple
3977 	 * state is duplicated.  The first maple state walks the left tree path
3978 	 * to ``index``, the duplicate walks the right tree path to ``last``.
3979 	 * The data in the two nodes are combined into a single node, two nodes,
3980 	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
3981 	 * written to the last entry of a node is considered a spanning store as
3982 	 * written to the last entry of a node is considered a spanning store, as
3983 	 * a rebalance is required for the operation to complete and an overflow
3984 	 * of data may happen.  An illustrative trigger is sketched just after
3985 	 * this function.
3985 	mas = wr_mas->mas;
3986 	trace_ma_op(__func__, mas);
3987 
3988 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
3989 		return mas_new_root(mas, wr_mas->entry);
3990 	/*
3991 	 * Node rebalancing may occur due to this store, so there may be three new
3992 	 * entries per level plus a new root.
3993 	 */
3994 	height = mas_mt_height(mas);
3995 	mas_node_count(mas, 1 + height * 3);
3996 	if (mas_is_err(mas))
3997 		return 0;
3998 
3999 	/*
4000 	 * Set up the right side.  Walk to the next offset after the spanning
4001 	 * store to ensure the right side does not start with a NULL, and to
4002 	 * combine the node containing the end of the store with the next node.
4003 	 */
4004 	r_mas = *mas;
4005 	/* Avoid overflow, walk to next slot in the tree. */
4006 	if (r_mas.last + 1)
4007 		r_mas.last++;
4008 
4009 	r_mas.index = r_mas.last;
4010 	mas_wr_walk_index(&r_wr_mas);
4011 	r_mas.last = r_mas.index = mas->last;
4012 
4013 	/* Set up left side. */
4014 	l_mas = *mas;
4015 	mas_wr_walk_index(&l_wr_mas);
4016 
4017 	if (!wr_mas->entry) {
4018 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4019 		mas->offset = l_mas.offset;
4020 		mas->index = l_mas.index;
4021 		mas->last = l_mas.last = r_mas.last;
4022 	}
4023 
4024 	/* expanding NULLs may make this cover the entire range */
4025 	if (!l_mas.index && r_mas.last == ULONG_MAX) {
4026 		mas_set_range(mas, 0, ULONG_MAX);
4027 		return mas_new_root(mas, wr_mas->entry);
4028 	}
4029 
4030 	memset(&b_node, 0, sizeof(struct maple_big_node));
4031 	/* Copy l_mas and store the value in b_node. */
4032 	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4033 	/* Copy r_mas into b_node. */
4034 	if (r_mas.offset <= r_wr_mas.node_end)
4035 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4036 			   &b_node, b_node.b_end + 1);
4037 	else
4038 		b_node.b_end++;
4039 
4040 	/* Stop spanning searches by searching for just index. */
4041 	l_mas.index = l_mas.last = mas->index;
4042 
4043 	mast.bn = &b_node;
4044 	mast.orig_l = &l_mas;
4045 	mast.orig_r = &r_mas;
4046 	/* Combine l_mas and r_mas and split them up evenly again. */
4047 	return mas_spanning_rebalance(mas, &mast, height + 1);
4048 }
4049 
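/*
 * Illustrative trigger for this path (hypothetical ranges): in a tree tall
 * enough to have more than one leaf, a single write whose range starts in
 * one leaf and ends in another - e.g. a store over [5, 15] when a leaf
 * boundary falls at 10 - is flagged by mas_is_span_wr() and lands here:
 *
 *	mtree_store_range(&tree, 5, 15, ptr, GFP_KERNEL);
 */
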
4050 /*
4051  * mas_wr_node_store() - Attempt to store the value in a node
4052  * @wr_mas: The maple write state
4053  *
4054  * Attempts to reuse the node, but may allocate.
4055  *
4056  * Return: True if stored, false otherwise
4057  */
4058 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4059 {
4060 	struct ma_state *mas = wr_mas->mas;
4061 	void __rcu **dst_slots;
4062 	unsigned long *dst_pivots;
4063 	unsigned char dst_offset;
4064 	unsigned char new_end = wr_mas->node_end;
4065 	unsigned char offset;
4066 	unsigned char node_slots = mt_slots[wr_mas->type];
4067 	struct maple_node reuse, *newnode;
4068 	unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4069 	bool in_rcu = mt_in_rcu(mas->tree);
4070 
4071 	offset = mas->offset;
4072 	if (mas->last == wr_mas->r_max) {
4073 		/* runs right to the end of the node */
4074 		if (mas->last == mas->max)
4075 			new_end = offset;
4076 		/* don't copy this offset */
4077 		wr_mas->offset_end++;
4078 	} else if (mas->last < wr_mas->r_max) {
4079 		/* new range ends in this range */
4080 		if (unlikely(wr_mas->r_max == ULONG_MAX))
4081 			mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4082 
4083 		new_end++;
4084 	} else {
4085 		if (wr_mas->end_piv == mas->last)
4086 			wr_mas->offset_end++;
4087 
4088 		new_end -= wr_mas->offset_end - offset - 1;
4089 	}
4090 
4091 	/* new range starts within a range */
4092 	if (wr_mas->r_min < mas->index)
4093 		new_end++;
4094 
4095 	/* Not enough room */
4096 	if (new_end >= node_slots)
4097 		return false;
4098 
4099 	/* Not enough data. */
4100 	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4101 	    !(mas->mas_flags & MA_STATE_BULK))
4102 		return false;
4103 
4104 	/* set up node. */
4105 	if (in_rcu) {
4106 		mas_node_count(mas, 1);
4107 		if (mas_is_err(mas))
4108 			return false;
4109 
4110 		newnode = mas_pop_node(mas);
4111 	} else {
4112 		memset(&reuse, 0, sizeof(struct maple_node));
4113 		newnode = &reuse;
4114 	}
4115 
4116 	newnode->parent = mas_mn(mas)->parent;
4117 	dst_pivots = ma_pivots(newnode, wr_mas->type);
4118 	dst_slots = ma_slots(newnode, wr_mas->type);
4119 	/* Copy from start to insert point */
4120 	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4121 	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4122 	dst_offset = offset;
4123 
4124 	/* Handle insert of new range starting after old range */
4125 	if (wr_mas->r_min < mas->index) {
4126 		mas->offset++;
4127 		rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4128 		dst_pivots[dst_offset++] = mas->index - 1;
4129 	}
4130 
4131 	/* Store the new entry and range end. */
4132 	if (dst_offset < max_piv)
4133 		dst_pivots[dst_offset] = mas->last;
4134 	mas->offset = dst_offset;
4135 	rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4136 
4137 	/*
4138 	 * This range wrote to the end of the node, or it overwrote the rest of
4139 	 * the data.
4140 	 */
4141 	if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4142 		new_end = dst_offset;
4143 		goto done;
4144 	}
4145 
4146 	dst_offset++;
4147 	/* Copy to the end of node if necessary. */
4148 	copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4149 	memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4150 	       sizeof(void *) * copy_size);
4151 	if (dst_offset < max_piv) {
4152 		if (copy_size > max_piv - dst_offset)
4153 			copy_size = max_piv - dst_offset;
4154 
4155 		memcpy(dst_pivots + dst_offset,
4156 		       wr_mas->pivots + wr_mas->offset_end,
4157 		       sizeof(unsigned long) * copy_size);
4158 	}
4159 
4160 	if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4161 		dst_pivots[new_end] = mas->max;
4162 
4163 done:
4164 	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4165 	if (in_rcu) {
4166 		mas->node = mt_mk_node(newnode, wr_mas->type);
4167 		mas_replace(mas, false);
4168 	} else {
4169 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4170 	}
4171 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4172 	mas_update_gap(mas);
4173 	return true;
4174 }
4175 
4176 /*
4177  * mas_wr_slot_store() - Attempt to store a value in a slot.
4178  * @wr_mas: the maple write state
4179  *
4180  * Return: True if stored, false otherwise
4181  */
4182 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4183 {
4184 	struct ma_state *mas = wr_mas->mas;
4185 	unsigned long lmax; /* Logical max. */
4186 	unsigned char offset = mas->offset;
4187 
4188 	if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4189 				  (offset != wr_mas->node_end)))
4190 		return false;
4191 
4192 	if (offset == wr_mas->node_end - 1)
4193 		lmax = mas->max;
4194 	else
4195 		lmax = wr_mas->pivots[offset + 1];
4196 
4197 	/* going to overwrite too many slots. */
4198 	if (lmax < mas->last)
4199 		return false;
4200 
4201 	if (wr_mas->r_min == mas->index) {
4202 		/* overwriting two or more ranges with one. */
4203 		if (lmax == mas->last)
4204 			return false;
4205 
4206 		/* Overwriting all of offset and a portion of offset + 1. */
4207 		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4208 		wr_mas->pivots[offset] = mas->last;
4209 		goto done;
4210 	}
4211 
4212 	/* Doesn't end on the next range end. */
4213 	if (lmax != mas->last)
4214 		return false;
4215 
4216 	/* Overwriting a portion of offset and all of offset + 1 */
4217 	if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4218 	    (wr_mas->entry || wr_mas->pivots[offset + 1]))
4219 		wr_mas->pivots[offset + 1] = mas->last;
4220 
4221 	rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4222 	wr_mas->pivots[offset] = mas->index - 1;
4223 	mas->offset++; /* Keep mas accurate. */
4224 
4225 done:
4226 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4227 	mas_update_gap(mas);
4228 	return true;
4229 }
4230 
4231 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4232 {
4233 	while ((wr_mas->mas->last > wr_mas->end_piv) &&
4234 	       (wr_mas->offset_end < wr_mas->node_end))
4235 		wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4236 
4237 	if (wr_mas->mas->last > wr_mas->end_piv)
4238 		wr_mas->end_piv = wr_mas->mas->max;
4239 }
4240 
4241 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4242 {
4243 	struct ma_state *mas = wr_mas->mas;
4244 
4245 	if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4246 		mas->last = wr_mas->end_piv;
4247 
4248 	/* Check next slot(s) if we are overwriting the end */
4249 	if ((mas->last == wr_mas->end_piv) &&
4250 	    (wr_mas->node_end != wr_mas->offset_end) &&
4251 	    !wr_mas->slots[wr_mas->offset_end + 1]) {
4252 		wr_mas->offset_end++;
4253 		if (wr_mas->offset_end == wr_mas->node_end)
4254 			mas->last = mas->max;
4255 		else
4256 			mas->last = wr_mas->pivots[wr_mas->offset_end];
4257 		wr_mas->end_piv = mas->last;
4258 	}
4259 
4260 	if (!wr_mas->content) {
4261 		/* If this one is null, the next and prev are not */
4262 		mas->index = wr_mas->r_min;
4263 	} else {
4264 		/* Check prev slot if we are overwriting the start */
4265 		if (mas->index == wr_mas->r_min && mas->offset &&
4266 		    !wr_mas->slots[mas->offset - 1]) {
4267 			mas->offset--;
4268 			wr_mas->r_min = mas->index =
4269 				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4270 			wr_mas->r_max = wr_mas->pivots[mas->offset];
4271 		}
4272 	}
4273 }
4274 
4275 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4276 {
4277 	unsigned char end = wr_mas->node_end;
4278 	unsigned char new_end = end + 1;
4279 	struct ma_state *mas = wr_mas->mas;
4280 	unsigned char node_pivots = mt_pivots[wr_mas->type];
4281 
4282 	if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4283 		if (new_end < node_pivots)
4284 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4285 
4286 		if (new_end < node_pivots)
4287 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4288 
4289 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4290 		mas->offset = new_end;
4291 		wr_mas->pivots[end] = mas->index - 1;
4292 
4293 		return true;
4294 	}
4295 
4296 	if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4297 		if (new_end < node_pivots)
4298 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4299 
4300 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4301 		if (new_end < node_pivots)
4302 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4303 
4304 		wr_mas->pivots[end] = mas->last;
4305 		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4306 		return true;
4307 	}
4308 
4309 	return false;
4310 }
4311 
4312 /*
4313  * mas_wr_bnode() - Slow path for a modification.
4314  * @wr_mas: The write maple state
4315  *
4316  * This is where splits and rebalances end up.
4317  */
4318 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4319 {
4320 	struct maple_big_node b_node;
4321 
4322 	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4323 	memset(&b_node, 0, sizeof(struct maple_big_node));
4324 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4325 	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4326 }
4327 
4328 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4329 {
4330 	unsigned char node_slots;
4331 	unsigned char node_size;
4332 	struct ma_state *mas = wr_mas->mas;
4333 
4334 	/* Direct replacement */
4335 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4336 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4337 		if (!!wr_mas->entry ^ !!wr_mas->content)
4338 			mas_update_gap(mas);
4339 		return;
4340 	}
4341 
4342 	/* Attempt to append */
4343 	node_slots = mt_slots[wr_mas->type];
4344 	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4345 	if (mas->max == ULONG_MAX)
4346 		node_size++;
4347 
4348 	/* slot and node store will not fit, go to the slow path */
4349 	if (unlikely(node_size >= node_slots))
4350 		goto slow_path;
4351 
4352 	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4353 	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4354 		if (!wr_mas->content || !wr_mas->entry)
4355 			mas_update_gap(mas);
4356 		return;
4357 	}
4358 
4359 	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4360 		return;
4361 	else if (mas_wr_node_store(wr_mas))
4362 		return;
4363 
4364 	if (mas_is_err(mas))
4365 		return;
4366 
4367 slow_path:
4368 	mas_wr_bnode(wr_mas);
4369 }
4370 
4371 /*
4372  * mas_wr_store_entry() - Internal call to store a value
4373  * @wr_mas: The maple write state
4374  *
4375  * Return: The contents that were previously stored at the index.
4377  */
4378 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4379 {
4380 	struct ma_state *mas = wr_mas->mas;
4381 
4382 	wr_mas->content = mas_start(mas);
4383 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4384 		mas_store_root(mas, wr_mas->entry);
4385 		return wr_mas->content;
4386 	}
4387 
4388 	if (unlikely(!mas_wr_walk(wr_mas))) {
4389 		mas_wr_spanning_store(wr_mas);
4390 		return wr_mas->content;
4391 	}
4392 
4393 	/* At this point, we are at the leaf node that needs to be altered. */
4394 	wr_mas->end_piv = wr_mas->r_max;
4395 	mas_wr_end_piv(wr_mas);
4396 
4397 	if (!wr_mas->entry)
4398 		mas_wr_extend_null(wr_mas);
4399 
4400 	/* New root for a single pointer */
4401 	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4402 		mas_new_root(mas, wr_mas->entry);
4403 		return wr_mas->content;
4404 	}
4405 
4406 	mas_wr_modify(wr_mas);
4407 	return wr_mas->content;
4408 }
4409 
4410 /**
4411  * mas_insert() - Internal call to insert a value
4412  * @mas: The maple state
4413  * @entry: The entry to store
4414  *
4415  * Return: %NULL on success, or the contents that already exist at the
4416  * requested index.  The maple state needs to be checked for error conditions.
4417  */
4418 static inline void *mas_insert(struct ma_state *mas, void *entry)
4419 {
4420 	MA_WR_STATE(wr_mas, mas, entry);
4421 
4422 	/*
4423 	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4424 	 * tree.  If the insert fits exactly into an existing gap with a value
4425 	 * of NULL, then the slot only needs to be written with the new value.
4426 	 * If the range being inserted is adjacent to another range, then only a
4427 	 * single pivot needs to be inserted (as well as writing the entry).  If
4428 	 * the new range is within a gap but does not touch any other ranges,
4429 	 * then two pivots need to be inserted: the start - 1, and the end.  As
4430 	 * usual, the entry must be written.  Most operations require a new node
4431 	 * to be allocated and replace an existing node to ensure RCU safety,
4432 	 * when in RCU mode.  The exception to requiring a newly allocated node
4433 	 * is when inserting at the end of a node (appending).  When done
4434 	 * carefully, appending can reuse the node in place.
4435 	 */
4436 	wr_mas.content = mas_start(mas);
4437 	if (wr_mas.content)
4438 		goto exists;
4439 
4440 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4441 		mas_store_root(mas, entry);
4442 		return NULL;
4443 	}
4444 
4445 	/* spanning writes always overwrite something */
4446 	if (!mas_wr_walk(&wr_mas))
4447 		goto exists;
4448 
4449 	/* At this point, we are at the leaf node that needs to be altered. */
4450 	wr_mas.offset_end = mas->offset;
4451 	wr_mas.end_piv = wr_mas.r_max;
4452 
4453 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4454 		goto exists;
4455 
4456 	if (!entry)
4457 		return NULL;
4458 
4459 	mas_wr_modify(&wr_mas);
4460 	return wr_mas.content;
4461 
4462 exists:
4463 	mas_set_err(mas, -EEXIST);
4464 	return wr_mas.content;
4465 
4466 }
4467 
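/*
 * Usage sketch for the insert semantics (hypothetical values): unlike a
 * store, an insert refuses to overwrite, and the wrappers built on this
 * function report -EEXIST:
 *
 *	mtree_insert(&tree, 5, ptr, GFP_KERNEL);	  - returns 0
 *	mtree_insert(&tree, 5, other, GFP_KERNEL);	  - returns -EEXIST
 */
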
4468 /*
4469  * mas_prev_node() - Find the previous non-null entry at the same level in
4470  * the tree.
4471  * @mas: The maple state
4472  * @min: The lower limit to search
4473  *
4474  * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4475  * Return: 1 if the node is dead, 0 otherwise.
4476  */
4477 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4478 {
4479 	enum maple_type mt;
4480 	int offset, level;
4481 	void __rcu **slots;
4482 	struct maple_node *node;
4483 	struct maple_enode *enode;
4484 	unsigned long *pivots;
4485 
4486 	if (mas_is_none(mas))
4487 		return 0;
4488 
4489 	level = 0;
4490 	do {
4491 		node = mas_mn(mas);
4492 		if (ma_is_root(node))
4493 			goto no_entry;
4494 
4495 		/* Walk up. */
4496 		if (unlikely(mas_ascend(mas)))
4497 			return 1;
4498 		offset = mas->offset;
4499 		level++;
4500 	} while (!offset);
4501 
4502 	offset--;
4503 	mt = mte_node_type(mas->node);
4504 	node = mas_mn(mas);
4505 	slots = ma_slots(node, mt);
4506 	pivots = ma_pivots(node, mt);
4507 	mas->max = pivots[offset];
4508 	if (offset)
4509 		mas->min = pivots[offset - 1] + 1;
4510 	if (unlikely(ma_dead_node(node)))
4511 		return 1;
4512 
4513 	if (mas->max < min)
4514 		goto no_entry_min;
4515 
4516 	while (level > 1) {
4517 		level--;
4518 		enode = mas_slot(mas, slots, offset);
4519 		if (unlikely(ma_dead_node(node)))
4520 			return 1;
4521 
4522 		mas->node = enode;
4523 		mt = mte_node_type(mas->node);
4524 		node = mas_mn(mas);
4525 		slots = ma_slots(node, mt);
4526 		pivots = ma_pivots(node, mt);
4527 		offset = ma_data_end(node, mt, pivots, mas->max);
4528 		if (offset)
4529 			mas->min = pivots[offset - 1] + 1;
4530 
4531 		if (offset < mt_pivots[mt])
4532 			mas->max = pivots[offset];
4533 
4534 		if (mas->max < min)
4535 			goto no_entry;
4536 	}
4537 
4538 	mas->node = mas_slot(mas, slots, offset);
4539 	if (unlikely(ma_dead_node(node)))
4540 		return 1;
4541 
4542 	mas->offset = mas_data_end(mas);
4543 	if (unlikely(mte_dead_node(mas->node)))
4544 		return 1;
4545 
4546 	return 0;
4547 
4548 no_entry_min:
4549 	mas->offset = offset;
4550 	if (offset)
4551 		mas->min = pivots[offset - 1] + 1;
4552 no_entry:
4553 	if (unlikely(ma_dead_node(node)))
4554 		return 1;
4555 
4556 	mas->node = MAS_NONE;
4557 	return 0;
4558 }
4559 
4560 /*
4561  * mas_next_node() - Get the next node at the same level in the tree.
4562  * @mas: The maple state
4563  * @node: The maple node
4564  * @max: The maximum pivot value to check.
4564  *
4565  * The next value will be mas->node[mas->offset] or MAS_NONE.
4566  * Return: 1 on dead node, 0 otherwise.
4567  */
4568 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4569 				unsigned long max)
4570 {
4571 	unsigned long min, pivot;
4572 	unsigned long *pivots;
4573 	struct maple_enode *enode;
4574 	int level = 0;
4575 	unsigned char offset;
4576 	enum maple_type mt;
4577 	void __rcu **slots;
4578 
4579 	if (mas->max >= max)
4580 		goto no_entry;
4581 
4582 	level = 0;
4583 	do {
4584 		if (ma_is_root(node))
4585 			goto no_entry;
4586 
4587 		min = mas->max + 1;
4588 		if (min > max)
4589 			goto no_entry;
4590 
4591 		if (unlikely(mas_ascend(mas)))
4592 			return 1;
4593 
4594 		offset = mas->offset;
4595 		level++;
4596 		node = mas_mn(mas);
4597 		mt = mte_node_type(mas->node);
4598 		pivots = ma_pivots(node, mt);
4599 	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
4600 
4601 	slots = ma_slots(node, mt);
4602 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4603 	while (unlikely(level > 1)) {
4604 		/* Descend, if necessary */
4605 		enode = mas_slot(mas, slots, offset);
4606 		if (unlikely(ma_dead_node(node)))
4607 			return 1;
4608 
4609 		mas->node = enode;
4610 		level--;
4611 		node = mas_mn(mas);
4612 		mt = mte_node_type(mas->node);
4613 		slots = ma_slots(node, mt);
4614 		pivots = ma_pivots(node, mt);
4615 		offset = 0;
4616 		pivot = pivots[0];
4617 	}
4618 
4619 	enode = mas_slot(mas, slots, offset);
4620 	if (unlikely(ma_dead_node(node)))
4621 		return 1;
4622 
4623 	mas->node = enode;
4624 	mas->min = min;
4625 	mas->max = pivot;
4626 	return 0;
4627 
4628 no_entry:
4629 	if (unlikely(ma_dead_node(node)))
4630 		return 1;
4631 
4632 	mas->node = MAS_NONE;
4633 	return 0;
4634 }
4635 
4636 /*
4637  * mas_next_nentry() - Get the next node entry
4638  * @mas: The maple state
4639  * @node: The maple node
4640  * @max: The maximum value to check
4641  * @type: The maple node type
4641  *
4642  * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4643  * pivot of the entry.
4644  *
4645  * Return: The next entry, %NULL otherwise
4646  */
4647 static inline void *mas_next_nentry(struct ma_state *mas,
4648 	    struct maple_node *node, unsigned long max, enum maple_type type)
4649 {
4650 	unsigned char count;
4651 	unsigned long pivot;
4652 	unsigned long *pivots;
4653 	void __rcu **slots;
4654 	void *entry;
4655 
4656 	if (mas->last == mas->max) {
4657 		mas->index = mas->max;
4658 		return NULL;
4659 	}
4660 
4661 	pivots = ma_pivots(node, type);
4662 	slots = ma_slots(node, type);
4663 	mas->index = mas_safe_min(mas, pivots, mas->offset);
4664 	if (ma_dead_node(node))
4665 		return NULL;
4666 
4667 	if (mas->index > max)
4668 		return NULL;
4669 
4670 	count = ma_data_end(node, type, pivots, mas->max);
4671 	if (mas->offset > count)
4672 		return NULL;
4673 
4674 	while (mas->offset < count) {
4675 		pivot = pivots[mas->offset];
4676 		entry = mas_slot(mas, slots, mas->offset);
4677 		if (ma_dead_node(node))
4678 			return NULL;
4679 
4680 		if (entry)
4681 			goto found;
4682 
4683 		if (pivot >= max)
4684 			return NULL;
4685 
4686 		mas->index = pivot + 1;
4687 		mas->offset++;
4688 	}
4689 
4690 	if (mas->index > mas->max) {
4691 		mas->index = mas->last;
4692 		return NULL;
4693 	}
4694 
4695 	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4696 	entry = mas_slot(mas, slots, mas->offset);
4697 	if (ma_dead_node(node))
4698 		return NULL;
4699 
4700 	if (!pivot)
4701 		return NULL;
4702 
4703 	if (!entry)
4704 		return NULL;
4705 
4706 found:
4707 	mas->last = pivot;
4708 	return entry;
4709 }
4710 
4711 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4712 {
4713 retry:
4714 	mas_set(mas, index);
4715 	mas_state_walk(mas);
4716 	if (mas_is_start(mas))
4717 		goto retry;
4718 }
4723 
4724 /*
4725  * mas_next_entry() - Internal function to get the next entry.
4726  * @mas: The maple state
4727  * @limit: The maximum range start.
4728  *
4729  * Sets @mas->node to the node containing the next entry and @mas->index to
4730  * the beginning value of the entry's range.  Does not check beyond @limit.
4731  * Sets @mas->index and @mas->last to the limit if it is hit.
4732  * Restarts on dead nodes.
4733  *
4734  * Return: the next entry or %NULL.
4735  */
4736 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4737 {
4738 	void *entry = NULL;
4739 	struct maple_enode *prev_node;
4740 	struct maple_node *node;
4741 	unsigned char offset;
4742 	unsigned long last;
4743 	enum maple_type mt;
4744 
4745 	last = mas->last;
4746 retry:
4747 	offset = mas->offset;
4748 	prev_node = mas->node;
4749 	node = mas_mn(mas);
4750 	mt = mte_node_type(mas->node);
4751 	mas->offset++;
4752 	if (unlikely(mas->offset >= mt_slots[mt])) {
4753 		mas->offset = mt_slots[mt] - 1;
4754 		goto next_node;
4755 	}
4756 
4757 	while (!mas_is_none(mas)) {
4758 		entry = mas_next_nentry(mas, node, limit, mt);
4759 		if (unlikely(ma_dead_node(node))) {
4760 			mas_rewalk(mas, last);
4761 			goto retry;
4762 		}
4763 
4764 		if (likely(entry))
4765 			return entry;
4766 
4767 		if (unlikely((mas->index > limit)))
4768 			break;
4769 
4770 next_node:
4771 		prev_node = mas->node;
4772 		offset = mas->offset;
4773 		if (unlikely(mas_next_node(mas, node, limit))) {
4774 			mas_rewalk(mas, last);
4775 			goto retry;
4776 		}
4777 		mas->offset = 0;
4778 		node = mas_mn(mas);
4779 		mt = mte_node_type(mas->node);
4780 	}
4781 
4782 	mas->index = mas->last = limit;
4783 	mas->offset = offset;
4784 	mas->node = prev_node;
4785 	return NULL;
4786 }
4787 
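/*
 * Sketch of the public traversal built on these internal iterators
 * (illustrative; assumes rcu_read_lock() is held and mas was initialised
 * with MA_STATE()):
 *
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		- process entry; mas.index/mas.last give its range
 *	}
 */
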
4788 /*
4789  * mas_prev_nentry() - Get the previous node entry.
4790  * @mas: The maple state.
4791  * @limit: The lower limit to check for a value.
4792  * @index: The index to rewalk from if a dead node is encountered.
4792  *
4793  * Return: the entry, %NULL otherwise.
4794  */
4795 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4796 				    unsigned long index)
4797 {
4798 	unsigned long pivot, min;
4799 	unsigned char offset;
4800 	struct maple_node *mn;
4801 	enum maple_type mt;
4802 	unsigned long *pivots;
4803 	void __rcu **slots;
4804 	void *entry;
4805 
4806 retry:
4807 	if (!mas->offset)
4808 		return NULL;
4809 
4810 	mn = mas_mn(mas);
4811 	mt = mte_node_type(mas->node);
4812 	offset = mas->offset - 1;
4813 	if (offset >= mt_slots[mt])
4814 		offset = mt_slots[mt] - 1;
4815 
4816 	slots = ma_slots(mn, mt);
4817 	pivots = ma_pivots(mn, mt);
4818 	if (offset == mt_pivots[mt])
4819 		pivot = mas->max;
4820 	else
4821 		pivot = pivots[offset];
4822 
4823 	if (unlikely(ma_dead_node(mn))) {
4824 		mas_rewalk(mas, index);
4825 		goto retry;
4826 	}
4827 
4828 	while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4829 	       !pivot))
4830 		pivot = pivots[--offset];
4831 
4832 	min = mas_safe_min(mas, pivots, offset);
4833 	entry = mas_slot(mas, slots, offset);
4834 	if (unlikely(ma_dead_node(mn))) {
4835 		mas_rewalk(mas, index);
4836 		goto retry;
4837 	}
4838 
4839 	if (likely(entry)) {
4840 		mas->offset = offset;
4841 		mas->last = pivot;
4842 		mas->index = min;
4843 	}
4844 	return entry;
4845 }
4846 
4847 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4848 {
4849 	void *entry;
4850 
4851 retry:
4852 	while (likely(!mas_is_none(mas))) {
4853 		entry = mas_prev_nentry(mas, min, mas->index);
4854 		if (unlikely(mas->last < min))
4855 			goto not_found;
4856 
4857 		if (likely(entry))
4858 			return entry;
4859 
4860 		if (unlikely(mas_prev_node(mas, min))) {
4861 			mas_rewalk(mas, mas->index);
4862 			goto retry;
4863 		}
4864 
4865 		mas->offset++;
4866 	}
4867 
4868 	mas->offset--;
4869 not_found:
4870 	mas->index = mas->last = min;
4871 	return NULL;
4872 }
4873 
4874 /*
4875  * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4876  * highest gap address of a given size in a given node and descend.
4877  * @mas: The maple state
4878  * @size: The needed size.
4879  *
4880  * Return: True if found in a leaf, false otherwise.
4882  */
4883 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
4884 {
4885 	enum maple_type type = mte_node_type(mas->node);
4886 	struct maple_node *node = mas_mn(mas);
4887 	unsigned long *pivots, *gaps;
4888 	void __rcu **slots;
4889 	unsigned long gap = 0;
4890 	unsigned long max, min, index;
4891 	unsigned char offset;
4892 
4893 	if (unlikely(mas_is_err(mas)))
4894 		return true;
4895 
4896 	if (ma_is_dense(type)) {
4897 		/* dense nodes. */
4898 		mas->offset = (unsigned char)(mas->index - mas->min);
4899 		return true;
4900 	}
4901 
4902 	pivots = ma_pivots(node, type);
4903 	slots = ma_slots(node, type);
4904 	gaps = ma_gaps(node, type);
4905 	offset = mas->offset;
4906 	min = mas_safe_min(mas, pivots, offset);
4907 	/* Skip out of bounds. */
4908 	while (mas->last < min)
4909 		min = mas_safe_min(mas, pivots, --offset);
4910 
4911 	max = mas_safe_pivot(mas, pivots, offset, type);
4912 	index = mas->index;
4913 	while (index <= max) {
4914 		gap = 0;
4915 		if (gaps)
4916 			gap = gaps[offset];
4917 		else if (!mas_slot(mas, slots, offset))
4918 			gap = max - min + 1;
4919 
4920 		if (gap) {
4921 			if ((size <= gap) && (size <= mas->last - min + 1))
4922 				break;
4923 
4924 			if (!gaps) {
4925 				/* Skip the next slot, it cannot be a gap. */
4926 				if (offset < 2)
4927 					goto ascend;
4928 
4929 				offset -= 2;
4930 				max = pivots[offset];
4931 				min = mas_safe_min(mas, pivots, offset);
4932 				continue;
4933 			}
4934 		}
4935 
4936 		if (!offset)
4937 			goto ascend;
4938 
4939 		offset--;
4940 		max = min - 1;
4941 		min = mas_safe_min(mas, pivots, offset);
4942 	}
4943 
4944 	if (unlikely(index > max)) {
4945 		mas_set_err(mas, -EBUSY);
4946 		return false;
4947 	}
4948 
4949 	if (unlikely(ma_is_leaf(type))) {
4950 		mas->offset = offset;
4951 		mas->min = min;
4952 		mas->max = min + gap - 1;
4953 		return true;
4954 	}
4955 
4956 	/* descend, only happens under lock. */
4957 	mas->node = mas_slot(mas, slots, offset);
4958 	mas->min = min;
4959 	mas->max = max;
4960 	mas->offset = mas_data_end(mas);
4961 	return false;
4962 
4963 ascend:
4964 	if (mte_is_root(mas->node))
4965 		mas_set_err(mas, -EBUSY);
4966 
4967 	return false;
4968 }
4969 
4970 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4971 {
4972 	enum maple_type type = mte_node_type(mas->node);
4973 	unsigned long pivot, min, gap = 0;
4974 	unsigned char offset;
4975 	unsigned long *gaps;
4976 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
4977 	void __rcu **slots = ma_slots(mas_mn(mas), type);
4978 	bool found = false;
4979 
4980 	if (ma_is_dense(type)) {
4981 		mas->offset = (unsigned char)(mas->index - mas->min);
4982 		return true;
4983 	}
4984 
4985 	gaps = ma_gaps(mte_to_node(mas->node), type);
4986 	offset = mas->offset;
4987 	min = mas_safe_min(mas, pivots, offset);
4988 	for (; offset < mt_slots[type]; offset++) {
4989 		pivot = mas_safe_pivot(mas, pivots, offset, type);
4990 		if (offset && !pivot)
4991 			break;
4992 
4993 		/* Not within lower bounds */
4994 		if (mas->index > pivot)
4995 			goto next_slot;
4996 
4997 		if (gaps)
4998 			gap = gaps[offset];
4999 		else if (!mas_slot(mas, slots, offset))
5000 			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5001 		else
5002 			goto next_slot;
5003 
5004 		if (gap >= size) {
5005 			if (ma_is_leaf(type)) {
5006 				found = true;
5007 				goto done;
5008 			}
5009 			if (mas->index <= pivot) {
5010 				mas->node = mas_slot(mas, slots, offset);
5011 				mas->min = min;
5012 				mas->max = pivot;
5013 				offset = 0;
5014 				break;
5015 			}
5016 		}
5017 next_slot:
5018 		min = pivot + 1;
5019 		if (mas->last <= pivot) {
5020 			mas_set_err(mas, -EBUSY);
5021 			return true;
5022 		}
5023 	}
5024 
5025 	if (mte_is_root(mas->node))
5026 		found = true;
5027 done:
5028 	mas->offset = offset;
5029 	return found;
5030 }
5031 
5032 /**
5033  * mas_walk() - Search for @mas->index in the tree.
5034  * @mas: The maple state.
5035  *
5036  * mas->index and mas->last will be set to the range if there is a value.  If
5037  * mas->node is MAS_NONE, reset to MAS_START.
5038  *
5039  * Return: the entry at the location or %NULL.
5040  */
5041 void *mas_walk(struct ma_state *mas)
5042 {
5043 	void *entry;
5044 
5045 retry:
5046 	entry = mas_state_walk(mas);
5047 	if (mas_is_start(mas))
5048 		goto retry;
5049 
5050 	if (mas_is_ptr(mas)) {
5051 		if (!mas->index) {
5052 			mas->last = 0;
5053 		} else {
5054 			mas->index = 1;
5055 			mas->last = ULONG_MAX;
5056 		}
5057 		return entry;
5058 	}
5059 
5060 	if (mas_is_none(mas)) {
5061 		mas->index = 0;
5062 		mas->last = ULONG_MAX;
5063 	}
5064 
5065 	return entry;
5066 }
5067 EXPORT_SYMBOL_GPL(mas_walk);
5068 
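/*
 * A minimal mas_walk() usage sketch (hypothetical tree and index); the
 * caller must hold rcu_read_lock() or the tree lock across the walk:
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = mas_walk(&mas);	  - mas.index/mas.last now span entry's range
 *	rcu_read_unlock();
 */
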
5069 static inline bool mas_rewind_node(struct ma_state *mas)
5070 {
5071 	unsigned char slot;
5072 
5073 	do {
5074 		if (mte_is_root(mas->node)) {
5075 			slot = mas->offset;
5076 			if (!slot)
5077 				return false;
5078 		} else {
5079 			mas_ascend(mas);
5080 			slot = mas->offset;
5081 		}
5082 	} while (!slot);
5083 
5084 	mas->offset = --slot;
5085 	return true;
5086 }
5087 
5088 /*
5089  * mas_skip_node() - Internal function.  Skip over a node.
5090  * @mas: The maple state.
5091  *
5092  * Return: true if there is another node, false otherwise.
5093  */
5094 static inline bool mas_skip_node(struct ma_state *mas)
5095 {
5096 	unsigned char slot, slot_count;
5097 	unsigned long *pivots;
5098 	enum maple_type mt;
5099 
5100 	mt = mte_node_type(mas->node);
5101 	slot_count = mt_slots[mt] - 1;
5102 	do {
5103 		if (mte_is_root(mas->node)) {
5104 			slot = mas->offset;
5105 			if (slot > slot_count) {
5106 				mas_set_err(mas, -EBUSY);
5107 				return false;
5108 			}
5109 		} else {
5110 			mas_ascend(mas);
5111 			slot = mas->offset;
5112 			mt = mte_node_type(mas->node);
5113 			slot_count = mt_slots[mt] - 1;
5114 		}
5115 	} while (slot > slot_count);
5116 
5117 	mas->offset = ++slot;
5118 	pivots = ma_pivots(mas_mn(mas), mt);
5119 	if (slot > 0)
5120 		mas->min = pivots[slot - 1] + 1;
5121 
5122 	if (slot <= slot_count)
5123 		mas->max = pivots[slot];
5124 
5125 	return true;
5126 }
5127 
5128 /*
5129  * mas_awalk() - Allocation walk.  Search from low address to high for a gap
5130  * of @size.
5131  * @mas: The maple state
5132  * @size: The size of the gap required
5133  *
5134  * Search between @mas->index and @mas->last for a gap of @size.
5135  */
5136 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5137 {
5138 	struct maple_enode *last = NULL;
5139 
5140 	/*
5141 	 * There are 4 options:
5142 	 * go to child (descend)
5143 	 * go back to parent (ascend)
5144 	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5145 	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5146 	 */
5147 	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5148 		if (last == mas->node)
5149 			mas_skip_node(mas);
5150 		else
5151 			last = mas->node;
5152 	}
5153 }
5154 
5155 /*
5156  * mas_fill_gap() - Fill a located gap with @entry.
5157  * @mas: The maple state
5158  * @entry: The value to store
5159  * @slot: The offset into the node to store the @entry
5160  * @size: The size of the entry
5161  * @index: Pointer to store the start location of the gap
5162  */
5163 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5164 		unsigned char slot, unsigned long size, unsigned long *index)
5165 {
5166 	MA_WR_STATE(wr_mas, mas, entry);
5167 	unsigned char pslot = mte_parent_slot(mas->node);
5168 	struct maple_enode *mn = mas->node;
5169 	unsigned long *pivots;
5170 	enum maple_type ptype;
5171 	/*
5172 	 * mas->index is the start address for the search, which may no longer
5173 	 * be needed.  mas->last is the end address for the search.
5175 	 */
5176 
5177 	*index = mas->index;
5178 	mas->last = mas->index + size - 1;
5179 
5180 	/*
5181 	 * It is possible that using mas->max and mas->min to correctly
5182 	 * calculate the index and last will cause an issue in the gap
5183 	 * calculation, so fix the ma_state here
5184 	 */
5185 	mas_ascend(mas);
5186 	ptype = mte_node_type(mas->node);
5187 	pivots = ma_pivots(mas_mn(mas), ptype);
5188 	mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5189 	mas->min = mas_safe_min(mas, pivots, pslot);
5190 	mas->node = mn;
5191 	mas->offset = slot;
5192 	mas_wr_store_entry(&wr_mas);
5193 }
5194 
5195 /*
5196  * mas_sparse_area() - Internal function.  Return upper or lower limit when
5197  * searching for a gap in an empty tree.
5198  * @mas: The maple state
5199  * @min: the minimum range
5200  * @max: The maximum range
5201  * @size: The size of the gap
5202  * @fwd: Searching forward or back
5203  */
5204 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5205 				unsigned long max, unsigned long size, bool fwd)
5206 {
5207 	unsigned long start = 0;
5208 
5209 	/* mas_is_ptr: a root pointer occupies index 0, so start the search at 1. */
5210 	if (likely(!mas_is_none(mas)))
5211 		start++;
5212 
5213 	if (start < min)
5214 		start = min;
5215 
5216 	if (fwd) {
5217 		mas->index = start;
5218 		mas->last = start + size - 1;
5219 		return;
5220 	}
5221 
5222 	mas->index = max;
5223 }
5224 
5225 /*
5226  * mas_empty_area() - Get the lowest address within the range that is
5227  * sufficient for the size requested.
5228  * @mas: The maple state
5229  * @min: The lowest value of the range
5230  * @max: The highest value of the range
5231  * @size: The size needed
5232  *
5233  * Return: 0 on success, -EBUSY if no gap of @size is found in the range.
5234  */
5233 int mas_empty_area(struct ma_state *mas, unsigned long min,
5234 		unsigned long max, unsigned long size)
5235 {
5236 	unsigned char offset;
5237 	unsigned long *pivots;
5238 	enum maple_type mt;
5239 
5240 	if (mas_is_start(mas))
5241 		mas_start(mas);
5242 	else if (mas->offset >= 2)
5243 		mas->offset -= 2;
5244 	else if (!mas_skip_node(mas))
5245 		return -EBUSY;
5246 
5247 	/* Empty set */
5248 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5249 		mas_sparse_area(mas, min, max, size, true);
5250 		return 0;
5251 	}
5252 
5253 	/* The start of the window can only be within these values */
5254 	mas->index = min;
5255 	mas->last = max;
5256 	mas_awalk(mas, size);
5257 
5258 	if (unlikely(mas_is_err(mas)))
5259 		return xa_err(mas->node);
5260 
5261 	offset = mas->offset;
5262 	if (unlikely(offset == MAPLE_NODE_SLOTS))
5263 		return -EBUSY;
5264 
5265 	mt = mte_node_type(mas->node);
5266 	pivots = ma_pivots(mas_mn(mas), mt);
5267 	if (offset)
5268 		mas->min = pivots[offset - 1] + 1;
5269 
5270 	if (offset < mt_pivots[mt])
5271 		mas->max = pivots[offset];
5272 
5273 	if (mas->index < mas->min)
5274 		mas->index = mas->min;
5275 
5276 	mas->last = mas->index + size - 1;
5277 	return 0;
5278 }
5279 EXPORT_SYMBOL_GPL(mas_empty_area);
5280 
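/*
 * Usage sketch (illustrative): find the lowest 0x1000-sized gap in the full
 * range and claim it.  On success, mas.index and mas.last already delimit
 * the chosen window:
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *
 *	mas_lock(&mas);
 *	if (!mas_empty_area(&mas, 0, ULONG_MAX, 0x1000))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 *	mas_unlock(&mas);
 */
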
5281 /*
5282  * mas_empty_area_rev() - Get the highest address within the range that is
5283  * sufficient for the size requested.
5284  * @mas: The maple state
5285  * @min: The lowest value of the range
5286  * @max: The highest value of the range
5287  * @size: The size needed
5288  *
5289  * Return: 0 on success, -EBUSY if no gap of @size is found in the range.
5290  */
5289 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5290 		unsigned long max, unsigned long size)
5291 {
5292 	struct maple_enode *last = mas->node;
5293 
5294 	if (mas_is_start(mas)) {
5295 		mas_start(mas);
5296 		mas->offset = mas_data_end(mas);
5297 	} else if (mas->offset >= 2) {
5298 		mas->offset -= 2;
5299 	} else if (!mas_rewind_node(mas)) {
5300 		return -EBUSY;
5301 	}
5302 
5303 	/* Empty set. */
5304 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5305 		mas_sparse_area(mas, min, max, size, false);
5306 		return 0;
5307 	}
5308 
5309 	/* The start of the window can only be within these values. */
5310 	mas->index = min;
5311 	mas->last = max;
5312 
5313 	while (!mas_rev_awalk(mas, size)) {
5314 		if (last == mas->node) {
5315 			if (!mas_rewind_node(mas))
5316 				return -EBUSY;
5317 		} else {
5318 			last = mas->node;
5319 		}
5320 	}
5321 
5322 	if (mas_is_err(mas))
5323 		return xa_err(mas->node);
5324 
5325 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5326 		return -EBUSY;
5327 
5328 	/*
5329 	 * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
5330 	 * the maximum is outside the window we are searching, then use the last
5331 	 * location in the search.
5332 	 * mas->max and mas->min are the range of the gap.
5333 	 * mas->index and mas->last are currently set to the search range.
5334 	 */
5335 
5336 	/* Trim the upper limit to the max. */
5337 	if (mas->max <= mas->last)
5338 		mas->last = mas->max;
5339 
5340 	mas->index = mas->last - size + 1;
5341 	return 0;
5342 }
5343 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5344 
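/*
 * The top-down counterpart to the sketch above (illustrative): on success,
 * mas.index is the highest start such that [mas.index, mas.last] covers the
 * requested size:
 *
 *	if (!mas_empty_area_rev(&mas, 0, ULONG_MAX, 0x1000))
 *		mas_store_gfp(&mas, ptr, GFP_KERNEL);
 */
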
5345 static inline int mas_alloc(struct ma_state *mas, void *entry,
5346 		unsigned long size, unsigned long *index)
5347 {
5348 	unsigned long min;
5349 
5350 	mas_start(mas);
5351 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5352 		mas_root_expand(mas, entry);
5353 		if (mas_is_err(mas))
5354 			return xa_err(mas->node);
5355 
5356 		if (!mas->index)
5357 			return mte_pivot(mas->node, 0);
5358 		return mte_pivot(mas->node, 1);
5359 	}
5360 
5361 	/* Must be walking a tree. */
5362 	mas_awalk(mas, size);
5363 	if (mas_is_err(mas))
5364 		return xa_err(mas->node);
5365 
5366 	if (mas->offset == MAPLE_NODE_SLOTS)
5367 		goto no_gap;
5368 
5369 	/*
5370 	 * At this point, mas->node points to the right node and we have an
5371 	 * offset that has a sufficient gap.
5372 	 */
5373 	min = mas->min;
5374 	if (mas->offset)
5375 		min = mte_pivot(mas->node, mas->offset - 1) + 1;
5376 
5377 	if (mas->index < min)
5378 		mas->index = min;
5379 
5380 	mas_fill_gap(mas, entry, mas->offset, size, index);
5381 	return 0;
5382 
5383 no_gap:
5384 	return -EBUSY;
5385 }
5386 
5387 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5388 				unsigned long max, void *entry,
5389 				unsigned long size, unsigned long *index)
5390 {
5391 	int ret = 0;
5392 
5393 	ret = mas_empty_area_rev(mas, min, max, size);
5394 	if (ret)
5395 		return ret;
5396 
5397 	if (mas_is_err(mas))
5398 		return xa_err(mas->node);
5399 
5400 	if (mas->offset == MAPLE_NODE_SLOTS)
5401 		goto no_gap;
5402 
5403 	mas_fill_gap(mas, entry, mas->offset, size, index);
5404 	return 0;
5405 
5406 no_gap:
5407 	return -EBUSY;
5408 }
5409 
5410 /*
5411  * mas_dead_leaves() - Mark all leaves of a node as dead.
5412  * @mas: The maple state
5413  * @slots: Pointer to the slot array
5414  *
5415  * Must hold the write lock.
5416  *
5417  * Return: The number of leaves marked as dead.
5418  */
5419 static inline
5420 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots)
5421 {
5422 	struct maple_node *node;
5423 	enum maple_type type;
5424 	void *entry;
5425 	int offset;
5426 
5427 	for (offset = 0; offset < mt_slot_count(mas->node); offset++) {
5428 		entry = mas_slot_locked(mas, slots, offset);
5429 		type = mte_node_type(entry);
5430 		node = mte_to_node(entry);
5431 		/* Use both node and type to catch LE & BE metadata */
5432 		if (!node || !type)
5433 			break;
5434 
5435 		mte_set_node_dead(entry);
5436 		smp_wmb(); /* Needed for RCU */
5437 		node->type = type;
5438 		rcu_assign_pointer(slots[offset], node);
5439 	}
5440 
5441 	return offset;
5442 }
5443 
5444 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset)
5445 {
5446 	struct maple_node *node, *next;
5447 	void __rcu **slots = NULL;
5448 
5449 	next = mas_mn(mas);
5450 	do {
5451 		mas->node = ma_enode_ptr(next);
5452 		node = mas_mn(mas);
5453 		slots = ma_slots(node, node->type);
5454 		next = mas_slot_locked(mas, slots, offset);
5455 		offset = 0;
5456 	} while (!ma_is_leaf(next->type));
5457 
5458 	return slots;
5459 }
5460 
5461 static void mt_free_walk(struct rcu_head *head)
5462 {
5463 	void __rcu **slots;
5464 	struct maple_node *node, *start;
5465 	struct maple_tree mt;
5466 	unsigned char offset;
5467 	enum maple_type type;
5468 	MA_STATE(mas, &mt, 0, 0);
5469 
5470 	node = container_of(head, struct maple_node, rcu);
5471 
5472 	if (ma_is_leaf(node->type))
5473 		goto free_leaf;
5474 
5475 	mt_init_flags(&mt, node->ma_flags);
5476 	mas_lock(&mas);
5477 	start = node;
5478 	mas.node = mt_mk_node(node, node->type);
5479 	slots = mas_dead_walk(&mas, 0);
5480 	node = mas_mn(&mas);
5481 	do {
5482 		mt_free_bulk(node->slot_len, slots);
5483 		offset = node->parent_slot + 1;
5484 		mas.node = node->piv_parent;
5485 		if (mas_mn(&mas) == node)
5486 			goto start_slots_free;
5487 
5488 		type = mte_node_type(mas.node);
5489 		slots = ma_slots(mte_to_node(mas.node), type);
5490 		if ((offset < mt_slots[type]) && (slots[offset]))
5491 			slots = mas_dead_walk(&mas, offset);
5492 
5493 		node = mas_mn(&mas);
5494 	} while ((node != start) || (node->slot_len < offset));
5495 
5496 	slots = ma_slots(node, node->type);
5497 	mt_free_bulk(node->slot_len, slots);
5498 
5499 start_slots_free:
5500 	mas_unlock(&mas);
5501 free_leaf:
5502 	mt_free_rcu(&node->rcu);
5503 }
5504 
5505 static inline void __rcu **mas_destroy_descend(struct ma_state *mas,
5506 			struct maple_enode *prev, unsigned char offset)
5507 {
5508 	struct maple_node *node;
5509 	struct maple_enode *next = mas->node;
5510 	void __rcu **slots = NULL;
5511 
5512 	do {
5513 		mas->node = next;
5514 		node = mas_mn(mas);
5515 		slots = ma_slots(node, mte_node_type(mas->node));
5516 		next = mas_slot_locked(mas, slots, 0);
5517 		if ((mte_dead_node(next)))
5518 			next = mas_slot_locked(mas, slots, 1);
5519 
5520 		mte_set_node_dead(mas->node);
5521 		node->type = mte_node_type(mas->node);
5522 		node->piv_parent = prev;
5523 		node->parent_slot = offset;
5524 		offset = 0;
5525 		prev = mas->node;
5526 	} while (!mte_is_leaf(next));
5527 
5528 	return slots;
5529 }
5530 
5531 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags,
5532 			    bool free)
5533 {
5534 	void __rcu **slots;
5535 	struct maple_node *node = mte_to_node(enode);
5536 	struct maple_enode *start;
5537 	struct maple_tree mt;
5538 
5539 	MA_STATE(mas, &mt, 0, 0);
5540 
5541 	if (mte_is_leaf(enode))
5542 		goto free_leaf;
5543 
5544 	mt_init_flags(&mt, ma_flags);
5545 	mas_lock(&mas);
5546 
5547 	mas.node = start = enode;
5548 	slots = mas_destroy_descend(&mas, start, 0);
5549 	node = mas_mn(&mas);
5550 	do {
5551 		enum maple_type type;
5552 		unsigned char offset;
5553 		struct maple_enode *parent, *tmp;
5554 
5555 		node->slot_len = mas_dead_leaves(&mas, slots);
5556 		if (free)
5557 			mt_free_bulk(node->slot_len, slots);
5558 		offset = node->parent_slot + 1;
5559 		mas.node = node->piv_parent;
5560 		if (mas_mn(&mas) == node)
5561 			goto start_slots_free;
5562 
5563 		type = mte_node_type(mas.node);
5564 		slots = ma_slots(mte_to_node(mas.node), type);
5565 		if (offset >= mt_slots[type])
5566 			goto next;
5567 
5568 		tmp = mas_slot_locked(&mas, slots, offset);
5569 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5570 			parent = mas.node;
5571 			mas.node = tmp;
5572 			slots = mas_destroy_descend(&mas, parent, offset);
5573 		}
5574 next:
5575 		node = mas_mn(&mas);
5576 	} while (start != mas.node);
5577 
5578 	node = mas_mn(&mas);
5579 	node->slot_len = mas_dead_leaves(&mas, slots);
5580 	if (free)
5581 		mt_free_bulk(node->slot_len, slots);
5582 
5583 start_slots_free:
5584 	mas_unlock(&mas);
5585 
5586 free_leaf:
5587 	if (free)
5588 		mt_free_rcu(&node->rcu);
5589 }
5590 
5591 /*
5592  * mte_destroy_walk() - Free a tree or sub-tree.
5593  * @enode: the encoded maple node (maple_enode) to start
5594  * @mt: the tree to free - needed for node types.
5595  *
5596  * Must hold the write lock.
5597  */
5598 static inline void mte_destroy_walk(struct maple_enode *enode,
5599 				    struct maple_tree *mt)
5600 {
5601 	struct maple_node *node = mte_to_node(enode);
5602 
5603 	if (mt_in_rcu(mt)) {
5604 		mt_destroy_walk(enode, mt->ma_flags, false);
5605 		call_rcu(&node->rcu, mt_free_walk);
5606 	} else {
5607 		mt_destroy_walk(enode, mt->ma_flags, true);
5608 	}
5609 }
5610 
5611 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5612 {
5613 	if (!mas_is_start(wr_mas->mas)) {
5614 		if (mas_is_none(wr_mas->mas)) {
5615 			mas_reset(wr_mas->mas);
5616 		} else {
5617 			wr_mas->r_max = wr_mas->mas->max;
5618 			wr_mas->type = mte_node_type(wr_mas->mas->node);
5619 			if (mas_is_span_wr(wr_mas))
5620 				mas_reset(wr_mas->mas);
5621 		}
5622 	}
5623 
5624 }
5625 
5626 /* Interface */
5627 
5628 /**
5629  * mas_store() - Store an @entry.
5630  * @mas: The maple state.
5631  * @entry: The entry to store.
5632  *
5633  * The @mas->index and @mas->last are used to set the range for the @entry.
5634  * Note: The @mas should have pre-allocated nodes to ensure there is memory to
5635  * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5636  *
5637  * Return: the first entry between mas->index and mas->last or %NULL.
5638  */
5639 void *mas_store(struct ma_state *mas, void *entry)
5640 {
5641 	MA_WR_STATE(wr_mas, mas, entry);
5642 
5643 	trace_ma_write(__func__, mas, 0, entry);
5644 #ifdef CONFIG_DEBUG_MAPLE_TREE
5645 	if (mas->index > mas->last)
5646 		pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5647 	MT_BUG_ON(mas->tree, mas->index > mas->last);
5648 	if (mas->index > mas->last) {
5649 		mas_set_err(mas, -EINVAL);
5650 		return NULL;
5651 	}
5652 
5653 #endif
5654 
5655 	/*
5656 	 * Storing is the same operation as insert with the added caveat that it
5657 	 * can overwrite entries.  Although this seems simple enough, one may
5658 	 * want to examine what happens if a single store operation were to
5659 	 * overwrite multiple entries within a self-balancing B-Tree.
5660 	 */
5661 	mas_wr_store_setup(&wr_mas);
5662 	mas_wr_store_entry(&wr_mas);
5663 	return wr_mas.content;
5664 }
5665 EXPORT_SYMBOL_GPL(mas_store);
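
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * a minimal mas_store() call under the tree lock.  The tree, range and
 * value are hypothetical.  mas_store() does not retry a failed allocation,
 * so callers are expected to have preallocated nodes, or to use
 * mas_store_gfp() below, which retries internally.
 *
 *	DEFINE_MTREE(mt);
 *	MA_STATE(mas, &mt, 10, 20);
 *
 *	mas_lock(&mas);
 *	mas_store(&mas, xa_mk_value(42));
 *	mas_unlock(&mas);
 */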
5666 
5667 /**
5668  * mas_store_gfp() - Store a value into the tree.
5669  * @mas: The maple state
5670  * @entry: The entry to store
5671  * @gfp: The GFP_FLAGS to use for allocations if necessary.
5672  *
5673  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5674  * be allocated.
5675  */
5676 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5677 {
5678 	MA_WR_STATE(wr_mas, mas, entry);
5679 
5680 	mas_wr_store_setup(&wr_mas);
5681 	trace_ma_write(__func__, mas, 0, entry);
5682 retry:
5683 	mas_wr_store_entry(&wr_mas);
5684 	if (unlikely(mas_nomem(mas, gfp)))
5685 		goto retry;
5686 
5687 	if (unlikely(mas_is_err(mas)))
5688 		return xa_err(mas->node);
5689 
5690 	return 0;
5691 }
5692 EXPORT_SYMBOL_GPL(mas_store_gfp);
5693 
5694 /**
5695  * mas_store_prealloc() - Store a value into the tree using memory
5696  * preallocated in the maple state.
5697  * @mas: The maple state
5698  * @entry: The entry to store.
5699  */
5700 void mas_store_prealloc(struct ma_state *mas, void *entry)
5701 {
5702 	MA_WR_STATE(wr_mas, mas, entry);
5703 
5704 	mas_wr_store_setup(&wr_mas);
5705 	trace_ma_write(__func__, mas, 0, entry);
5706 	mas_wr_store_entry(&wr_mas);
5707 	BUG_ON(mas_is_err(mas));
5708 	mas_destroy(mas);
5709 }
5710 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5711 
5712 /**
5713  * mas_preallocate() - Preallocate enough nodes for a store operation
5714  * @mas: The maple state
5715  * @entry: The entry that will be stored
5716  * @gfp: The GFP_FLAGS to use for allocations.
5717  *
5718  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5719  */
5720 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5721 {
5722 	int ret;
5723 
5724 	mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5725 	mas->mas_flags |= MA_STATE_PREALLOC;
5726 	if (likely(!mas_is_err(mas)))
5727 		return 0;
5728 
5729 	mas_set_alloc_req(mas, 0);
5730 	ret = xa_err(mas->node);
5731 	mas_reset(mas);
5732 	mas_destroy(mas);
5733 	mas_reset(mas);
5734 	return ret;
5735 }
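
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * the preallocate-then-store pattern for callers that must not sleep or
 * fail at store time.  The tree, range and "ptr" are hypothetical.
 *
 *	MA_STATE(mas, &mt, 100, 199);
 *
 *	if (mas_preallocate(&mas, ptr, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_lock(&mas);
 *	mas_store_prealloc(&mas, ptr);
 *	mas_unlock(&mas);
 *
 * mas_store_prealloc() finishes with mas_destroy(), which frees whatever
 * preallocated nodes the store did not consume.
 */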
5736 
5737 /*
5738  * mas_destroy() - destroy a maple state.
5739  * @mas: The maple state
5740  *
5741  * Upon completion, check the left-most node and rebalance against the node to
5742  * the right if necessary.  Frees any allocated nodes associated with this maple
5743  * state.
5744  */
5745 void mas_destroy(struct ma_state *mas)
5746 {
5747 	struct maple_alloc *node;
5748 
5749 	/*
5750 	 * When using mas_for_each() to insert an expected number of elements,
5751 	 * it is possible that the number inserted is less than the expected
5752 	 * number.  To fix an invalid final node, a check is performed here to
5753 	 * rebalance the previous node with the final node.
5754 	 */
5755 	if (mas->mas_flags & MA_STATE_REBALANCE) {
5756 		unsigned char end;
5757 
5758 		if (mas_is_start(mas))
5759 			mas_start(mas);
5760 
5761 		mtree_range_walk(mas);
5762 		end = mas_data_end(mas) + 1;
5763 		if (end < mt_min_slot_count(mas->node) - 1)
5764 			mas_destroy_rebalance(mas, end);
5765 
5766 		mas->mas_flags &= ~MA_STATE_REBALANCE;
5767 	}
5768 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5769 
5770 	while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) {
5771 		node = mas->alloc;
5772 		mas->alloc = node->slot[0];
5773 		if (node->node_count > 0)
5774 			mt_free_bulk(node->node_count,
5775 				     (void __rcu **)&node->slot[1]);
5776 		kmem_cache_free(maple_node_cache, node);
5777 	}
5778 	mas->alloc = NULL;
5779 }
5780 EXPORT_SYMBOL_GPL(mas_destroy);
5781 
5782 /*
5783  * mas_expected_entries() - Set the expected number of entries that will be inserted.
5784  * @mas: The maple state
5785  * @nr_entries: The number of expected entries.
5786  *
5787  * This will attempt to pre-allocate enough nodes to store the expected number
5788  * of entries.  The allocations will occur using the bulk allocator interface
5789  * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5790  * to ensure any unused nodes are freed.
5791  *
5792  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5793  */
5794 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5795 {
5796 	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5797 	struct maple_enode *enode = mas->node;
5798 	int nr_nodes;
5799 	int ret;
5800 
5801 	/*
5802 	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5803 	 * forking a process and duplicating the VMAs from one tree to a new
5804 	 * tree.  When such a situation arises, it is known that the new tree is
5805 	 * not going to be used until the entire tree is populated.  For
5806 	 * performance reasons, it is best to use a bulk load with RCU disabled.
5807 	 * This allows for optimistic splitting that favours the left and reuse
5808 	 * of nodes during the operation.
5809 	 */
5810 
5811 	/* Optimize splitting for bulk insert in-order */
5812 	mas->mas_flags |= MA_STATE_BULK;
5813 
5814 	/*
5815 	 * Avoid overflow, assume a gap between each entry and a trailing null.
5816 	 * If this is wrong, it just means allocation can happen during
5817 	 * insertion of entries.
5818 	 */
5819 	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5820 	if (!mt_is_alloc(mas->tree))
5821 		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5822 
5823 	/* Leaves; reduce slots to keep space for expansion */
5824 	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5825 	/* Internal nodes */
5826 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5827 	/* Add working room for split (2 nodes) + new parents */
5828 	mas_node_count(mas, nr_nodes + 3);
5829 
5830 	/* Detect if allocations run out */
5831 	mas->mas_flags |= MA_STATE_PREALLOC;
5832 
5833 	if (!mas_is_err(mas))
5834 		return 0;
5835 
5836 	ret = xa_err(mas->node);
5837 	mas->node = enode;
5838 	mas_destroy(mas);
5839 	return ret;
5840 
5841 }
5842 EXPORT_SYMBOL_GPL(mas_expected_entries);
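
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * bulk loading a fresh tree in ascending order.  "new_mt" and "nr" are
 * hypothetical.
 *
 *	MA_STATE(mas, &new_mt, 0, 0);
 *	unsigned long i;
 *
 *	mas_lock(&mas);
 *	if (!mas_expected_entries(&mas, nr)) {
 *		for (i = 0; i < nr; i++) {
 *			mas_set_range(&mas, i * 10, i * 10 + 5);
 *			mas_store(&mas, xa_mk_value(i));
 *		}
 *		mas_destroy(&mas);
 *	}
 *	mas_unlock(&mas);
 *
 * The mas_destroy() call both frees unused preallocations and, if fewer
 * than the expected entries were stored, rebalances the final node (see
 * MA_STATE_REBALANCE above).
 */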
5843 
5844 /**
5845  * mas_next() - Get the next entry.
5846  * @mas: The maple state
5847  * @max: The maximum index to check.
5848  *
5849  * Returns the next entry after @mas->index.
5850  * Must hold rcu_read_lock or the write lock.
5851  * Can return the zero entry.
5852  *
5853  * Return: The next entry or %NULL
5854  */
5855 void *mas_next(struct ma_state *mas, unsigned long max)
5856 {
5857 	if (mas_is_none(mas) || mas_is_paused(mas))
5858 		mas->node = MAS_START;
5859 
5860 	if (mas_is_start(mas))
5861 		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5862 
5863 	if (mas_is_ptr(mas)) {
5864 		if (!mas->index) {
5865 			mas->index = 1;
5866 			mas->last = ULONG_MAX;
5867 		}
5868 		return NULL;
5869 	}
5870 
5871 	if (mas->last == ULONG_MAX)
5872 		return NULL;
5873 
5874 	/* Retries on dead nodes handled by mas_next_entry */
5875 	return mas_next_entry(mas, max);
5876 }
5877 EXPORT_SYMBOL_GPL(mas_next);
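
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * walking every entry with mas_next() under RCU.  On return, mas.index and
 * mas.last hold the range of the current entry.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		pr_info("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */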
5878 
5879 /**
5880  * mt_next() - get the next value in the maple tree
5881  * @mt: The maple tree
5882  * @index: The start index
5883  * @max: The maximum index to check
5884  *
5885  * Return: The entry higher than @index, or %NULL if nothing is found.
5886  */
5887 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5888 {
5889 	void *entry = NULL;
5890 	MA_STATE(mas, mt, index, index);
5891 
5892 	rcu_read_lock();
5893 	entry = mas_next(&mas, max);
5894 	rcu_read_unlock();
5895 	return entry;
5896 }
5897 EXPORT_SYMBOL_GPL(mt_next);
5898 
5899 /**
5900  * mas_prev() - Get the previous entry
5901  * @mas: The maple state
5902  * @min: The minimum value to check.
5903  *
5904  * Must hold rcu_read_lock or the write lock.
5905  * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on
5906  * non-searchable nodes.
5907  *
5908  * Return: the previous value or %NULL.
5909  */
5910 void *mas_prev(struct ma_state *mas, unsigned long min)
5911 {
5912 	if (!mas->index) {
5913 		/* Nothing comes before 0 */
5914 		mas->last = 0;
5915 		return NULL;
5916 	}
5917 
5918 	if (unlikely(mas_is_ptr(mas)))
5919 		return NULL;
5920 
5921 	if (mas_is_none(mas) || mas_is_paused(mas))
5922 		mas->node = MAS_START;
5923 
5924 	if (mas_is_start(mas)) {
5925 		mas_walk(mas);
5926 		if (!mas->index)
5927 			return NULL;
5928 	}
5929 
5930 	if (mas_is_ptr(mas)) {
5931 		if (!mas->index) {
5932 			mas->last = 0;
5933 			return NULL;
5934 		}
5935 
5936 		mas->index = mas->last = 0;
5937 		return mas_root_locked(mas);
5938 	}
5939 	return mas_prev_entry(mas, min);
5940 }
5941 EXPORT_SYMBOL_GPL(mas_prev);
5942 
5943 /**
5944  * mt_prev() - get the previous value in the maple tree
5945  * @mt: The maple tree
5946  * @index: The start index
5947  * @min: The minimum index to check
5948  *
5949  * Return: The entry at @index or lower, or %NULL if nothing is found.
5950  */
5951 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5952 {
5953 	void *entry = NULL;
5954 	MA_STATE(mas, mt, index, index);
5955 
5956 	rcu_read_lock();
5957 	entry = mas_prev(&mas, min);
5958 	rcu_read_unlock();
5959 	return entry;
5960 }
5961 EXPORT_SYMBOL_GPL(mt_prev);
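
/*
 * Example (editor's illustrative sketch): the mirror of the mas_next()
 * walk above, visiting entries from the top of the index space downwards.
 *
 *	MA_STATE(mas, &mt, ULONG_MAX, ULONG_MAX);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_prev(&mas, 0)) != NULL)
 *		pr_info("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */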
5962 
5963 /**
5964  * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5965  * @mas: The maple state to pause
5966  *
5967  * Some users need to pause a walk and drop the lock they're holding in
5968  * order to yield to a higher priority thread or carry out an operation
5969  * on an entry.  Those users should call this function before they drop
5970  * the lock.  It resets the @mas to be suitable for the next iteration
5971  * of the loop after the user has reacquired the lock.  If most entries
5972  * found during a walk require you to call mas_pause(), the mt_for_each()
5973  * iterator may be more appropriate.
5974  *
5975  */
5976 void mas_pause(struct ma_state *mas)
5977 {
5978 	mas->node = MAS_PAUSE;
5979 }
5980 EXPORT_SYMBOL_GPL(mas_pause);
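
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * yielding the lock during a long walk.  The tree is hypothetical.
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	mas_lock(&mas);
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			mas_unlock(&mas);
 *			cond_resched();
 *			mas_lock(&mas);
 *		}
 *	}
 *	mas_unlock(&mas);
 */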
5981 
5982 /**
5983  * mas_find() - On the first call, find the entry at or after mas->index up to
5984  * %max.  Otherwise, find the entry after mas->index.
5985  * @mas: The maple state
5986  * @max: The maximum value to check.
5987  *
5988  * Must hold rcu_read_lock or the write lock.
5989  * If an entry exists, last and index are updated accordingly.
5990  * May set @mas->node to MAS_NONE.
5991  *
5992  * Return: The entry or %NULL.
5993  */
5994 void *mas_find(struct ma_state *mas, unsigned long max)
5995 {
5996 	if (unlikely(mas_is_paused(mas))) {
5997 		if (unlikely(mas->last == ULONG_MAX)) {
5998 			mas->node = MAS_NONE;
5999 			return NULL;
6000 		}
6001 		mas->node = MAS_START;
6002 		mas->index = ++mas->last;
6003 	}
6004 
6005 	if (unlikely(mas_is_start(mas))) {
6006 		/* First run or continue */
6007 		void *entry;
6008 
6009 		if (mas->index > max)
6010 			return NULL;
6011 
6012 		entry = mas_walk(mas);
6013 		if (entry)
6014 			return entry;
6015 	}
6016 
6017 	if (unlikely(!mas_searchable(mas)))
6018 		return NULL;
6019 
6020 	/* Retries on dead nodes handled by mas_next_entry */
6021 	return mas_next_entry(mas, max);
6022 }
6023 EXPORT_SYMBOL_GPL(mas_find);
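
/*
 * Example (editor's note, not part of the original source): mas_find() is
 * what the mas_for_each() helper loops on; an open-coded equivalent under
 * the rcu read lock, where process() is a hypothetical consumer:
 *
 *	MA_STATE(mas, &mt, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		process(entry);
 *	rcu_read_unlock();
 */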
6024 
6025 /**
6026  * mas_find_rev() - On the first call, find the first non-null entry at or below
6027  * mas->index down to %min.  Otherwise find the first non-null entry below
6028  * mas->index down to %min.
6029  * @mas: The maple state
6030  * @min: The minimum value to check.
6031  *
6032  * Must hold rcu_read_lock or the write lock.
6033  * If an entry exists, last and index are updated accordingly.
6034  * May set @mas->node to MAS_NONE.
6035  *
6036  * Return: The entry or %NULL.
6037  */
6038 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6039 {
6040 	if (unlikely(mas_is_paused(mas))) {
6041 		if (unlikely(mas->last == ULONG_MAX)) {
6042 			mas->node = MAS_NONE;
6043 			return NULL;
6044 		}
6045 		mas->node = MAS_START;
6046 		mas->last = --mas->index;
6047 	}
6048 
6049 	if (unlikely(mas_is_start(mas))) {
6050 		/* First run or continue */
6051 		void *entry;
6052 
6053 		if (mas->index < min)
6054 			return NULL;
6055 
6056 		entry = mas_walk(mas);
6057 		if (entry)
6058 			return entry;
6059 	}
6060 
6061 	if (unlikely(!mas_searchable(mas)))
6062 		return NULL;
6063 
6064 	if (mas->index < min)
6065 		return NULL;
6066 
6067 	/* Retries on dead nodes handled by mas_prev_entry */
6068 	return mas_prev_entry(mas, min);
6069 }
6070 EXPORT_SYMBOL_GPL(mas_find_rev);
6071 
6072 /**
6073  * mas_erase() - Find the range in which index resides and erase the entire
6074  * range.
6075  * @mas: The maple state
6076  *
6077  * Must hold the write lock.
6078  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6079  * erases that range.
6080  *
6081  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6082  */
6083 void *mas_erase(struct ma_state *mas)
6084 {
6085 	void *entry;
6086 	MA_WR_STATE(wr_mas, mas, NULL);
6087 
6088 	if (mas_is_none(mas) || mas_is_paused(mas))
6089 		mas->node = MAS_START;
6090 
6091 	/* Retry unnecessary when holding the write lock. */
6092 	entry = mas_state_walk(mas);
6093 	if (!entry)
6094 		return NULL;
6095 
6096 write_retry:
6097 	/* Must reset to ensure spanning writes of last slot are detected */
6098 	mas_reset(mas);
6099 	mas_wr_store_setup(&wr_mas);
6100 	mas_wr_store_entry(&wr_mas);
6101 	if (mas_nomem(mas, GFP_KERNEL))
6102 		goto write_retry;
6103 
6104 	return entry;
6105 }
6106 EXPORT_SYMBOL_GPL(mas_erase);
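
/*
 * Example (editor's illustrative sketch): erasing under the write lock and
 * inspecting what was removed.  The tree and index are hypothetical.
 *
 *	MA_STATE(mas, &mt, 7, 7);
 *	void *old;
 *
 *	mtree_lock(&mt);
 *	old = mas_erase(&mas);
 *	mtree_unlock(&mt);
 */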
6107 
6108 /**
6109  * mas_nomem() - Check if there was an error allocating and do the allocation
6110  * if necessary.  If there are allocations, then free them.
6111  * @mas: The maple state
6112  * @gfp: The GFP_FLAGS to use for allocations
6113  * Return: true on allocation, false otherwise.
6114  */
6115 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6116 	__must_hold(mas->tree->lock)
6117 {
6118 	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6119 		mas_destroy(mas);
6120 		return false;
6121 	}
6122 
6123 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6124 		mtree_unlock(mas->tree);
6125 		mas_alloc_nodes(mas, gfp);
6126 		mtree_lock(mas->tree);
6127 	} else {
6128 		mas_alloc_nodes(mas, gfp);
6129 	}
6130 
6131 	if (!mas_allocated(mas))
6132 		return false;
6133 
6134 	mas->node = MAS_START;
6135 	return true;
6136 }
6137 
6138 void __init maple_tree_init(void)
6139 {
6140 	maple_node_cache = kmem_cache_create("maple_node",
6141 			sizeof(struct maple_node), sizeof(struct maple_node),
6142 			SLAB_PANIC, NULL);
6143 }
6144 
6145 /**
6146  * mtree_load() - Load a value stored in a maple tree
6147  * @mt: The maple tree
6148  * @index: The index to load
6149  *
6150  * Return: the entry or %NULL
6151  */
6152 void *mtree_load(struct maple_tree *mt, unsigned long index)
6153 {
6154 	MA_STATE(mas, mt, index, index);
6155 	void *entry;
6156 
6157 	trace_ma_read(__func__, &mas);
6158 	rcu_read_lock();
6159 retry:
6160 	entry = mas_start(&mas);
6161 	if (unlikely(mas_is_none(&mas)))
6162 		goto unlock;
6163 
6164 	if (unlikely(mas_is_ptr(&mas))) {
6165 		if (index)
6166 			entry = NULL;
6167 
6168 		goto unlock;
6169 	}
6170 
6171 	entry = mtree_lookup_walk(&mas);
6172 	if (!entry && unlikely(mas_is_start(&mas)))
6173 		goto retry;
6174 unlock:
6175 	rcu_read_unlock();
6176 	if (xa_is_zero(entry))
6177 		return NULL;
6178 
6179 	return entry;
6180 }
6181 EXPORT_SYMBOL(mtree_load);
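
/*
 * Example (editor's note, not part of the original source): the normal API
 * read path needs no locking by the caller; mtree_load() takes the rcu
 * read lock itself.
 *
 *	void *entry = mtree_load(&mt, 42);
 */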
6182 
6183 /**
6184  * mtree_store_range() - Store an entry at a given range.
6185  * @mt: The maple tree
6186  * @index: The start of the range
6187  * @last: The end of the range
6188  * @entry: The entry to store
6189  * @gfp: The GFP_FLAGS to use for allocations
6190  *
6191  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6192  * be allocated.
6193  */
6194 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6195 		unsigned long last, void *entry, gfp_t gfp)
6196 {
6197 	MA_STATE(mas, mt, index, last);
6198 	MA_WR_STATE(wr_mas, &mas, entry);
6199 
6200 	trace_ma_write(__func__, &mas, 0, entry);
6201 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6202 		return -EINVAL;
6203 
6204 	if (index > last)
6205 		return -EINVAL;
6206 
6207 	mtree_lock(mt);
6208 retry:
6209 	mas_wr_store_entry(&wr_mas);
6210 	if (mas_nomem(&mas, gfp))
6211 		goto retry;
6212 
6213 	mtree_unlock(mt);
6214 	if (mas_is_err(&mas))
6215 		return xa_err(mas.node);
6216 
6217 	return 0;
6218 }
6219 EXPORT_SYMBOL(mtree_store_range);
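
/*
 * Example (editor's illustrative sketch): both endpoints are inclusive, so
 * this single call covers indices 4096 through 8191 with the hypothetical
 * "ptr":
 *
 *	int err = mtree_store_range(&mt, 4096, 8191, ptr, GFP_KERNEL);
 */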
6220 
6221 /**
6222  * mtree_store() - Store an entry at a given index.
6223  * @mt: The maple tree
6224  * @index: The index to store the value
6225  * @entry: The entry to store
6226  * @gfp: The GFP_FLAGS to use for allocations
6227  *
6228  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6229  * be allocated.
6230  */
6231 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6232 		 gfp_t gfp)
6233 {
6234 	return mtree_store_range(mt, index, index, entry, gfp);
6235 }
6236 EXPORT_SYMBOL(mtree_store);
6237 
6238 /**
6239  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6240  * @mt: The maple tree
6241  * @first: The start of the range
6242  * @last: The end of the range
6243  * @entry: The entry to store
6244  * @gfp: The GFP_FLAGS to use for allocations.
6245  *
6246  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6247  * request, -ENOMEM if memory could not be allocated.
6248  */
6249 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6250 		unsigned long last, void *entry, gfp_t gfp)
6251 {
6252 	MA_STATE(ms, mt, first, last);
6253 
6254 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6255 		return -EINVAL;
6256 
6257 	if (first > last)
6258 		return -EINVAL;
6259 
6260 	mtree_lock(mt);
6261 retry:
6262 	mas_insert(&ms, entry);
6263 	if (mas_nomem(&ms, gfp))
6264 		goto retry;
6265 
6266 	mtree_unlock(mt);
6267 	if (mas_is_err(&ms))
6268 		return xa_err(ms.node);
6269 
6270 	return 0;
6271 }
6272 EXPORT_SYMBOL(mtree_insert_range);
6273 
6274 /**
6275  * mtree_insert() - Insert an entry at a given index if there is no value.
6276  * @mt: The maple tree
6277  * @index: The index to store the value
6278  * @entry: The entry to store
6279  * @gfp: The GFP_FLAGS to use for allocations.
6280  *
6281  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6282  * request, -ENOMEM if memory could not be allocated.
6283  */
6284 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6285 		 gfp_t gfp)
6286 {
6287 	return mtree_insert_range(mt, index, index, entry, gfp);
6288 }
6289 EXPORT_SYMBOL(mtree_insert);
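
/*
 * Example (editor's illustrative sketch): unlike mtree_store(), an insert
 * refuses to overwrite an occupied index:
 *
 *	int err = mtree_insert(&mt, 5, xa_mk_value(5), GFP_KERNEL);
 *
 * A second identical call would return -EEXIST rather than replace the
 * entry.
 */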
6290 
6291 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6292 		void *entry, unsigned long size, unsigned long min,
6293 		unsigned long max, gfp_t gfp)
6294 {
6295 	int ret = 0;
6296 
6297 	MA_STATE(mas, mt, min, max - size);
6298 	if (!mt_is_alloc(mt))
6299 		return -EINVAL;
6300 
6301 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6302 		return -EINVAL;
6303 
6304 	if (min > max)
6305 		return -EINVAL;
6306 
6307 	if (max < size)
6308 		return -EINVAL;
6309 
6310 	if (!size)
6311 		return -EINVAL;
6312 
6313 	mtree_lock(mt);
6314 retry:
6315 	mas.offset = 0;
6316 	mas.index = min;
6317 	mas.last = max - size;
6318 	ret = mas_alloc(&mas, entry, size, startp);
6319 	if (mas_nomem(&mas, gfp))
6320 		goto retry;
6321 
6322 	mtree_unlock(mt);
6323 	return ret;
6324 }
6325 EXPORT_SYMBOL(mtree_alloc_range);
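
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * reserving a gap of 16 consecutive free indices in an allocation tree
 * (one created with MT_FLAGS_ALLOC_RANGE).  "ptr" is hypothetical.
 *
 *	unsigned long start;
 *	int err;
 *
 *	err = mtree_alloc_range(&mt, &start, ptr, 16, 0, 1023, GFP_KERNEL);
 *
 * On success, "ptr" occupies the range [start, start + 15].
 */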
6326 
6327 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6328 		void *entry, unsigned long size, unsigned long min,
6329 		unsigned long max, gfp_t gfp)
6330 {
6331 	int ret = 0;
6332 
6333 	MA_STATE(mas, mt, min, max - size);
6334 	if (!mt_is_alloc(mt))
6335 		return -EINVAL;
6336 
6337 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6338 		return -EINVAL;
6339 
6340 	if (min >= max)
6341 		return -EINVAL;
6342 
6343 	if (max < size - 1)
6344 		return -EINVAL;
6345 
6346 	if (!size)
6347 		return -EINVAL;
6348 
6349 	mtree_lock(mt);
6350 retry:
6351 	ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6352 	if (mas_nomem(&mas, gfp))
6353 		goto retry;
6354 
6355 	mtree_unlock(mt);
6356 	return ret;
6357 }
6358 EXPORT_SYMBOL(mtree_alloc_rrange);
6359 
6360 /**
6361  * mtree_erase() - Find an index and erase the entire range.
6362  * @mt: The maple tree
6363  * @index: The index to erase
6364  *
6365  * Erasing is the same as a walk to an entry then a store of a NULL to that
6366  * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6367  *
6368  * Return: The entry stored at the @index or %NULL
6369  */
6370 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6371 {
6372 	void *entry = NULL;
6373 
6374 	MA_STATE(mas, mt, index, index);
6375 	trace_ma_op(__func__, &mas);
6376 
6377 	mtree_lock(mt);
6378 	entry = mas_erase(&mas);
6379 	mtree_unlock(mt);
6380 
6381 	return entry;
6382 }
6383 EXPORT_SYMBOL(mtree_erase);
6384 
6385 /**
6386  * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6387  * @mt: The maple tree
6388  *
6389  * Note: Does not handle locking.
6390  */
6391 void __mt_destroy(struct maple_tree *mt)
6392 {
6393 	void *root = mt_root_locked(mt);
6394 
6395 	rcu_assign_pointer(mt->ma_root, NULL);
6396 	if (xa_is_node(root))
6397 		mte_destroy_walk(root, mt);
6398 
6399 	mt->ma_flags = 0;
6400 }
6401 EXPORT_SYMBOL_GPL(__mt_destroy);
6402 
6403 /**
6404  * mtree_destroy() - Destroy a maple tree
6405  * @mt: The maple tree
6406  *
6407  * Frees all resources used by the tree.  Handles locking.
6408  */
6409 void mtree_destroy(struct maple_tree *mt)
6410 {
6411 	mtree_lock(mt);
6412 	__mt_destroy(mt);
6413 	mtree_unlock(mt);
6414 }
6415 EXPORT_SYMBOL(mtree_destroy);
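
/*
 * Example (editor's note): no prior erasing is required, and the stored
 * entries themselves are not freed - only the tree's nodes are:
 *
 *	mtree_destroy(&mt);
 */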
6416 
6417 /**
6418  * mt_find() - Search from the start up until an entry is found.
6419  * @mt: The maple tree
6420  * @index: Pointer which contains the start location of the search
6421  * @max: The maximum value to check
6422  *
6423  * Handles locking.  @index will be incremented to one beyond the range.
6424  *
6425  * Return: The entry at or after the @index or %NULL
6426  */
6427 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6428 {
6429 	MA_STATE(mas, mt, *index, *index);
6430 	void *entry;
6431 #ifdef CONFIG_DEBUG_MAPLE_TREE
6432 	unsigned long copy = *index;
6433 #endif
6434 
6435 	trace_ma_read(__func__, &mas);
6436 
6437 	if ((*index) > max)
6438 		return NULL;
6439 
6440 	rcu_read_lock();
6441 retry:
6442 	entry = mas_state_walk(&mas);
6443 	if (mas_is_start(&mas))
6444 		goto retry;
6445 
6446 	if (unlikely(xa_is_zero(entry)))
6447 		entry = NULL;
6448 
6449 	if (entry)
6450 		goto unlock;
6451 
6452 	while (mas_searchable(&mas) && (mas.index < max)) {
6453 		entry = mas_next_entry(&mas, max);
6454 		if (likely(entry && !xa_is_zero(entry)))
6455 			break;
6456 	}
6457 
6458 	if (unlikely(xa_is_zero(entry)))
6459 		entry = NULL;
6460 unlock:
6461 	rcu_read_unlock();
6462 	if (likely(entry)) {
6463 		*index = mas.last + 1;
6464 #ifdef CONFIG_DEBUG_MAPLE_TREE
6465 		if ((*index) && (*index) <= copy)
6466 			pr_err("index not increased! %lx <= %lx\n",
6467 			       *index, copy);
6468 		MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6469 #endif
6470 	}
6471 
6472 	return entry;
6473 }
6474 EXPORT_SYMBOL(mt_find);
6475 
6476 /**
6477  * mt_find_after() - Search from the start up until an entry is found.
6478  * @mt: The maple tree
6479  * @index: Pointer which contains the start location of the search
6480  * @max: The maximum value to check
6481  *
6482  * Handles locking.  Detects wrapping on index == 0.
6483  *
6484  * Return: The entry at or after the @index or %NULL
6485  */
6486 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6487 		    unsigned long max)
6488 {
6489 	if (!(*index))
6490 		return NULL;
6491 
6492 	return mt_find(mt, index, max);
6493 }
6494 EXPORT_SYMBOL(mt_find_after);
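
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * an open-coded walk in the style of the mt_for_each() helper, which pairs
 * mt_find() with mt_find_after().  On each iteration, index is already one
 * past the returned entry's range.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	for (entry = mt_find(&mt, &index, ULONG_MAX); entry;
 *	     entry = mt_find_after(&mt, &index, ULONG_MAX))
 *		pr_info("%p ends before %lu\n", entry, index);
 */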
6495 
6496 #ifdef CONFIG_DEBUG_MAPLE_TREE
6497 atomic_t maple_tree_tests_run;
6498 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6499 atomic_t maple_tree_tests_passed;
6500 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6501 
6502 #ifndef __KERNEL__
6503 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6504 void mt_set_non_kernel(unsigned int val)
6505 {
6506 	kmem_cache_set_non_kernel(maple_node_cache, val);
6507 }
6508 
6509 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6510 unsigned long mt_get_alloc_size(void)
6511 {
6512 	return kmem_cache_get_alloc(maple_node_cache);
6513 }
6514 
6515 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6516 void mt_zero_nr_tallocated(void)
6517 {
6518 	kmem_cache_zero_nr_tallocated(maple_node_cache);
6519 }
6520 
6521 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6522 unsigned int mt_nr_tallocated(void)
6523 {
6524 	return kmem_cache_nr_tallocated(maple_node_cache);
6525 }
6526 
6527 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6528 unsigned int mt_nr_allocated(void)
6529 {
6530 	return kmem_cache_nr_allocated(maple_node_cache);
6531 }
6532 
6533 /*
6534  * mas_dead_node() - Check if the maple state is pointing to a dead node.
6535  * @mas: The maple state
6536  * @index: The index to restore in @mas.
6537  *
6538  * Used in test code.
6539  * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6540  */
6541 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6542 {
6543 	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6544 		return 0;
6545 
6546 	if (likely(!mte_dead_node(mas->node)))
6547 		return 0;
6548 
6549 	mas_rewalk(mas, index);
6550 	return 1;
6551 }
6552 
6553 void mt_cache_shrink(void)
6554 {
6555 }
6556 #else
6557 /*
6558  * mt_cache_shrink() - For testing, don't use this.
6559  *
6560  * Certain testcases can trigger an OOM when combined with other memory
6561  * debugging configuration options.  This function is used to reduce the
6562  * possibility of an out of memory event due to kmem_cache objects remaining
6563  * around for longer than usual.
6564  */
6565 void mt_cache_shrink(void)
6566 {
6567 	kmem_cache_shrink(maple_node_cache);
6568 
6569 }
6570 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6571 
6572 #endif /* not defined __KERNEL__ */
6573 /*
6574  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6575  * @mas: The maple state
6576  * @offset: The offset into the slot array to fetch.
6577  *
6578  * Return: The entry stored at @offset.
6579  */
6580 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6581 		unsigned char offset)
6582 {
6583 	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6584 			offset);
6585 }
6586 
6587 
6588 /*
6589  * mas_first_entry() - Go to the first leaf and find the first entry.
6590  * @mas: the maple state.
6591  * @mn: the maple node of @mas->node.
6592  * @limit: the maximum index to check.
6593  * @mt: the node type of @mn.
6594  *
6595  * Sets mas->offset to the offset of the entry, mas->index to the range start.
6596  * Return: The first entry up to @limit, or %NULL with mas->node set to MAS_NONE.
6597  */
6598 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6599 		unsigned long limit, enum maple_type mt)
6600 
6601 {
6602 	unsigned long max;
6603 	unsigned long *pivots;
6604 	void __rcu **slots;
6605 	void *entry = NULL;
6606 
6607 	mas->index = mas->min;
6608 	if (mas->index > limit)
6609 		goto none;
6610 
6611 	max = mas->max;
6612 	mas->offset = 0;
6613 	while (likely(!ma_is_leaf(mt))) {
6614 		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6615 		slots = ma_slots(mn, mt);
6616 		pivots = ma_pivots(mn, mt);
6617 		max = pivots[0];
6618 		entry = mas_slot(mas, slots, 0);
6619 		if (unlikely(ma_dead_node(mn)))
6620 			return NULL;
6621 		mas->node = entry;
6622 		mn = mas_mn(mas);
6623 		mt = mte_node_type(mas->node);
6624 	}
6625 	MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6626 
6627 	mas->max = max;
6628 	slots = ma_slots(mn, mt);
6629 	entry = mas_slot(mas, slots, 0);
6630 	if (unlikely(ma_dead_node(mn)))
6631 		return NULL;
6632 
6633 	/* Slot 0 or 1 must be set */
6634 	if (mas->index > limit)
6635 		goto none;
6636 
6637 	if (likely(entry))
6638 		return entry;
6639 
6640 	pivots = ma_pivots(mn, mt);
6641 	mas->index = pivots[0] + 1;
6642 	mas->offset = 1;
6643 	entry = mas_slot(mas, slots, 1);
6644 	if (unlikely(ma_dead_node(mn)))
6645 		return NULL;
6646 
6647 	if (mas->index > limit)
6648 		goto none;
6649 
6650 	if (likely(entry))
6651 		return entry;
6652 
6653 none:
6654 	if (likely(!ma_dead_node(mn)))
6655 		mas->node = MAS_NONE;
6656 	return NULL;
6657 }
6658 
6659 /* Depth first search, post-order */
6660 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6661 {
6662 
6663 	struct maple_enode *p = MAS_NONE, *mn = mas->node;
6664 	unsigned long p_min, p_max;
6665 
6666 	mas_next_node(mas, mas_mn(mas), max);
6667 	if (!mas_is_none(mas))
6668 		return;
6669 
6670 	if (mte_is_root(mn))
6671 		return;
6672 
6673 	mas->node = mn;
6674 	mas_ascend(mas);
6675 	while (mas->node != MAS_NONE) {
6676 		p = mas->node;
6677 		p_min = mas->min;
6678 		p_max = mas->max;
6679 		mas_prev_node(mas, 0);
6680 	}
6681 
6682 	if (p == MAS_NONE)
6683 		return;
6684 
6685 	mas->node = p;
6686 	mas->max = p_max;
6687 	mas->min = p_min;
6688 }
6689 
6690 /* Tree validations */
6691 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6692 		unsigned long min, unsigned long max, unsigned int depth);
6693 static void mt_dump_range(unsigned long min, unsigned long max,
6694 			  unsigned int depth)
6695 {
6696 	static const char spaces[] = "                                ";
6697 
6698 	if (min == max)
6699 		pr_info("%.*s%lu: ", depth * 2, spaces, min);
6700 	else
6701 		pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6702 }
6703 
6704 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6705 			  unsigned int depth)
6706 {
6707 	mt_dump_range(min, max, depth);
6708 
6709 	if (xa_is_value(entry))
6710 		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6711 				xa_to_value(entry), entry);
6712 	else if (xa_is_zero(entry))
6713 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
6714 	else if (mt_is_reserved(entry))
6715 		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6716 	else
6717 		pr_cont("%p\n", entry);
6718 }
6719 
6720 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6721 			unsigned long min, unsigned long max, unsigned int depth)
6722 {
6723 	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6724 	bool leaf = mte_is_leaf(entry);
6725 	unsigned long first = min;
6726 	int i;
6727 
6728 	pr_cont(" contents: ");
6729 	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6730 		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6731 	pr_cont("%p\n", node->slot[i]);
6732 	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6733 		unsigned long last = max;
6734 
6735 		if (i < (MAPLE_RANGE64_SLOTS - 1))
6736 			last = node->pivot[i];
6737 		else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
6738 			break;
6739 		if (last == 0 && i > 0)
6740 			break;
6741 		if (leaf)
6742 			mt_dump_entry(mt_slot(mt, node->slot, i),
6743 					first, last, depth + 1);
6744 		else if (node->slot[i])
6745 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6746 					first, last, depth + 1);
6747 
6748 		if (last == max)
6749 			break;
6750 		if (last > max) {
6751 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6752 					node, last, max, i);
6753 			break;
6754 		}
6755 		first = last + 1;
6756 	}
6757 }
6758 
6759 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6760 			unsigned long min, unsigned long max, unsigned int depth)
6761 {
6762 	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6763 	bool leaf = mte_is_leaf(entry);
6764 	unsigned long first = min;
6765 	int i;
6766 
6767 	pr_cont(" contents: ");
6768 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6769 		pr_cont("%lu ", node->gap[i]);
6770 	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6771 	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6772 		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6773 	pr_cont("%p\n", node->slot[i]);
6774 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6775 		unsigned long last = max;
6776 
6777 		if (i < (MAPLE_ARANGE64_SLOTS - 1))
6778 			last = node->pivot[i];
6779 		else if (!node->slot[i])
6780 			break;
6781 		if (last == 0 && i > 0)
6782 			break;
6783 		if (leaf)
6784 			mt_dump_entry(mt_slot(mt, node->slot, i),
6785 					first, last, depth + 1);
6786 		else if (node->slot[i])
6787 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6788 					first, last, depth + 1);
6789 
6790 		if (last == max)
6791 			break;
6792 		if (last > max) {
6793 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6794 					node, last, max, i);
6795 			break;
6796 		}
6797 		first = last + 1;
6798 	}
6799 }
6800 
6801 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6802 		unsigned long min, unsigned long max, unsigned int depth)
6803 {
6804 	struct maple_node *node = mte_to_node(entry);
6805 	unsigned int type = mte_node_type(entry);
6806 	unsigned int i;
6807 
6808 	mt_dump_range(min, max, depth);
6809 
6810 	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6811 			node ? node->parent : NULL);
6812 	switch (type) {
6813 	case maple_dense:
6814 		pr_cont("\n");
6815 		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6816 			if (min + i > max)
6817 				pr_cont("OUT OF RANGE: ");
6818 			mt_dump_entry(mt_slot(mt, node->slot, i),
6819 					min + i, min + i, depth);
6820 		}
6821 		break;
6822 	case maple_leaf_64:
6823 	case maple_range_64:
6824 		mt_dump_range64(mt, entry, min, max, depth);
6825 		break;
6826 	case maple_arange_64:
6827 		mt_dump_arange64(mt, entry, min, max, depth);
6828 		break;
6829 
6830 	default:
6831 		pr_cont(" UNKNOWN TYPE\n");
6832 	}
6833 }
6834 
6835 void mt_dump(const struct maple_tree *mt)
6836 {
6837 	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6838 
6839 	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6840 		 mt, mt->ma_flags, mt_height(mt), entry);
6841 	if (!xa_is_node(entry))
6842 		mt_dump_entry(entry, 0, 0, 0);
6843 	else if (entry)
6844 		mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
6845 }
6846 EXPORT_SYMBOL_GPL(mt_dump);
6847 
6848 /*
6849  * Calculate the maximum gap in a node and check if that's what is reported in
6850  * the parent (unless root).
6851  */
6852 static void mas_validate_gaps(struct ma_state *mas)
6853 {
6854 	struct maple_enode *mte = mas->node;
6855 	struct maple_node *p_mn;
6856 	unsigned long gap = 0, max_gap = 0;
6857 	unsigned long p_end, p_start = mas->min;
6858 	unsigned char p_slot;
6859 	unsigned long *gaps = NULL;
6860 	unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6861 	int i;
6862 
6863 	if (ma_is_dense(mte_node_type(mte))) {
6864 		for (i = 0; i < mt_slot_count(mte); i++) {
6865 			if (mas_get_slot(mas, i)) {
6866 				if (gap > max_gap)
6867 					max_gap = gap;
6868 				gap = 0;
6869 				continue;
6870 			}
6871 			gap++;
6872 		}
6873 		goto counted;
6874 	}
6875 
6876 	gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6877 	for (i = 0; i < mt_slot_count(mte); i++) {
6878 		p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6879 
6880 		if (!gaps) {
6881 			if (mas_get_slot(mas, i)) {
6882 				gap = 0;
6883 				goto not_empty;
6884 			}
6885 
6886 			gap += p_end - p_start + 1;
6887 		} else {
6888 			void *entry = mas_get_slot(mas, i);
6889 
6890 			gap = gaps[i];
6891 			if (!entry) {
6892 				if (gap != p_end - p_start + 1) {
6893 					pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6894 						mas_mn(mas), i,
6895 						mas_get_slot(mas, i), gap,
6896 						p_end, p_start);
6897 					mt_dump(mas->tree);
6898 
6899 					MT_BUG_ON(mas->tree,
6900 						gap != p_end - p_start + 1);
6901 				}
6902 			} else {
6903 				if (gap > p_end - p_start + 1) {
6904 					pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6905 					mas_mn(mas), i, gap, p_end, p_start,
6906 					p_end - p_start + 1);
6907 					MT_BUG_ON(mas->tree,
6908 						gap > p_end - p_start + 1);
6909 				}
6910 			}
6911 		}
6912 
6913 		if (gap > max_gap)
6914 			max_gap = gap;
6915 not_empty:
6916 		p_start = p_end + 1;
6917 		if (p_end >= mas->max)
6918 			break;
6919 	}
6920 
6921 counted:
6922 	if (mte_is_root(mte))
6923 		return;
6924 
6925 	p_slot = mte_parent_slot(mas->node);
6926 	p_mn = mte_parent(mte);
6927 	MT_BUG_ON(mas->tree, max_gap > mas->max);
6928 	if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
6929 		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
6930 		mt_dump(mas->tree);
6931 	}
6932 
6933 	MT_BUG_ON(mas->tree,
6934 		  ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
6935 }
6936 
6937 static void mas_validate_parent_slot(struct ma_state *mas)
6938 {
6939 	struct maple_node *parent;
6940 	struct maple_enode *node;
6941 	enum maple_type p_type = mas_parent_enum(mas, mas->node);
6942 	unsigned char p_slot = mte_parent_slot(mas->node);
6943 	void __rcu **slots;
6944 	int i;
6945 
6946 	if (mte_is_root(mas->node))
6947 		return;
6948 
6949 	parent = mte_parent(mas->node);
6950 	slots = ma_slots(parent, p_type);
6951 	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
6952 
6953 	/* Check prev/next parent slot for duplicate node entry */
6954 
6955 	for (i = 0; i < mt_slots[p_type]; i++) {
6956 		node = mas_slot(mas, slots, i);
6957 		if (i == p_slot) {
6958 			if (node != mas->node)
6959 				pr_err("parent %p[%u] does not have %p\n",
6960 					parent, i, mas_mn(mas));
6961 			MT_BUG_ON(mas->tree, node != mas->node);
6962 		} else if (node == mas->node) {
6963 			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
6964 			       mas_mn(mas), parent, i, p_slot);
6965 			MT_BUG_ON(mas->tree, node == mas->node);
6966 		}
6967 	}
6968 }
6969 
6970 static void mas_validate_child_slot(struct ma_state *mas)
6971 {
6972 	enum maple_type type = mte_node_type(mas->node);
6973 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
6974 	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
6975 	struct maple_enode *child;
6976 	unsigned char i;
6977 
6978 	if (mte_is_leaf(mas->node))
6979 		return;
6980 
6981 	for (i = 0; i < mt_slots[type]; i++) {
6982 		child = mas_slot(mas, slots, i);
6983 		if (!pivots[i] || pivots[i] == mas->max)
6984 			break;
6985 
6986 		if (!child)
6987 			break;
6988 
6989 		if (mte_parent_slot(child) != i) {
6990 			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
6991 			       mas_mn(mas), i, mte_to_node(child),
6992 			       mte_parent_slot(child));
6993 			MT_BUG_ON(mas->tree, 1);
6994 		}
6995 
6996 		if (mte_parent(child) != mte_to_node(mas->node)) {
6997 			pr_err("child %p has parent %p not %p\n",
6998 			       mte_to_node(child), mte_parent(child),
6999 			       mte_to_node(mas->node));
7000 			MT_BUG_ON(mas->tree, 1);
7001 		}
7002 	}
7003 }
7004 
7005 /*
7006  * Validate all pivots are within mas->min and mas->max.
7007  */
7008 static void mas_validate_limits(struct ma_state *mas)
7009 {
7010 	int i;
7011 	unsigned long prev_piv = 0;
7012 	enum maple_type type = mte_node_type(mas->node);
7013 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7014 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7015 
7016 	/* all limits are fine here. */
7017 	if (mte_is_root(mas->node))
7018 		return;
7019 
7020 	for (i = 0; i < mt_slots[type]; i++) {
7021 		unsigned long piv;
7022 
7023 		piv = mas_safe_pivot(mas, pivots, i, type);
7024 
7025 		if (!piv && (i != 0))
7026 			break;
7027 
7028 		if (!mte_is_leaf(mas->node)) {
7029 			void *entry = mas_slot(mas, slots, i);
7030 
7031 			if (!entry)
7032 				pr_err("%p[%u] cannot be null\n",
7033 				       mas_mn(mas), i);
7034 
7035 			MT_BUG_ON(mas->tree, !entry);
7036 		}
7037 
7038 		if (prev_piv > piv) {
7039 			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7040 				mas_mn(mas), i, piv, prev_piv);
7041 			MT_BUG_ON(mas->tree, piv < prev_piv);
7042 		}
7043 
7044 		if (piv < mas->min) {
7045 			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7046 				piv, mas->min);
7047 			MT_BUG_ON(mas->tree, piv < mas->min);
7048 		}
7049 		if (piv > mas->max) {
7050 			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7051 				piv, mas->max);
7052 			MT_BUG_ON(mas->tree, piv > mas->max);
7053 		}
7054 		prev_piv = piv;
7055 		if (piv == mas->max)
7056 			break;
7057 	}
7058 	for (i += 1; i < mt_slots[type]; i++) {
7059 		void *entry = mas_slot(mas, slots, i);
7060 
7061 		if (entry && (i != mt_slots[type] - 1)) {
7062 			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7063 			       i, entry);
7064 			MT_BUG_ON(mas->tree, entry != NULL);
7065 		}
7066 
7067 		if (i < mt_pivots[type]) {
7068 			unsigned long piv = pivots[i];
7069 
7070 			if (!piv)
7071 				continue;
7072 
7073 			pr_err("%p[%u] should not have piv %lu\n",
7074 			       mas_mn(mas), i, piv);
7075 			MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7076 		}
7077 	}
7078 }
7079 
7080 static void mt_validate_nulls(struct maple_tree *mt)
7081 {
7082 	void *entry, *last = (void *)1;
7083 	unsigned char offset = 0;
7084 	void __rcu **slots;
7085 	MA_STATE(mas, mt, 0, 0);
7086 
7087 	mas_start(&mas);
7088 	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7089 		return;
7090 
7091 	while (!mte_is_leaf(mas.node))
7092 		mas_descend(&mas);
7093 
7094 	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7095 	do {
7096 		entry = mas_slot(&mas, slots, offset);
7097 		if (!last && !entry) {
7098 			pr_err("Sequential nulls end at %p[%u]\n",
7099 				mas_mn(&mas), offset);
7100 		}
7101 		MT_BUG_ON(mt, !last && !entry);
7102 		last = entry;
7103 		if (offset == mas_data_end(&mas)) {
7104 			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7105 			if (mas_is_none(&mas))
7106 				return;
7107 			offset = 0;
7108 			slots = ma_slots(mte_to_node(mas.node),
7109 					 mte_node_type(mas.node));
7110 		} else {
7111 			offset++;
7112 		}
7113 
7114 	} while (!mas_is_none(&mas));
7115 }
7116 
7117 /*
7118  * validate a maple tree by checking:
7119  * 1. The limits (pivots are within mas->min to mas->max)
7120  * 2. The gap is correctly set in the parents
7121  */
7122 void mt_validate(struct maple_tree *mt)
7123 {
7124 	unsigned char end;
7125 
7126 	MA_STATE(mas, mt, 0, 0);
7127 	rcu_read_lock();
7128 	mas_start(&mas);
7129 	if (!mas_searchable(&mas))
7130 		goto done;
7131 
7132 	mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7133 	while (!mas_is_none(&mas)) {
7134 		MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7135 		if (!mte_is_root(mas.node)) {
7136 			end = mas_data_end(&mas);
7137 			if ((end < mt_min_slot_count(mas.node)) &&
7138 			    (mas.max != ULONG_MAX)) {
7139 				pr_err("Invalid size %u of %p\n", end,
7140 				mas_mn(&mas));
7141 				MT_BUG_ON(mas.tree, 1);
7142 			}
7143 
7144 		}
7145 		mas_validate_parent_slot(&mas);
7146 		mas_validate_child_slot(&mas);
7147 		mas_validate_limits(&mas);
7148 		if (mt_is_alloc(mt))
7149 			mas_validate_gaps(&mas);
7150 		mas_dfs_postorder(&mas, ULONG_MAX);
7151 	}
7152 	mt_validate_nulls(mt);
7153 done:
7154 	rcu_read_unlock();
7155 
7156 }
7157 EXPORT_SYMBOL_GPL(mt_validate);
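
/*
 * Example (editor's note, not part of the original source): test code
 * typically runs this after a batch of operations; the rcu read lock is
 * taken internally, and failures print details and trigger MT_BUG_ON():
 *
 *	mt_validate(&mt);
 */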
7158 
7159 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7160