// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets have
 * a slot, but the last offset has an implied pivot from the node above (or
 * ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
 */
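
/*
 * As an illustrative example of the pivot semantics above (the values are
 * hypothetical), consider a leaf node spanning 0 - 100 after an entry A was
 * stored over the range 11 - 20:
 *
 *  Slots ->  | NULL |  A   | NULL |
 *  Pivots -> |  10  |  20  | (100 implied by the parent)
 *
 * Slot 0 holds NULL for 0 - 10 (pivots are inclusive), slot 1 holds A for
 * 11 - 20, and slot 2 holds NULL up to the node maximum of 100, which is not
 * stored in this node but implied by the pivot in the node above.
 */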


#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit a
 * dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
 * When storing a 32 or 64 bit value, the offset can fit into 5 bits.  The 16
 * bit values need an extra bit to store the offset.  This extra bit comes from
 * a reuse of the last bit in the node type.  This is possible by using bit 1 to
 * indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0b??1 = Root
 *  0b?00 = 16 bit nodes
 *  0b010 = 32 bit nodes
 *  0b110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02
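
/*
 * A worked example of the encoding above, using a hypothetical parent
 * address: for a maple_range_64 parent at 0x1000 (256B aligned) with the
 * child in slot 5, the stored parent pointer is
 *
 *	0x1000 | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	= 0x1000 | 0x28 | 0x06 = 0x102e
 *
 * Decoding reverses this: bit 1 is set, so the slot is
 * (0x102e & MAPLE_PARENT_SLOT_MASK) >> MAPLE_PARENT_SLOT_SHIFT == 5 and the
 * parent node is 0x102e & ~MAPLE_NODE_MASK == 0x1000.
 */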

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode from which to extract the parent's type
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @mas: The maple state
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot in @parent that @enode resides in.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the
 * parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
 * no allocated node.  Set the request either in the node or do the necessary
 * encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
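
/*
 * A sketch of the encoding handled by the three helpers above, with
 * hypothetical values: before anything is allocated, a request for 3 nodes
 * is stored directly in the pointer bits as ((3 << 1) | 1) == 0x7, bit 0
 * marking the field as a count rather than a pointer:
 *
 *	mas_set_alloc_req(&mas, 3);	// mas.alloc == (void *)0x7
 *	mas_alloc_req(&mas);		// returns 3
 *	mas_allocated(&mas);		// returns 0, nothing allocated yet
 *
 * Once a node is allocated, @mas->alloc points to it and both the total and
 * any pending request count live inside that first node.
 */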

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
					   enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mas->node.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}
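
/*
 * Together, mas_safe_min() and mas_safe_pivot() recover the range covered by
 * an offset.  A hypothetical example: in a maple_range_64 node spanning
 * 0 - 100 with pivots {10, 20, 30, 0, ...}, offset 1 has
 * mas_safe_min() == pivots[0] + 1 == 11 and mas_safe_pivot() == pivots[1]
 * == 20, so it covers the inclusive range 11 - 20.  Offset 0 falls back to
 * mas->min for its minimum, and offsets beyond the last pivot fall back to
 * mas->max for their maximum.
 */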

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_write_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_write_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				       unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_write_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				  enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary linked list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}
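
/*
 * A minimal usage sketch of the topiary helpers above, assuming a write-
 * locked tree and hypothetical surrounding code:
 *
 *	MA_TOPIARY(free_list, mas->tree);
 *
 *	mat_add(&free_list, old_enode);	// mark dead and queue
 *	...				// publish the replacement nodes
 *	mas_mat_free(mas, &free_list);	// free the dead list
 *
 * mas_mat_destroy() is used instead when the subtrees hanging off the dead
 * nodes must also be walked and freed.
 */
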
/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
				 unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.  This
 * may cause several levels of walking up to find the correct min and max.
 * May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}
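
/*
 * A sketch of how the dead-node return is typically consumed under RCU
 * (hypothetical surrounding code):
 *
 *	retry:
 *		...
 *		if (unlikely(mas_ascend(mas)))
 *			goto retry;	// raced with a writer, restart
 *
 * A non-zero return means a dead node was seen, so any min/max gathered on
 * the way up cannot be trusted and the walk must be restarted.
 */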

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
 * requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state allocations
 * otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}
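
/*
 * A minimal sketch of the allocation flow built on the helpers above
 * (hypothetical caller; locking and retry policy elided):
 *
 *	mas_node_count(mas, 3);		// request 3 nodes, GFP_NOWAIT
 *	if (mas_is_err(mas)) {
 *		// -ENOMEM was recorded in the state; callers typically
 *		// drop the lock and retry with more permissive gfp flags.
 *	}
 *	node = mas_pop_node(mas);	// consume one preallocated node
 *	...
 *	mas_push_node(mas, unused);	// return a node that was not needed
 */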

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
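
/*
 * A sketch of how the mas_start() return conventions are consumed
 * (hypothetical caller):
 *
 *	entry = mas_start(mas);
 *	if (mas_is_none(mas))
 *		return NULL;		// empty tree
 *	if (mas_is_ptr(mas))
 *		return entry;		// single entry stored at index 0
 *	// otherwise mas->node is the root node and the walk descends
 */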

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
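
/*
 * A worked example of the three non-dense cases above, with hypothetical
 * values for a maple_range_64 node (mt_pivots[] == 15) whose max is 100:
 *  - pivots[14] == 0:   the node is not full, so the end comes from the
 *                       metadata via ma_meta_end().
 *  - pivots[14] == 100: the last pivot equals the node maximum, so offset
 *                       14 is the last slot with data.
 *  - pivots[14] == 50:  the node is full and the final slot carries data up
 *                       to max, so the end is mt_pivots[] == 15.
 */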

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and slot
	 * 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}
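
/*
 * A worked example of the non-dense path above, with hypothetical values: a
 * leaf spanning 0 - 100 with slots {A, NULL, B, ...} and pivots
 * {10, 20, 30, ...} has a gap at slot 1 covering 11 - 20.  The loop computes
 * pstart = pivots[0] == 10 and gap = pivots[1] - pstart == 10, then skips
 * slot 2 since there cannot be two gaps in a row.
 */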

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata end of the node to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	    unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
		unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
		struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false) or
 * leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->ma_lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right. Note, does not clean out the
 * old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				 unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}
1858 
1859 /*
1860  * mab_calc_split() - Calculate the split location and if there needs to be two
1861  * splits.
1862  * @bn: The maple_big_node with the data
1863  * @mid_split: The second split, if required.  0 otherwise.
1864  *
1865  * Return: The first split location.  The middle split is set in @mid_split.
1866  */
1867 static inline int mab_calc_split(struct ma_state *mas,
1868 	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1869 {
1870 	unsigned char b_end = bn->b_end;
1871 	int split = b_end / 2; /* Assume equal split. */
1872 	unsigned char slot_min, slot_count = mt_slots[bn->type];
1873 
1874 	/*
1875 	 * To support gap tracking, all NULL entries are kept together and a node cannot
1876 	 * end on a NULL entry, with the exception of the left-most leaf.  The
1877 	 * limitation means that the split of a node must be checked for this condition
1878 	 * and be able to put more data in one direction or the other.
1879 	 */
1880 	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1881 		*mid_split = 0;
1882 		split = b_end - mt_min_slots[bn->type];
1883 
1884 		if (!ma_is_leaf(bn->type))
1885 			return split;
1886 
1887 		mas->mas_flags |= MA_STATE_REBALANCE;
1888 		if (!bn->slot[split])
1889 			split--;
1890 		return split;
1891 	}
1892 
1893 	/*
1894 	 * Although extremely rare, it is possible to enter what is known as the 3-way
1895 	 * split scenario.  The 3-way split comes about by means of a store of a range
1896 	 * that overwrites the end and beginning of two full nodes.  The result is a set
1897 	 * of entries that cannot be stored in 2 nodes.  Sometimes, these two nodes can
1898 	 * also be located in different parent nodes which are also full.  This can
1899 	 * carry upwards all the way to the root in the worst case.
1900 	 */
1901 	if (unlikely(mab_middle_node(bn, split, slot_count))) {
1902 		split = b_end / 3;
1903 		*mid_split = split * 2;
1904 	} else {
1905 		slot_min = mt_min_slots[bn->type];
1906 
1907 		*mid_split = 0;
1908 		/*
1909 		 * Avoid having a range less than the slot count unless it
1910 		 * causes one node to be deficient.
1911 		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
1912 		 * NOTE: mt_min_slots is 1 based; b_end and split are zero based.
1913 		while ((split < slot_count - 1) &&
1914 		       ((bn->pivot[split] - min) < slot_count - 1) &&
1915 		       (b_end - split > slot_min))
1916 			split++;
1917 	}
1918 
1919 	/* Avoid ending a node on a NULL entry */
1920 	split = mab_no_null_split(bn, split, slot_count);
1921 
1922 	if (unlikely(*mid_split))
1923 		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1924 
1925 	return split;
1926 }
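/*
 * Worked example of the 3-way split (illustrative): with b_end = 32 the data
 * is cut roughly into thirds before the NULL check:
 *
 *	split     = 32 / 3 = 10		(left node: offsets 0-10)
 *	mid_split = 10 * 2 = 20		(middle node: offsets 11-20)
 *					(right node: offsets 21 onwards)
 *
 * Both split points are then adjusted by mab_no_null_split() so that neither
 * node ends on a NULL entry.
 */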
1927 
1928 /*
1929  * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1930  * and set @b_node->b_end to the next free slot.
1931  * @mas: The maple state
1932  * @mas_start: The starting slot to copy
1933  * @mas_end: The end slot to copy (inclusively)
1934  * @b_node: The maple_big_node to place the data
1935  * @mab_start: The starting location in maple_big_node to store the data.
1936  */
1937 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1938 			unsigned char mas_end, struct maple_big_node *b_node,
1939 			unsigned char mab_start)
1940 {
1941 	enum maple_type mt;
1942 	struct maple_node *node;
1943 	void __rcu **slots;
1944 	unsigned long *pivots, *gaps;
1945 	int i = mas_start, j = mab_start;
1946 	unsigned char piv_end;
1947 
1948 	node = mas_mn(mas);
1949 	mt = mte_node_type(mas->node);
1950 	pivots = ma_pivots(node, mt);
1951 	if (!i) {
1952 		b_node->pivot[j] = pivots[i++];
1953 		if (unlikely(i > mas_end))
1954 			goto complete;
1955 		j++;
1956 	}
1957 
1958 	piv_end = min(mas_end, mt_pivots[mt]);
1959 	for (; i < piv_end; i++, j++) {
1960 		b_node->pivot[j] = pivots[i];
1961 		if (unlikely(!b_node->pivot[j]))
1962 			break;
1963 
1964 		if (unlikely(mas->max == b_node->pivot[j]))
1965 			goto complete;
1966 	}
1967 
1968 	if (likely(i <= mas_end))
1969 		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
1970 
1971 complete:
1972 	b_node->b_end = ++j;
1973 	j -= mab_start;
1974 	slots = ma_slots(node, mt);
1975 	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
1976 	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
1977 		gaps = ma_gaps(node, mt);
1978 		memcpy(b_node->gap + mab_start, gaps + mas_start,
1979 		       sizeof(unsigned long) * j);
1980 	}
1981 }
1982 
1983 /*
1984  * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
1985  * @mas: The maple state
1986  * @node: The maple node
1987  * @pivots: pointer to the maple node pivots
1988  * @mt: The maple type
1989  * @end: The assumed end
1990  *
1991  * Note, end may be incremented within this function but not modified at the
1992  * source.  This is fine since the metadata is the last thing to be stored in a
1993  * node during a write.
1994  */
1995 static inline void mas_leaf_set_meta(struct ma_state *mas,
1996 		struct maple_node *node, unsigned long *pivots,
1997 		enum maple_type mt, unsigned char end)
1998 {
1999 	/* There is already no room for metadata */
2000 	if (mt_pivots[mt] <= end)
2001 		return;
2002 
2003 	if (pivots[end] && pivots[end] < mas->max)
2004 		end++;
2005 
2006 	if (end < mt_slots[mt] - 1)
2007 		ma_set_meta(node, mt, 0, end);
2008 }
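/*
 * Example (illustrative, assuming a maple_leaf_64 node with 16 slots and 15
 * pivots): data ending at offset 12 with pivots[12] below mas->max makes
 * offset 13 the implied end; since 13 < 15, the metadata fits in the node and
 * records an end of 13.
 */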
2009 
2010 /*
2011  * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2012  * @b_node: the maple_big_node that has the data
2013  * @mab_start: the start location in @b_node.
2014  * @mab_end: The end location in @b_node (inclusive)
2015  * @mas: The maple state with the maple encoded node.
2016  * @new_max: Update @mas->max to the last pivot copied if true.
2016  */
2017 static inline void mab_mas_cp(struct maple_big_node *b_node,
2018 			      unsigned char mab_start, unsigned char mab_end,
2019 			      struct ma_state *mas, bool new_max)
2020 {
2021 	int i, j = 0;
2022 	enum maple_type mt = mte_node_type(mas->node);
2023 	struct maple_node *node = mte_to_node(mas->node);
2024 	void __rcu **slots = ma_slots(node, mt);
2025 	unsigned long *pivots = ma_pivots(node, mt);
2026 	unsigned long *gaps = NULL;
2027 	unsigned char end;
2028 
2029 	if (mab_end - mab_start > mt_pivots[mt])
2030 		mab_end--;
2031 
2032 	if (!pivots[mt_pivots[mt] - 1])
2033 		slots[mt_pivots[mt]] = NULL;
2034 
2035 	i = mab_start;
2036 	do {
2037 		pivots[j++] = b_node->pivot[i++];
2038 	} while (i <= mab_end && likely(b_node->pivot[i]));
2039 
2040 	memcpy(slots, b_node->slot + mab_start,
2041 	       sizeof(void *) * (i - mab_start));
2042 
2043 	if (new_max)
2044 		mas->max = b_node->pivot[i - 1];
2045 
2046 	end = j - 1;
2047 	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2048 		unsigned long max_gap = 0;
2049 		unsigned char offset = 0;
2050 
2051 		gaps = ma_gaps(node, mt);
2052 		do {
2053 			gaps[--j] = b_node->gap[--i];
2054 			if (gaps[j] > max_gap) {
2055 				offset = j;
2056 				max_gap = gaps[j];
2057 			}
2058 		} while (j);
2059 
2060 		ma_set_meta(node, mt, offset, end);
2061 	} else {
2062 		mas_leaf_set_meta(mas, node, pivots, mt, end);
2063 	}
2064 }
2065 
2066 /*
2067  * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2068  * @mas: the maple state with the maple encoded node of the sub-tree.
2069  *
2070  * Descend through a sub-tree and adopt children who do not have the correct
2071  * parents set.  Follow the nodes whose parent pointers are already correct,
2072  * as these are the new nodes which must be walked to find other incorrectly
2073  * set parents.
2074  */
2075 static inline void mas_descend_adopt(struct ma_state *mas)
2076 {
2077 	struct ma_state list[3], next[3];
2078 	int i, n;
2079 
2080 	/*
2081 	 * At each level there may be up to 3 correct parent pointers which indicate
2082 	 * the new nodes which need to be walked to find any new nodes at a lower level.
2083 	 */
2084 
2085 	for (i = 0; i < 3; i++) {
2086 		list[i] = *mas;
2087 		list[i].offset = 0;
2088 		next[i].offset = 0;
2089 	}
2090 	next[0] = *mas;
2091 
2092 	while (!mte_is_leaf(list[0].node)) {
2093 		n = 0;
2094 		for (i = 0; i < 3; i++) {
2095 			if (mas_is_none(&list[i]))
2096 				continue;
2097 
2098 			if (i && list[i-1].node == list[i].node)
2099 				continue;
2100 
2101 			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2102 				n++;
2103 
2104 			mas_adopt_children(&list[i], list[i].node);
2105 		}
2106 
2107 		while (n < 3)
2108 			next[n++].node = MAS_NONE;
2109 
2110 		/* descend by setting the list to the children */
2111 		for (i = 0; i < 3; i++)
2112 			list[i] = next[i];
2113 	}
2114 }
2115 
2116 /*
2117  * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2118  * @mas: The maple state
2119  * @end: The maple node end
2120  * @mt: The maple node type
2121  */
2122 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2123 				      enum maple_type mt)
2124 {
2125 	if (!(mas->mas_flags & MA_STATE_BULK))
2126 		return;
2127 
2128 	if (mte_is_root(mas->node))
2129 		return;
2130 
2131 	if (end > mt_min_slots[mt]) {
2132 		mas->mas_flags &= ~MA_STATE_REBALANCE;
2133 		return;
2134 	}
2135 }
2136 
2137 /*
2138  * mas_store_b_node() - Store an @entry into the b_node while also copying the
2139  * data from a maple encoded node.
2140  * @wr_mas: the maple write state
2141  * @b_node: the maple_big_node to fill with data
2142  * @offset_end: the offset to end copying
2143  *
2144  * Return: The actual end of the data stored in @b_node
2145  */
2146 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2147 		struct maple_big_node *b_node, unsigned char offset_end)
2148 {
2149 	unsigned char slot;
2150 	unsigned char b_end;
2151 	/* piv may underflow to ULONG_MAX; piv + 1 wraps back to 0 before use. */
2152 	unsigned long piv;
2153 	struct ma_state *mas = wr_mas->mas;
2154 
2155 	b_node->type = wr_mas->type;
2156 	b_end = 0;
2157 	slot = mas->offset;
2158 	if (slot) {
2159 		/* Copy start data up to insert. */
2160 		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2161 		b_end = b_node->b_end;
2162 		piv = b_node->pivot[b_end - 1];
2163 	} else
2164 		piv = mas->min - 1;
2165 
2166 	if (piv + 1 < mas->index) {
2167 		/* Handle range starting after old range */
2168 		b_node->slot[b_end] = wr_mas->content;
2169 		if (!wr_mas->content)
2170 			b_node->gap[b_end] = mas->index - 1 - piv;
2171 		b_node->pivot[b_end++] = mas->index - 1;
2172 	}
2173 
2174 	/* Store the new entry. */
2175 	mas->offset = b_end;
2176 	b_node->slot[b_end] = wr_mas->entry;
2177 	b_node->pivot[b_end] = mas->last;
2178 
2179 	/* Appended. */
2180 	if (mas->last >= mas->max)
2181 		goto b_end;
2182 
2183 	/* Handle new range ending before old range ends */
2184 	piv = mas_safe_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2185 	if (piv > mas->last) {
2186 		if (piv == ULONG_MAX)
2187 			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2188 
2189 		if (offset_end != slot)
2190 			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2191 							  offset_end);
2192 
2193 		b_node->slot[++b_end] = wr_mas->content;
2194 		if (!wr_mas->content)
2195 			b_node->gap[b_end] = piv - mas->last + 1;
2196 		b_node->pivot[b_end] = piv;
2197 	}
2198 
2199 	slot = offset_end + 1;
2200 	if (slot > wr_mas->node_end)
2201 		goto b_end;
2202 
2203 	/* Copy end data to the end of the node. */
2204 	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2205 	b_node->b_end--;
2206 	return;
2207 
2208 b_end:
2209 	b_node->b_end = b_end;
2210 }
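/*
 * Illustrative sketch of a store through the big node: writing entry E over
 * the range 15-25 into a leaf holding the ranges 0-9, 10-19 and 20-29 leaves
 * the following in b_node:
 *
 *	slot[0] = old 0-9 entry,   pivot[0] = 9   (copied start data)
 *	slot[1] = old 10-19 entry, pivot[1] = 14  (trimmed overlapping start)
 *	slot[2] = E,               pivot[2] = 25  (the new entry)
 *	slot[3] = old 20-29 entry, pivot[3] = 29  (trimmed overlapping end)
 */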
2211 
2212 /*
2213  * mas_prev_sibling() - Find the previous node with the same parent.
2214  * @mas: the maple state
2215  *
2216  * Return: True if there is a previous sibling, false otherwise.
2217  */
2218 static inline bool mas_prev_sibling(struct ma_state *mas)
2219 {
2220 	unsigned int p_slot = mte_parent_slot(mas->node);
2221 
2222 	if (mte_is_root(mas->node))
2223 		return false;
2224 
2225 	if (!p_slot)
2226 		return false;
2227 
2228 	mas_ascend(mas);
2229 	mas->offset = p_slot - 1;
2230 	mas_descend(mas);
2231 	return true;
2232 }
2233 
2234 /*
2235  * mas_next_sibling() - Find the next node with the same parent.
2236  * @mas: the maple state
2237  *
2238  * Return: true if there is a next sibling, false otherwise.
2239  */
2240 static inline bool mas_next_sibling(struct ma_state *mas)
2241 {
2242 	MA_STATE(parent, mas->tree, mas->index, mas->last);
2243 
2244 	if (mte_is_root(mas->node))
2245 		return false;
2246 
2247 	parent = *mas;
2248 	mas_ascend(&parent);
2249 	parent.offset = mte_parent_slot(mas->node) + 1;
2250 	if (parent.offset > mas_data_end(&parent))
2251 		return false;
2252 
2253 	*mas = parent;
2254 	mas_descend(mas);
2255 	return true;
2256 }
2257 
2258 /*
2259  * mte_node_or_node() - Return the encoded node or MAS_NONE.
2260  * @enode: The encoded maple node.
2261  *
2262  * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2263  *
2264  * Return: @enode or MAS_NONE
2265  */
2266 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2267 {
2268 	if (enode)
2269 		return enode;
2270 
2271 	return ma_enode_ptr(MAS_NONE);
2272 }
2273 
2274 /*
2275  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2276  * @wr_mas: The maple write state
2277  *
2278  * Uses mas_slot_locked() and does not need to worry about dead nodes.
2279  */
2280 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2281 {
2282 	struct ma_state *mas = wr_mas->mas;
2283 	unsigned char count, offset;
2284 
2285 	if (unlikely(ma_is_dense(wr_mas->type))) {
2286 		wr_mas->r_max = wr_mas->r_min = mas->index;
2287 		mas->offset = mas->index = mas->min;
2288 		return;
2289 	}
2290 
2291 	wr_mas->node = mas_mn(wr_mas->mas);
2292 	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2293 	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2294 					       wr_mas->pivots, mas->max);
2295 	offset = mas->offset;
2296 
2297 	while (offset < count && mas->index > wr_mas->pivots[offset])
2298 		offset++;
2299 
2300 	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
2301 	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
2302 	wr_mas->offset_end = mas->offset = offset;
2303 }
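/*
 * Example walk (illustrative): in a node with pivots {10, 20, 30} and
 * mas->index = 25, the loop stops at offset 2 because 25 <= 30, giving
 *
 *	wr_mas->r_min = 21	(previous pivot + 1)
 *	wr_mas->r_max = 30	(pivot at this offset)
 *
 * so the write targets the existing range 21-30.
 */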
2304 
2305 /*
2306  * mas_topiary_range() - Add a range of slots to the topiary.
2307  * @mas: The maple state
2308  * @destroy: The topiary to add the slots (usually destroy)
2309  * @start: The starting slot inclusively
2310  * @end: The end slot inclusively
2311  */
2312 static inline void mas_topiary_range(struct ma_state *mas,
2313 	struct ma_topiary *destroy, unsigned char start, unsigned char end)
2314 {
2315 	void __rcu **slots;
2316 	unsigned char offset;
2317 
2318 	MAS_BUG_ON(mas, mte_is_leaf(mas->node));
2319 
2320 	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2321 	for (offset = start; offset <= end; offset++) {
2322 		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2323 
2324 		if (mte_dead_node(enode))
2325 			continue;
2326 
2327 		mat_add(destroy, enode);
2328 	}
2329 }
2330 
2331 /*
2332  * mast_topiary() - Add the portions of the tree to the removal list; either to
2333  * be freed or discarded (destroy walk).
2334  * @mast: The maple_subtree_state.
2335  */
2336 static inline void mast_topiary(struct maple_subtree_state *mast)
2337 {
2338 	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2339 	unsigned char r_start, r_end;
2340 	unsigned char l_start, l_end;
2341 	void __rcu **l_slots, **r_slots;
2342 
2343 	wr_mas.type = mte_node_type(mast->orig_l->node);
2344 	mast->orig_l->index = mast->orig_l->last;
2345 	mas_wr_node_walk(&wr_mas);
2346 	l_start = mast->orig_l->offset + 1;
2347 	l_end = mas_data_end(mast->orig_l);
2348 	r_start = 0;
2349 	r_end = mast->orig_r->offset;
2350 
2351 	if (r_end)
2352 		r_end--;
2353 
2354 	l_slots = ma_slots(mas_mn(mast->orig_l),
2355 			   mte_node_type(mast->orig_l->node));
2356 
2357 	r_slots = ma_slots(mas_mn(mast->orig_r),
2358 			   mte_node_type(mast->orig_r->node));
2359 
2360 	if ((l_start < l_end) &&
2361 	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2362 		l_start++;
2363 	}
2364 
2365 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2366 		if (r_end)
2367 			r_end--;
2368 	}
2369 
2370 	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2371 		return;
2372 
2373 	/* At the node where left and right sides meet, add the parts between */
2374 	if (mast->orig_l->node == mast->orig_r->node) {
2375 		return mas_topiary_range(mast->orig_l, mast->destroy,
2376 					     l_start, r_end);
2377 	}
2378 
2379 	/* mast->orig_r is different and consumed. */
2380 	if (mte_is_leaf(mast->orig_r->node))
2381 		return;
2382 
2383 	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
2384 		l_end--;
2385 
2387 	if (l_start <= l_end)
2388 		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2389 
2390 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2391 		r_start++;
2392 
2393 	if (r_start <= r_end)
2394 		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2395 }
2396 
2397 /*
2398  * mast_rebalance_next() - Rebalance against the next node
2399  * @mast: The maple subtree state
2400  * @old_r: The encoded maple node to the right (next node).
2401  */
2402 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2403 {
2404 	unsigned char b_end = mast->bn->b_end;
2405 
2406 	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2407 		   mast->bn, b_end);
2408 	mast->orig_r->last = mast->orig_r->max;
2409 }
2410 
2411 /*
2412  * mast_rebalance_prev() - Rebalance against the previous node
2413  * @mast: The maple subtree state
2415  */
2416 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2417 {
2418 	unsigned char end = mas_data_end(mast->orig_l) + 1;
2419 	unsigned char b_end = mast->bn->b_end;
2420 
2421 	mab_shift_right(mast->bn, end);
2422 	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2423 	mast->l->min = mast->orig_l->min;
2424 	mast->orig_l->index = mast->orig_l->min;
2425 	mast->bn->b_end = end + b_end;
2426 	mast->l->offset += end;
2427 }
2428 
2429 /*
2430  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
2431  * the node to the right.  Check the nodes to the right, then the left, at each
2432  * level upwards until the root is reached.  Free and destroy as needed.
2433  * Data is copied into the @mast->bn.
2434  * @mast: The maple_subtree_state.
2435  *
2436  * Return: True if rebalanced against a neighbour, false otherwise.
2435  */
2436 static inline
2437 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2438 {
2439 	struct ma_state r_tmp = *mast->orig_r;
2440 	struct ma_state l_tmp = *mast->orig_l;
2441 	struct maple_enode *ancestor = NULL;
2442 	unsigned char start, end;
2443 	unsigned char depth = 0;
2444 
2447 	do {
2448 		mas_ascend(mast->orig_r);
2449 		mas_ascend(mast->orig_l);
2450 		depth++;
2451 		if (!ancestor &&
2452 		    (mast->orig_r->node == mast->orig_l->node)) {
2453 			ancestor = mast->orig_r->node;
2454 			end = mast->orig_r->offset - 1;
2455 			start = mast->orig_l->offset + 1;
2456 		}
2457 
2458 		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2459 			if (!ancestor) {
2460 				ancestor = mast->orig_r->node;
2461 				start = 0;
2462 			}
2463 
2464 			mast->orig_r->offset++;
2465 			do {
2466 				mas_descend(mast->orig_r);
2467 				mast->orig_r->offset = 0;
2468 				depth--;
2469 			} while (depth);
2470 
2471 			mast_rebalance_next(mast);
2472 			do {
2473 				unsigned char l_off = 0;
2474 				struct maple_enode *child = r_tmp.node;
2475 
2476 				mas_ascend(&r_tmp);
2477 				if (ancestor == r_tmp.node)
2478 					l_off = start;
2479 
2480 				if (r_tmp.offset)
2481 					r_tmp.offset--;
2482 
2483 				if (l_off < r_tmp.offset)
2484 					mas_topiary_range(&r_tmp, mast->destroy,
2485 							  l_off, r_tmp.offset);
2486 
2487 				if (l_tmp.node != child)
2488 					mat_add(mast->free, child);
2489 
2490 			} while (r_tmp.node != ancestor);
2491 
2492 			*mast->orig_l = l_tmp;
2493 			return true;
2494 
2495 		} else if (mast->orig_l->offset != 0) {
2496 			if (!ancestor) {
2497 				ancestor = mast->orig_l->node;
2498 				end = mas_data_end(mast->orig_l);
2499 			}
2500 
2501 			mast->orig_l->offset--;
2502 			do {
2503 				mas_descend(mast->orig_l);
2504 				mast->orig_l->offset =
2505 					mas_data_end(mast->orig_l);
2506 				depth--;
2507 			} while (depth);
2508 
2509 			mast_rebalance_prev(mast);
2510 			do {
2511 				unsigned char r_off;
2512 				struct maple_enode *child = l_tmp.node;
2513 
2514 				mas_ascend(&l_tmp);
2515 				if (ancestor == l_tmp.node)
2516 					r_off = end;
2517 				else
2518 					r_off = mas_data_end(&l_tmp);
2519 
2520 				if (l_tmp.offset < r_off)
2521 					l_tmp.offset++;
2522 
2523 				if (l_tmp.offset < r_off)
2524 					mas_topiary_range(&l_tmp, mast->destroy,
2525 							  l_tmp.offset, r_off);
2526 
2527 				if (r_tmp.node != child)
2528 					mat_add(mast->free, child);
2529 
2530 			} while (l_tmp.node != ancestor);
2531 
2532 			*mast->orig_r = r_tmp;
2533 			return true;
2534 		}
2535 	} while (!mte_is_root(mast->orig_r->node));
2536 
2537 	*mast->orig_r = r_tmp;
2538 	*mast->orig_l = l_tmp;
2539 	return false;
2540 }
2541 
2542 /*
2543  * mast_ascend_free() - Add current original maple state nodes to the free list
2544  * and ascend.
2545  * @mast: the maple subtree state.
2546  *
2547  * Ascend the original left and right sides and add the previous nodes to the
2548  * free list.  Set the slots to point to the correct location in the new nodes.
2549  */
2550 static inline void
2551 mast_ascend_free(struct maple_subtree_state *mast)
2552 {
2553 	MA_WR_STATE(wr_mas, mast->orig_r,  NULL);
2554 	struct maple_enode *left = mast->orig_l->node;
2555 	struct maple_enode *right = mast->orig_r->node;
2556 
2557 	mas_ascend(mast->orig_l);
2558 	mas_ascend(mast->orig_r);
2559 	mat_add(mast->free, left);
2560 
2561 	if (left != right)
2562 		mat_add(mast->free, right);
2563 
2564 	mast->orig_r->offset = 0;
2565 	mast->orig_r->index = mast->r->max;
2566 	/* last should be larger than or equal to index */
2567 	if (mast->orig_r->last < mast->orig_r->index)
2568 		mast->orig_r->last = mast->orig_r->index;
2569 	/*
2570 	 * The node may not contain the value so set slot to ensure all
2571 	 * of the node's contents are freed or destroyed.
2572 	 */
2573 	wr_mas.type = mte_node_type(mast->orig_r->node);
2574 	mas_wr_node_walk(&wr_mas);
2575 	/* Set up the left side of things */
2576 	mast->orig_l->offset = 0;
2577 	mast->orig_l->index = mast->l->min;
2578 	wr_mas.mas = mast->orig_l;
2579 	wr_mas.type = mte_node_type(mast->orig_l->node);
2580 	mas_wr_node_walk(&wr_mas);
2581 
2582 	mast->bn->type = wr_mas.type;
2583 }
2584 
2585 /*
2586  * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2587  * @mas: the maple state with the allocations.
2588  * @b_node: the maple_big_node with the type encoding.
2589  *
2590  * Use the node type from the maple_big_node to allocate a new node from the
2591  * ma_state.  This function exists mainly for code readability.
2592  *
2593  * Return: A new maple encoded node
2594  */
2595 static inline struct maple_enode
2596 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2597 {
2598 	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2599 }
2600 
2601 /*
2602  * mas_mab_to_node() - Set up right and middle nodes
2603  *
2604  * @mas: the maple state that contains the allocations.
2605  * @b_node: the node which contains the data.
2606  * @left: The pointer which will have the left node
2607  * @right: The pointer which may have the right node
2608  * @middle: the pointer which may have the middle node (rare)
2609  * @mid_split: the split location for the middle node
2610  * @min: the minimum index of the data in @b_node
2611  *
2611  * Return: the split of left.
2612  */
2613 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2614 	struct maple_big_node *b_node, struct maple_enode **left,
2615 	struct maple_enode **right, struct maple_enode **middle,
2616 	unsigned char *mid_split, unsigned long min)
2617 {
2618 	unsigned char split = 0;
2619 	unsigned char slot_count = mt_slots[b_node->type];
2620 
2621 	*left = mas_new_ma_node(mas, b_node);
2622 	*right = NULL;
2623 	*middle = NULL;
2624 	*mid_split = 0;
2625 
2626 	if (b_node->b_end < slot_count) {
2627 		split = b_node->b_end;
2628 	} else {
2629 		split = mab_calc_split(mas, b_node, mid_split, min);
2630 		*right = mas_new_ma_node(mas, b_node);
2631 	}
2632 
2633 	if (*mid_split)
2634 		*middle = mas_new_ma_node(mas, b_node);
2635 
2636 	return split;
2638 }
2639 
2640 /*
2641  * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2642  * pointer.
2643  * @b_node: the big node to add the entry
2644  * @mas: the maple state to get the pivot (mas->max)
2645  * @entry: the entry to add, if NULL nothing happens.
2646  */
2647 static inline void mab_set_b_end(struct maple_big_node *b_node,
2648 				 struct ma_state *mas,
2649 				 void *entry)
2650 {
2651 	if (!entry)
2652 		return;
2653 
2654 	b_node->slot[b_node->b_end] = entry;
2655 	if (mt_is_alloc(mas->tree))
2656 		b_node->gap[b_node->b_end] = mas_max_gap(mas);
2657 	b_node->pivot[b_node->b_end++] = mas->max;
2658 }
2659 
2660 /*
2661  * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2662  * of @mas->node to either @left or @right, depending on @slot and @split
2663  *
2664  * @mas: the maple state with the node that needs a parent
2665  * @left: possible parent 1
2666  * @right: possible parent 2
2667  * @slot: the slot where mas->node was placed
2668  * @split: the split location between @left and @right
2669  */
2670 static inline void mas_set_split_parent(struct ma_state *mas,
2671 					struct maple_enode *left,
2672 					struct maple_enode *right,
2673 					unsigned char *slot, unsigned char split)
2674 {
2675 	if (mas_is_none(mas))
2676 		return;
2677 
2678 	if ((*slot) <= split)
2679 		mas_set_parent(mas, mas->node, left, *slot);
2680 	else if (right)
2681 		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);
2682 
2683 	(*slot)++;
2684 }
2685 
2686 /*
2687  * mte_mid_split_check() - Check if the next node passes the mid-split
2688  * @l: Pointer to the left encoded maple node.
2689  * @r: Pointer to the right encoded maple node.
2690  * @right: The encoded maple node used once @slot passes @mid_split.
2691  * @slot: The offset
2692  * @split: Pointer to the split location.
2693  * @mid_split: The middle split.
2694  */
2695 static inline void mte_mid_split_check(struct maple_enode **l,
2696 				       struct maple_enode **r,
2697 				       struct maple_enode *right,
2698 				       unsigned char slot,
2699 				       unsigned char *split,
2700 				       unsigned char mid_split)
2701 {
2702 	if (*r == right)
2703 		return;
2704 
2705 	if (slot < mid_split)
2706 		return;
2707 
2708 	*l = *r;
2709 	*r = right;
2710 	*split = mid_split;
2711 }
2712 
2713 /*
2714  * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
2715  * is taken from @mast->l.
2716  * @mast: the maple subtree state
2717  * @left: the left node
2718  * @middle: the middle node
2719  * @right: the right node
2720  * @split: the split location
2721  * @mid_split: the split location for the middle node
2720  */
2721 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2722 					  struct maple_enode *left,
2723 					  struct maple_enode *middle,
2724 					  struct maple_enode *right,
2725 					  unsigned char split,
2726 					  unsigned char mid_split)
2727 {
2728 	unsigned char slot;
2729 	struct maple_enode *l = left;
2730 	struct maple_enode *r = right;
2731 
2732 	if (mas_is_none(mast->l))
2733 		return;
2734 
2735 	if (middle)
2736 		r = middle;
2737 
2738 	slot = mast->l->offset;
2739 
2740 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2741 	mas_set_split_parent(mast->l, l, r, &slot, split);
2742 
2743 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2744 	mas_set_split_parent(mast->m, l, r, &slot, split);
2745 
2746 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2747 	mas_set_split_parent(mast->r, l, r, &slot, split);
2748 }
2749 
2750 /*
2751  * mas_wmb_replace() - Write memory barrier and replace
2752  * @mas: The maple state
2753  * @free: the maple topiary list of nodes to free
2754  * @destroy: The maple topiary list of nodes to destroy (walk and free)
2755  *
2756  * Updates gap as necessary.
2757  */
2758 static inline void mas_wmb_replace(struct ma_state *mas,
2759 				   struct ma_topiary *free,
2760 				   struct ma_topiary *destroy)
2761 {
2762 	/* All nodes must see old data as dead prior to replacing that data */
2763 	smp_wmb(); /* Needed for RCU */
2764 
2765 	/* Insert the new data in the tree */
2766 	mas_replace(mas, true);
2767 
2768 	if (!mte_is_leaf(mas->node))
2769 		mas_descend_adopt(mas);
2770 
2771 	mas_mat_free(mas, free);
2772 
2773 	if (destroy)
2774 		mas_mat_destroy(mas, destroy);
2775 
2776 	if (mte_is_leaf(mas->node))
2777 		return;
2778 
2779 	mas_update_gap(mas);
2780 }
2781 
2782 /*
2783  * mast_new_root() - Set a new tree root during subtree creation
2784  * @mast: The maple subtree state
2785  * @mas: The maple state
2786  */
2787 static inline void mast_new_root(struct maple_subtree_state *mast,
2788 				 struct ma_state *mas)
2789 {
2790 	mas_mn(mast->l)->parent =
2791 		ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2792 	if (!mte_dead_node(mast->orig_l->node) &&
2793 	    !mte_is_root(mast->orig_l->node)) {
2794 		do {
2795 			mast_ascend_free(mast);
2796 			mast_topiary(mast);
2797 		} while (!mte_is_root(mast->orig_l->node));
2798 	}
2799 	if ((mast->orig_l->node != mas->node) &&
2800 		   (mast->l->depth > mas_mt_height(mas))) {
2801 		mat_add(mast->free, mas->node);
2802 	}
2803 }
2804 
2805 /*
2806  * mast_cp_to_nodes() - Copy data out to nodes.
2807  * @mast: The maple subtree state
2808  * @left: The left encoded maple node
2809  * @middle: The middle encoded maple node
2810  * @right: The right encoded maple node
2811  * @split: The location to split between left and (middle ? middle : right)
2812  * @mid_split: The location to split between middle and right.
2813  */
2814 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2815 	struct maple_enode *left, struct maple_enode *middle,
2816 	struct maple_enode *right, unsigned char split, unsigned char mid_split)
2817 {
2818 	bool new_lmax = true;
2819 
2820 	mast->l->node = mte_node_or_none(left);
2821 	mast->m->node = mte_node_or_none(middle);
2822 	mast->r->node = mte_node_or_none(right);
2823 
2824 	mast->l->min = mast->orig_l->min;
2825 	if (split == mast->bn->b_end) {
2826 		mast->l->max = mast->orig_r->max;
2827 		new_lmax = false;
2828 	}
2829 
2830 	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2831 
2832 	if (middle) {
2833 		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2834 		mast->m->min = mast->bn->pivot[split] + 1;
2835 		split = mid_split;
2836 	}
2837 
2838 	mast->r->max = mast->orig_r->max;
2839 	if (right) {
2840 		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2841 		mast->r->min = mast->bn->pivot[split] + 1;
2842 	}
2843 }
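/*
 * Copy layout example (illustrative): with b_end = 20 and split = 9, the left
 * node receives offsets 0-9 and the right node offsets 10-20.  If a middle
 * node is needed (say mid_split = 15), the middle takes offsets 10-15 and the
 * right node takes 16-20 instead.
 */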
2844 
2845 /*
2846  * mast_combine_cp_left() - Copy in the original left side of the tree into the
2847  * combined data set in the maple subtree state big node.
2848  * @mast: The maple subtree state
2849  */
2850 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2851 {
2852 	unsigned char l_slot = mast->orig_l->offset;
2853 
2854 	if (!l_slot)
2855 		return;
2856 
2857 	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2858 }
2859 
2860 /*
2861  * mast_combine_cp_right() - Copy in the original right side of the tree into the
2862  * combined data set in the maple subtree state big node.
2863  * @mast: The maple subtree state
2864  */
2865 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2866 {
2867 	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2868 		return;
2869 
2870 	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2871 		   mt_slot_count(mast->orig_r->node), mast->bn,
2872 		   mast->bn->b_end);
2873 	mast->orig_r->last = mast->orig_r->max;
2874 }
2875 
2876 /*
2877  * mast_sufficient() - Check if the maple subtree state has enough data in the
2878  * big node to create at least one sufficient node
2879  * @mast: the maple subtree state
2880  *
2881  * Return: True if there is enough data, false otherwise.
2880  */
2881 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2882 {
2883 	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2884 		return true;
2885 
2886 	return false;
2887 }
2888 
2889 /*
2890  * mast_overflow() - Check if there is too much data in the subtree state for a
2891  * single node.
2892  * @mast: The maple subtree state
2893  *
2894  * Return: True if the data overflows a single node, false otherwise.
2893  */
2894 static inline bool mast_overflow(struct maple_subtree_state *mast)
2895 {
2896 	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2897 		return true;
2898 
2899 	return false;
2900 }
2901 
2902 static inline void *mtree_range_walk(struct ma_state *mas)
2903 {
2904 	unsigned long *pivots;
2905 	unsigned char offset;
2906 	struct maple_node *node;
2907 	struct maple_enode *next, *last;
2908 	enum maple_type type;
2909 	void __rcu **slots;
2910 	unsigned char end;
2911 	unsigned long max, min;
2912 	unsigned long prev_max, prev_min;
2913 
2914 	next = mas->node;
2915 	min = mas->min;
2916 	max = mas->max;
2917 	do {
2918 		offset = 0;
2919 		last = next;
2920 		node = mte_to_node(next);
2921 		type = mte_node_type(next);
2922 		pivots = ma_pivots(node, type);
2923 		end = ma_data_end(node, type, pivots, max);
2924 		if (unlikely(ma_dead_node(node)))
2925 			goto dead_node;
2926 
2927 		if (pivots[offset] >= mas->index) {
2928 			prev_max = max;
2929 			prev_min = min;
2930 			max = pivots[offset];
2931 			goto next;
2932 		}
2933 
2934 		do {
2935 			offset++;
2936 		} while ((offset < end) && (pivots[offset] < mas->index));
2937 
2938 		prev_min = min;
2939 		min = pivots[offset - 1] + 1;
2940 		prev_max = max;
2941 		if (likely(offset < end && pivots[offset]))
2942 			max = pivots[offset];
2943 
2944 next:
2945 		slots = ma_slots(node, type);
2946 		next = mt_slot(mas->tree, slots, offset);
2947 		if (unlikely(ma_dead_node(node)))
2948 			goto dead_node;
2949 	} while (!ma_is_leaf(type));
2950 
2951 	mas->offset = offset;
2952 	mas->index = min;
2953 	mas->last = max;
2954 	mas->min = prev_min;
2955 	mas->max = prev_max;
2956 	mas->node = last;
2957 	return (void *)next;
2958 
2959 dead_node:
2960 	mas_reset(mas);
2961 	return NULL;
2962 }
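/*
 * Descent example (illustrative): looking up index 25 in a two-level tree
 * whose root pivots are {100, 200}, the walk takes offset 0 (25 <= 100) and
 * descends with min = 0, max = 100, then repeats in the leaf.  On return,
 * mas->index and mas->last hold the bounds of the range containing 25, while
 * prev_min and prev_max leave the leaf node's limits in mas->min and
 * mas->max.
 */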
2963 
2964 /*
2965  * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
2966  * @mas: The starting maple state
2967  * @mast: The maple_subtree_state, keeps track of 4 maple states.
2968  * @count: The estimated count of iterations needed.
2969  *
2970  * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
2971  * is hit.  First @b_node is split into two entries which are inserted into the
2972  * next iteration of the loop.  @b_node is returned populated with the final
2973  * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
2974  * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
2975  * to account for what has been copied into the new sub-tree.  The update of
2976  * orig_l_mas->last is used in mas_consume to find the slots that will need to
2977  * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
2978  * the new sub-tree in case the sub-tree becomes the full tree.
2979  *
2980  * Return: the number of elements in b_node during the last loop.
2981  */
2982 static int mas_spanning_rebalance(struct ma_state *mas,
2983 		struct maple_subtree_state *mast, unsigned char count)
2984 {
2985 	unsigned char split, mid_split;
2986 	unsigned char slot = 0;
2987 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2988 
2989 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2990 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2991 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2992 	MA_TOPIARY(free, mas->tree);
2993 	MA_TOPIARY(destroy, mas->tree);
2994 
2995 	/*
2996 	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2997 	 * Rebalancing is done by use of the ``struct ma_topiary``.
2998 	 */
2999 	mast->l = &l_mas;
3000 	mast->m = &m_mas;
3001 	mast->r = &r_mas;
3002 	mast->free = &free;
3003 	mast->destroy = &destroy;
3004 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3005 
3006 	/* If this is not the root and the data is insufficient, rebalance. */
3007 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3008 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3009 		mast_spanning_rebalance(mast);
3010 
3011 	mast->orig_l->depth = 0;
3012 
3013 	/*
3014 	 * Each level of the tree is examined and balanced, pushing data to the left or
3015 	 * right, or rebalancing against left or right nodes is employed to avoid
3016 	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
3017 	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
3018 	 * will have the incorrect parent pointers and currently be in two trees: the
3019 	 * original tree and the partially new tree.  To remedy the parent pointers in
3020 	 * the old tree, the new data is swapped into the active tree and a walk down
3021 	 * the tree is performed and the parent pointers are updated.
3022 	 * See mas_descend_adopt() for more information.
3023 	 */
3024 	while (count--) {
3025 		mast->bn->b_end--;
3026 		mast->bn->type = mte_node_type(mast->orig_l->node);
3027 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3028 					&mid_split, mast->orig_l->min);
3029 		mast_set_split_parents(mast, left, middle, right, split,
3030 				       mid_split);
3031 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3032 
3033 		/*
3034 		 * Copy data from the next level in the tree to mast->bn for the
3035 		 * next iteration.
3036 		 */
3037 		memset(mast->bn, 0, sizeof(struct maple_big_node));
3038 		mast->bn->type = mte_node_type(left);
3039 		mast->orig_l->depth++;
3040 
3041 		/* Root already stored in l->node. */
3042 		if (mas_is_root_limits(mast->l))
3043 			goto new_root;
3044 
3045 		mast_ascend_free(mast);
3046 		mast_combine_cp_left(mast);
3047 		l_mas.offset = mast->bn->b_end;
3048 		mab_set_b_end(mast->bn, &l_mas, left);
3049 		mab_set_b_end(mast->bn, &m_mas, middle);
3050 		mab_set_b_end(mast->bn, &r_mas, right);
3051 
3052 		/* Copy anything necessary out of the right node. */
3053 		mast_combine_cp_right(mast);
3054 		mast_topiary(mast);
3055 		mast->orig_l->last = mast->orig_l->max;
3056 
3057 		if (mast_sufficient(mast))
3058 			continue;
3059 
3060 		if (mast_overflow(mast))
3061 			continue;
3062 
3063 		/* May be a new root stored in mast->bn */
3064 		if (mas_is_root_limits(mast->orig_l))
3065 			break;
3066 
3067 		mast_spanning_rebalance(mast);
3068 
3069 		/* rebalancing from other nodes may require another loop. */
3070 		if (!count)
3071 			count++;
3072 	}
3073 
3074 	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3075 				mte_node_type(mast->orig_l->node));
3076 	mast->orig_l->depth++;
3077 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3078 	mas_set_parent(mas, left, l_mas.node, slot);
3079 	if (middle)
3080 		mas_set_parent(mas, middle, l_mas.node, ++slot);
3081 
3082 	if (right)
3083 		mas_set_parent(mas, right, l_mas.node, ++slot);
3084 
3085 	if (mas_is_root_limits(mast->l)) {
3086 new_root:
3087 		mast_new_root(mast, mas);
3088 	} else {
3089 		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3090 	}
3091 
3092 	if (!mte_dead_node(mast->orig_l->node))
3093 		mat_add(&free, mast->orig_l->node);
3094 
3095 	mas->depth = mast->orig_l->depth;
3096 	*mast->orig_l = l_mas;
3097 	mte_set_node_dead(mas->node);
3098 
3099 	/* Set up mas for insertion. */
3100 	mast->orig_l->depth = mas->depth;
3101 	mast->orig_l->alloc = mas->alloc;
3102 	*mas = *mast->orig_l;
3103 	mas_wmb_replace(mas, &free, &destroy);
3104 	mtree_range_walk(mas);
3105 	return mast->bn->b_end;
3106 }
3107 
3108 /*
3109  * mas_rebalance() - Rebalance a given node.
3110  * @mas: The maple state
3111  * @b_node: The big maple node.
3112  *
3113  * Rebalance two nodes into a single node or two new nodes that are sufficient.
3114  * Continue upwards until tree is sufficient.
3115  *
3116  * Return: the number of elements in b_node during the last loop.
3117  */
3118 static inline int mas_rebalance(struct ma_state *mas,
3119 				struct maple_big_node *b_node)
3120 {
3121 	char empty_count = mas_mt_height(mas);
3122 	struct maple_subtree_state mast;
3123 	unsigned char shift, b_end = ++b_node->b_end;
3124 
3125 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3126 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3127 
3128 	trace_ma_op(__func__, mas);
3129 
3130 	/*
3131 	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
3132 	 * against the node to the right if it exists, otherwise the node to the
3133 	 * left of this node is rebalanced against this node.  If rebalancing
3134 	 * causes just one node to be produced instead of two, then the parent
3135 	 * is also examined and rebalanced if it is insufficient.  Every level
3136 	 * tries to combine the data in the same way.  If one node contains the
3137 	 * entire range of the tree, then that node is used as a new root node.
3138 	 */
3139 	mas_node_count(mas, empty_count * 2 - 1);
3140 	if (mas_is_err(mas))
3141 		return 0;
3142 
3143 	mast.orig_l = &l_mas;
3144 	mast.orig_r = &r_mas;
3145 	mast.bn = b_node;
3146 	mast.bn->type = mte_node_type(mas->node);
3147 
3148 	l_mas = r_mas = *mas;
3149 
3150 	if (mas_next_sibling(&r_mas)) {
3151 		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3152 		r_mas.last = r_mas.index = r_mas.max;
3153 	} else {
3154 		mas_prev_sibling(&l_mas);
3155 		shift = mas_data_end(&l_mas) + 1;
3156 		mab_shift_right(b_node, shift);
3157 		mas->offset += shift;
3158 		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3159 		b_node->b_end = shift + b_end;
3160 		l_mas.index = l_mas.last = l_mas.min;
3161 	}
3162 
3163 	return mas_spanning_rebalance(mas, &mast, empty_count);
3164 }
3165 
3166 /*
3167  * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3168  * state.
3169  * @mas: The maple state
3170  * @end: The end of the left-most node.
3171  *
3172  * During a mass-insert event (such as forking), it may be necessary to
3173  * rebalance the left-most node when it is not sufficient.
3174  */
3175 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3176 {
3177 	enum maple_type mt = mte_node_type(mas->node);
3178 	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3179 	struct maple_enode *eparent;
3180 	unsigned char offset, tmp, split = mt_slots[mt] / 2;
3181 	void __rcu **l_slots, **slots;
3182 	unsigned long *l_pivs, *pivs, gap;
3183 	bool in_rcu = mt_in_rcu(mas->tree);
3184 
3185 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3186 
3187 	l_mas = *mas;
3188 	mas_prev_sibling(&l_mas);
3189 
3190 	/* set up node. */
3191 	if (in_rcu) {
3192 		/* Allocate for both left and right as well as parent. */
3193 		mas_node_count(mas, 3);
3194 		if (mas_is_err(mas))
3195 			return;
3196 
3197 		newnode = mas_pop_node(mas);
3198 	} else {
3199 		newnode = &reuse;
3200 	}
3201 
3202 	node = mas_mn(mas);
3203 	newnode->parent = node->parent;
3204 	slots = ma_slots(newnode, mt);
3205 	pivs = ma_pivots(newnode, mt);
3206 	left = mas_mn(&l_mas);
3207 	l_slots = ma_slots(left, mt);
3208 	l_pivs = ma_pivots(left, mt);
3209 	if (!l_slots[split])
3210 		split++;
3211 	tmp = mas_data_end(&l_mas) - split;
3212 
3213 	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3214 	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3215 	pivs[tmp] = l_mas.max;
3216 	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3217 	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3218 
3219 	l_mas.max = l_pivs[split];
3220 	mas->min = l_mas.max + 1;
3221 	eparent = mt_mk_node(mte_parent(l_mas.node),
3222 			     mas_parent_type(&l_mas, l_mas.node));
3223 	tmp += end;
3224 	if (!in_rcu) {
3225 		unsigned char max_p = mt_pivots[mt];
3226 		unsigned char max_s = mt_slots[mt];
3227 
3228 		if (tmp < max_p)
3229 			memset(pivs + tmp, 0,
3230 			       sizeof(unsigned long) * (max_p - tmp));
3231 
3232 		if (tmp < mt_slots[mt])
3233 			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3234 
3235 		memcpy(node, newnode, sizeof(struct maple_node));
3236 		ma_set_meta(node, mt, 0, tmp - 1);
3237 		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3238 			      l_pivs[split]);
3239 
3240 		/* Remove data from l_pivs. */
3241 		tmp = split + 1;
3242 		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3243 		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3244 		ma_set_meta(left, mt, 0, split);
3245 
3246 		goto done;
3247 	}
3248 
3249 	/* RCU requires replacing both l_mas, mas, and parent. */
3250 	mas->node = mt_mk_node(newnode, mt);
3251 	ma_set_meta(newnode, mt, 0, tmp);
3252 
3253 	new_left = mas_pop_node(mas);
3254 	new_left->parent = left->parent;
3255 	mt = mte_node_type(l_mas.node);
3256 	slots = ma_slots(new_left, mt);
3257 	pivs = ma_pivots(new_left, mt);
3258 	memcpy(slots, l_slots, sizeof(void *) * split);
3259 	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3260 	ma_set_meta(new_left, mt, 0, split);
3261 	l_mas.node = mt_mk_node(new_left, mt);
3262 
3263 	/* replace parent. */
3264 	offset = mte_parent_slot(mas->node);
3265 	mt = mas_parent_type(&l_mas, l_mas.node);
3266 	parent = mas_pop_node(mas);
3267 	slots = ma_slots(parent, mt);
3268 	pivs = ma_pivots(parent, mt);
3269 	memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3270 	rcu_assign_pointer(slots[offset], mas->node);
3271 	rcu_assign_pointer(slots[offset - 1], l_mas.node);
3272 	pivs[offset - 1] = l_mas.max;
3273 	eparent = mt_mk_node(parent, mt);
3274 done:
3275 	gap = mas_leaf_max_gap(mas);
3276 	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3277 	gap = mas_leaf_max_gap(&l_mas);
3278 	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3279 	mas_ascend(mas);
3280 
3281 	if (in_rcu)
3282 		mas_replace(mas, false);
3283 
3284 	mas_update_gap(mas);
3285 }
3286 
3287 /*
3288  * mas_split_final_node() - Split the final node in a subtree operation.
3289  * @mast: the maple subtree state
3290  * @mas: The maple state
3291  * @height: The height of the tree in case it's a new root.
3292  */
3293 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3294 					struct ma_state *mas, int height)
3295 {
3296 	struct maple_enode *ancestor;
3297 
3298 	if (mte_is_root(mas->node)) {
3299 		if (mt_is_alloc(mas->tree))
3300 			mast->bn->type = maple_arange_64;
3301 		else
3302 			mast->bn->type = maple_range_64;
3303 		mas->depth = height;
3304 	}
3305 	/*
3306 	 * Only a single node is used here; it could be the root.
3307 	 * The big_node data should just fit in a single node.
3308 	 */
3309 	ancestor = mas_new_ma_node(mas, mast->bn);
3310 	mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset);
3311 	mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset);
3312 	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3313 
3314 	mast->l->node = ancestor;
3315 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3316 	mas->offset = mast->bn->b_end - 1;
3317 	return true;
3318 }
3319 
3320 /*
3321  * mast_fill_bnode() - Copy data into the big node in the subtree state
3322  * @mast: The maple subtree state
3323  * @mas: the maple state
3324  * @skip: The number of entries to skip for the new nodes' insertion.
3325  */
3326 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3327 					 struct ma_state *mas,
3328 					 unsigned char skip)
3329 {
3330 	bool cp = true;
3331 	struct maple_enode *old = mas->node;
3332 	unsigned char split;
3333 
3334 	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3335 	memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot));
3336 	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3337 	mast->bn->b_end = 0;
3338 
3339 	if (mte_is_root(mas->node)) {
3340 		cp = false;
3341 	} else {
3342 		mas_ascend(mas);
3343 		mat_add(mast->free, old);
3344 		mas->offset = mte_parent_slot(mas->node);
3345 	}
3346 
3347 	if (cp && mast->l->offset)
3348 		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3349 
3350 	split = mast->bn->b_end;
3351 	mab_set_b_end(mast->bn, mast->l, mast->l->node);
3352 	mast->r->offset = mast->bn->b_end;
3353 	mab_set_b_end(mast->bn, mast->r, mast->r->node);
3354 	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3355 		cp = false;
3356 
3357 	if (cp)
3358 		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3359 			   mast->bn, mast->bn->b_end);
3360 
3361 	mast->bn->b_end--;
3362 	mast->bn->type = mte_node_type(mas->node);
3363 }
3364 
3365 /*
3366  * mast_split_data() - Split the data in the subtree state big node into regular
3367  * nodes.
3368  * @mast: The maple subtree state
3369  * @mas: The maple state
3370  * @split: The location to split the big node
3371  */
3372 static inline void mast_split_data(struct maple_subtree_state *mast,
3373 	   struct ma_state *mas, unsigned char split)
3374 {
3375 	unsigned char p_slot;
3376 
3377 	mab_mas_cp(mast->bn, 0, split, mast->l, true);
3378 	mte_set_pivot(mast->r->node, 0, mast->r->max);
3379 	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3380 	mast->l->offset = mte_parent_slot(mas->node);
3381 	mast->l->max = mast->bn->pivot[split];
3382 	mast->r->min = mast->l->max + 1;
3383 	if (mte_is_leaf(mas->node))
3384 		return;
3385 
3386 	p_slot = mast->orig_l->offset;
3387 	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3388 			     &p_slot, split);
3389 	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3390 			     &p_slot, split);
3391 }
3392 
3393 /*
3394  * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3395  * data to the right or left node if there is room.
3396  * @mas: The maple state
3397  * @height: The current height of the maple state
3398  * @mast: The maple subtree state
3399  * @left: Push left or not.
3400  *
3401  * Keeping the height of the tree low means faster lookups.
3402  *
3403  * Return: True if pushed, false otherwise.
3404  */
3405 static inline bool mas_push_data(struct ma_state *mas, int height,
3406 				 struct maple_subtree_state *mast, bool left)
3407 {
3408 	unsigned char slot_total = mast->bn->b_end;
3409 	unsigned char end, space, split;
3410 
3411 	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3412 	tmp_mas = *mas;
3413 	tmp_mas.depth = mast->l->depth;
3414 
3415 	if (left && !mas_prev_sibling(&tmp_mas))
3416 		return false;
3417 	else if (!left && !mas_next_sibling(&tmp_mas))
3418 		return false;
3419 
3420 	end = mas_data_end(&tmp_mas);
3421 	slot_total += end;
3422 	space = 2 * mt_slot_count(mas->node) - 2;
3423 	/* -2 instead of -1 to ensure there isn't a triple split */
3424 	if (ma_is_leaf(mast->bn->type))
3425 		space--;
3426 
3427 	if (mas->max == ULONG_MAX)
3428 		space--;
3429 
3430 	if (slot_total >= space)
3431 		return false;
3432 
3433 	/* Get the data; Fill mast->bn */
3434 	mast->bn->b_end++;
3435 	if (left) {
3436 		mab_shift_right(mast->bn, end + 1);
3437 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3438 		mast->bn->b_end = slot_total + 1;
3439 	} else {
3440 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3441 	}
3442 
3443 	/* Configure mast for splitting of mast->bn */
3444 	split = mt_slots[mast->bn->type] - 2;
3445 	if (left) {
3446 		/*  Switch mas to prev node  */
3447 		mat_add(mast->free, mas->node);
3448 		*mas = tmp_mas;
3449 		/* Start using mast->l for the left side. */
3450 		tmp_mas.node = mast->l->node;
3451 		*mast->l = tmp_mas;
3452 	} else {
3453 		mat_add(mast->free, tmp_mas.node);
3454 		tmp_mas.node = mast->r->node;
3455 		*mast->r = tmp_mas;
3456 		split = slot_total - split;
3457 	}
3458 	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3459 	/* Update parent slot for split calculation. */
3460 	if (left)
3461 		mast->orig_l->offset += end + 1;
3462 
3463 	mast_split_data(mast, mas, split);
3464 	mast_fill_bnode(mast, mas, 2);
3465 	mas_split_final_node(mast, mas, height + 1);
3466 	return true;
3467 }
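/*
 * Space check example (illustrative, assuming 16-slot nodes): pushing between
 * two leaves allows space = 2 * 16 - 2 = 30 slots, reduced by one for leaves
 * and by one more when mas->max == ULONG_MAX.  The push proceeds only while
 * the combined data (slot_total) stays below that limit; otherwise the node
 * is split as usual.
 */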
3468 
3469 /*
3470  * mas_split() - Split data that is too big for one node into two.
3471  * @mas: The maple state
3472  * @b_node: The maple big node
3473  * Return: 1 on success, 0 on failure.
3474  */
3475 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3476 {
3477 	struct maple_subtree_state mast;
3478 	int height = 0;
3479 	unsigned char mid_split, split = 0;
3480 
3481 	/*
3482 	 * Splitting is handled differently from any other B-tree; the Maple
3483 	 * Tree splits upwards.  Splitting up means that the split operation
3484 	 * occurs when the walk of the tree hits the leaves and not on the way
3485 	 * down.  The reason for splitting up is that it is impossible to know
3486 	 * how much space will be needed until the leaf is (or leaves are)
3487 	 * reached.  Since overwriting data is allowed and a range could
3488 	 * overwrite more than one range or result in changing one entry into 3
3489 	 * entries, it is impossible to know if a split is required until the
3490 	 * data is examined.
3491 	 *
3492 	 * Splitting is a balancing act between keeping allocations to a minimum
3493 	 * and avoiding a 'jitter' event where a tree is expanded to make room
3494 	 * for an entry followed by a contraction when the entry is removed.  To
3495 	 * accomplish the balance, there are empty slots remaining in both left
3496 	 * and right nodes after a split.
3497 	 */
3498 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3499 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3500 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3501 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3502 	MA_TOPIARY(mat, mas->tree);
3503 
3504 	trace_ma_op(__func__, mas);
3505 	mas->depth = mas_mt_height(mas);
3506 	/* Allocation failures will happen early. */
3507 	mas_node_count(mas, 1 + mas->depth * 2);
3508 	if (mas_is_err(mas))
3509 		return 0;
3510 
3511 	mast.l = &l_mas;
3512 	mast.r = &r_mas;
3513 	mast.orig_l = &prev_l_mas;
3514 	mast.orig_r = &prev_r_mas;
3515 	mast.free = &mat;
3516 	mast.bn = b_node;
3517 
3518 	while (height++ <= mas->depth) {
3519 		if (mt_slots[b_node->type] > b_node->b_end) {
3520 			mas_split_final_node(&mast, mas, height);
3521 			break;
3522 		}
3523 
3524 		l_mas = r_mas = *mas;
3525 		l_mas.node = mas_new_ma_node(mas, b_node);
3526 		r_mas.node = mas_new_ma_node(mas, b_node);
3527 		/*
3528 		 * Another way that 'jitter' is avoided is to terminate a split up early if the
3529 		 * left or right node has space to spare.  This is referred to as "pushing left"
3530 		 * or "pushing right" and is similar to the B* tree, except the nodes left or
3531 		 * right can rarely be reused due to RCU, but the ripple upwards is halted which
3532 		 * is a significant savings.
3533 		 */
3534 		/* Try to push left. */
3535 		if (mas_push_data(mas, height, &mast, true))
3536 			break;
3537 
3538 		/* Try to push right. */
3539 		if (mas_push_data(mas, height, &mast, false))
3540 			break;
3541 
3542 		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3543 		mast_split_data(&mast, mas, split);
3544 		/*
3545 		 * mab_mas_cp() in the call above overwrites r->max; restore it
3546 		 * to the usually-correct value of mas->max.
3547 		 */
3548 		mast.r->max = mas->max;
3549 		mast_fill_bnode(&mast, mas, 1);
3550 		prev_l_mas = *mast.l;
3551 		prev_r_mas = *mast.r;
3552 	}
3553 
3554 	/* Set the original node as dead */
3555 	mat_add(mast.free, mas->node);
3556 	mas->node = l_mas.node;
3557 	mas_wmb_replace(mas, mast.free, NULL);
3558 	mtree_range_walk(mas);
3559 	return 1;
3560 }
3561 
3562 /*
3563  * mas_reuse_node() - Reuse the node to store the data.
3564  * @wr_mas: The maple write state
3565  * @bn: The maple big node
3566  * @end: The end of the data.
3567  *
3568  * Will always return false in RCU mode.
3569  *
3570  * Return: True if node was reused, false otherwise.
3571  */
3572 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3573 			  struct maple_big_node *bn, unsigned char end)
3574 {
3575 	/* Need to be rcu safe. */
3576 	if (mt_in_rcu(wr_mas->mas->tree))
3577 		return false;
3578 
3579 	if (end > bn->b_end) {
3580 		int clear = mt_slots[wr_mas->type] - bn->b_end;
3581 
3582 		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3583 		memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear);
3584 	}
3585 	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3586 	return true;
3587 }
3588 
3589 /*
3590  * mas_commit_b_node() - Commit the big node into the tree.
3591  * @wr_mas: The maple write state
3592  * @b_node: The maple big node
3593  * @end: The end of the data.
3594  */
3595 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3596 			    struct maple_big_node *b_node, unsigned char end)
3597 {
3598 	struct maple_node *node;
3599 	unsigned char b_end = b_node->b_end;
3600 	enum maple_type b_type = b_node->type;
3601 
3602 	if ((b_end < mt_min_slots[b_type]) &&
3603 	    (!mte_is_root(wr_mas->mas->node)) &&
3604 	    (mas_mt_height(wr_mas->mas) > 1))
3605 		return mas_rebalance(wr_mas->mas, b_node);
3606 
3607 	if (b_end >= mt_slots[b_type])
3608 		return mas_split(wr_mas->mas, b_node);
3609 
3610 	if (mas_reuse_node(wr_mas, b_node, end))
3611 		goto reuse_node;
3612 
3613 	mas_node_count(wr_mas->mas, 1);
3614 	if (mas_is_err(wr_mas->mas))
3615 		return 0;
3616 
3617 	node = mas_pop_node(wr_mas->mas);
3618 	node->parent = mas_mn(wr_mas->mas)->parent;
3619 	wr_mas->mas->node = mt_mk_node(node, b_type);
3620 	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3621 	mas_replace(wr_mas->mas, false);
3622 reuse_node:
3623 	mas_update_gap(wr_mas->mas);
3624 	return 1;
3625 }
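/*
 * Commit decision sketch (illustrative, assuming 16-slot nodes with an
 * assumed minimum occupancy of 6): a non-root node committing b_end = 4 is
 * rebalanced, b_end = 16 or more is split, and anything in between is written
 * back either in place (no RCU readers) or into one newly allocated
 * replacement node.
 */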
3626 
3627 /*
3628  * mas_root_expand() - Expand a root to a node
3629  * @mas: The maple state
3630  * @entry: The entry to store into the tree
3631  */
3632 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3633 {
3634 	void *contents = mas_root_locked(mas);
3635 	enum maple_type type = maple_leaf_64;
3636 	struct maple_node *node;
3637 	void __rcu **slots;
3638 	unsigned long *pivots;
3639 	int slot = 0;
3640 
3641 	mas_node_count(mas, 1);
3642 	if (unlikely(mas_is_err(mas)))
3643 		return 0;
3644 
3645 	node = mas_pop_node(mas);
3646 	pivots = ma_pivots(node, type);
3647 	slots = ma_slots(node, type);
3648 	node->parent = ma_parent_ptr(
3649 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3650 	mas->node = mt_mk_node(node, type);
3651 
3652 	if (mas->index) {
3653 		if (contents) {
3654 			rcu_assign_pointer(slots[slot], contents);
3655 			if (likely(mas->index > 1))
3656 				slot++;
3657 		}
3658 		pivots[slot++] = mas->index - 1;
3659 	}
3660 
3661 	rcu_assign_pointer(slots[slot], entry);
3662 	mas->offset = slot;
3663 	pivots[slot] = mas->last;
3664 	if (mas->last != ULONG_MAX)
3665 		pivots[++slot] = ULONG_MAX;
3666 
3667 	mas->depth = 1;
3668 	mas_set_height(mas);
3669 	ma_set_meta(node, maple_leaf_64, 0, slot);
3670 	/* swap the new root into the tree */
3671 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3672 	return slot;
3673 }
3674 
3675 static inline void mas_store_root(struct ma_state *mas, void *entry)
3676 {
3677 	if (likely((mas->last != 0) || (mas->index != 0)))
3678 		mas_root_expand(mas, entry);
3679 	else if (((unsigned long) (entry) & 3) == 2)
3680 		mas_root_expand(mas, entry);
3681 	else {
3682 		rcu_assign_pointer(mas->tree->ma_root, entry);
3683 		mas->node = MAS_START;
3684 	}
3685 }
3686 
3687 /*
3688  * mas_is_span_wr() - Check if the write needs to be treated as a write that
3689  * spans the node.
3690  * @wr_mas: The maple write state
3691  *
3692  * Spanning writes are writes that start in one node and end in another, or
3693  * writes where storing a %NULL would cause the node to end with a %NULL.
3697  *
3698  * Return: True if this is a spanning write, false otherwise.
3699  */
3700 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3701 {
3702 	unsigned long max = wr_mas->r_max;
3703 	unsigned long last = wr_mas->mas->last;
3704 	enum maple_type type = wr_mas->type;
3705 	void *entry = wr_mas->entry;
3706 
3707 	/* Contained in this pivot, fast path */
3708 	if (last < max)
3709 		return false;
3710 
3711 	if (ma_is_leaf(type)) {
3712 		max = wr_mas->mas->max;
3713 		if (last < max)
3714 			return false;
3715 	}
3716 
3717 	if (last == max) {
3718 		/*
3719 		 * The last entry of a leaf node cannot be NULL unless it is
3720 		 * the rightmost node (i.e. when writing ULONG_MAX); otherwise
3721 		 * the write spans slots.
3721 		 */
3722 		if (entry || last == ULONG_MAX)
3723 			return false;
3724 	}
3725 
3726 	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
3727 	return true;
3728 }
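
/*
 * Worked example for mas_is_span_wr() (values assumed): take a leaf covering
 * [10, 30].  Writing [15, 25] is contained (25 < 30), so it does not span.
 * Writing [15, 35] ends in another node, so it spans.  Writing a %NULL over
 * [15, 30] also spans, because a leaf may only end in %NULL when it is the
 * rightmost node (max of ULONG_MAX); a rebalance is required.
 */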
3729 
3730 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3731 {
3732 	wr_mas->type = mte_node_type(wr_mas->mas->node);
3733 	mas_wr_node_walk(wr_mas);
3734 	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3735 }
3736 
3737 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3738 {
3739 	wr_mas->mas->max = wr_mas->r_max;
3740 	wr_mas->mas->min = wr_mas->r_min;
3741 	wr_mas->mas->node = wr_mas->content;
3742 	wr_mas->mas->offset = 0;
3743 	wr_mas->mas->depth++;
3744 }
3745 /*
3746  * mas_wr_walk() - Walk the tree for a write.
3747  * @wr_mas: The maple write state
3748  *
3749  * Uses mas_slot_locked() and does not need to worry about dead nodes.
3750  *
3751  * Return: True if it's contained in a node, false on spanning write.
3752  */
3753 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3754 {
3755 	struct ma_state *mas = wr_mas->mas;
3756 
3757 	while (true) {
3758 		mas_wr_walk_descend(wr_mas);
3759 		if (unlikely(mas_is_span_wr(wr_mas)))
3760 			return false;
3761 
3762 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3763 						  mas->offset);
3764 		if (ma_is_leaf(wr_mas->type))
3765 			return true;
3766 
3767 		mas_wr_walk_traverse(wr_mas);
3768 	}
3769 
3770 	return true;
3771 }
3772 
3773 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3774 {
3775 	struct ma_state *mas = wr_mas->mas;
3776 
3777 	while (true) {
3778 		mas_wr_walk_descend(wr_mas);
3779 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3780 						  mas->offset);
3781 		if (ma_is_leaf(wr_mas->type))
3782 			return true;
3783 		mas_wr_walk_traverse(wr_mas);
3785 	}
3786 	return true;
3787 }
3788 /*
3789  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3790  * @l_wr_mas: The left maple write state
3791  * @r_wr_mas: The right maple write state
3792  */
3793 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3794 					    struct ma_wr_state *r_wr_mas)
3795 {
3796 	struct ma_state *r_mas = r_wr_mas->mas;
3797 	struct ma_state *l_mas = l_wr_mas->mas;
3798 	unsigned char l_slot;
3799 
3800 	l_slot = l_mas->offset;
3801 	if (!l_wr_mas->content)
3802 		l_mas->index = l_wr_mas->r_min;
3803 
3804 	if ((l_mas->index == l_wr_mas->r_min) &&
3805 		 (l_slot &&
3806 		  !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3807 		if (l_slot > 1)
3808 			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3809 		else
3810 			l_mas->index = l_mas->min;
3811 
3812 		l_mas->offset = l_slot - 1;
3813 	}
3814 
3815 	if (!r_wr_mas->content) {
3816 		if (r_mas->last < r_wr_mas->r_max)
3817 			r_mas->last = r_wr_mas->r_max;
3818 		r_mas->offset++;
3819 	} else if ((r_mas->last == r_wr_mas->r_max) &&
3820 	    (r_mas->last < r_mas->max) &&
3821 	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3822 		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3823 					     r_wr_mas->type, r_mas->offset + 1);
3824 		r_mas->offset++;
3825 	}
3826 }
3827 
3828 static inline void *mas_state_walk(struct ma_state *mas)
3829 {
3830 	void *entry;
3831 
3832 	entry = mas_start(mas);
3833 	if (mas_is_none(mas))
3834 		return NULL;
3835 
3836 	if (mas_is_ptr(mas))
3837 		return entry;
3838 
3839 	return mtree_range_walk(mas);
3840 }
3841 
3842 /*
3843  * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3844  * to date.
3845  *
3846  * @mas: The maple state.
3847  *
3848  * Note: Leaves mas in undesirable state.
3849  * Return: The entry for @mas->index or %NULL on dead node.
3850  */
3851 static inline void *mtree_lookup_walk(struct ma_state *mas)
3852 {
3853 	unsigned long *pivots;
3854 	unsigned char offset;
3855 	struct maple_node *node;
3856 	struct maple_enode *next;
3857 	enum maple_type type;
3858 	void __rcu **slots;
3859 	unsigned char end;
3860 	unsigned long max;
3861 
3862 	next = mas->node;
3863 	max = ULONG_MAX;
3864 	do {
3865 		offset = 0;
3866 		node = mte_to_node(next);
3867 		type = mte_node_type(next);
3868 		pivots = ma_pivots(node, type);
3869 		end = ma_data_end(node, type, pivots, max);
3870 		if (unlikely(ma_dead_node(node)))
3871 			goto dead_node;
3872 		do {
3873 			if (pivots[offset] >= mas->index) {
3874 				max = pivots[offset];
3875 				break;
3876 			}
3877 		} while (++offset < end);
3878 
3879 		slots = ma_slots(node, type);
3880 		next = mt_slot(mas->tree, slots, offset);
3881 		if (unlikely(ma_dead_node(node)))
3882 			goto dead_node;
3883 	} while (!ma_is_leaf(type));
3884 
3885 	return (void *)next;
3886 
3887 dead_node:
3888 	mas_reset(mas);
3889 	return NULL;
3890 }
3891 
3892 /*
3893  * mas_new_root() - Create a new root node that only contains the entry passed
3894  * in.
3895  * @mas: The maple state
3896  * @entry: The entry to store.
3897  *
3898  * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
3899  *
3900  * Return: 0 on error, 1 on success.
3901  */
3902 static inline int mas_new_root(struct ma_state *mas, void *entry)
3903 {
3904 	struct maple_enode *root = mas_root_locked(mas);
3905 	enum maple_type type = maple_leaf_64;
3906 	struct maple_node *node;
3907 	void __rcu **slots;
3908 	unsigned long *pivots;
3909 
3910 	if (!entry && !mas->index && mas->last == ULONG_MAX) {
3911 		mas->depth = 0;
3912 		mas_set_height(mas);
3913 		rcu_assign_pointer(mas->tree->ma_root, entry);
3914 		mas->node = MAS_START;
3915 		goto done;
3916 	}
3917 
3918 	mas_node_count(mas, 1);
3919 	if (mas_is_err(mas))
3920 		return 0;
3921 
3922 	node = mas_pop_node(mas);
3923 	pivots = ma_pivots(node, type);
3924 	slots = ma_slots(node, type);
3925 	node->parent = ma_parent_ptr(
3926 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3927 	mas->node = mt_mk_node(node, type);
3928 	rcu_assign_pointer(slots[0], entry);
3929 	pivots[0] = mas->last;
3930 	mas->depth = 1;
3931 	mas_set_height(mas);
3932 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3933 
3934 done:
3935 	if (xa_is_node(root))
3936 		mte_destroy_walk(root, mas->tree);
3937 
3938 	return 1;
3939 }
3940 /*
3941  * mas_wr_spanning_store() - Create a subtree with the store operation completed
3942  * and new nodes where necessary, then place the sub-tree in the actual tree.
3943  * Note that mas is expected to point to the node which caused the store to
3944  * span.
3945  * @wr_mas: The maple write state
3946  *
3947  * Return: 0 on error, positive on success.
3948  */
3949 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
3950 {
3951 	struct maple_subtree_state mast;
3952 	struct maple_big_node b_node;
3953 	struct ma_state *mas;
3954 	unsigned char height;
3955 
3956 	/* Left and Right side of spanning store */
3957 	MA_STATE(l_mas, NULL, 0, 0);
3958 	MA_STATE(r_mas, NULL, 0, 0);
3959 
3960 	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
3961 	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
3962 
3963 	/*
3964 	 * A store operation that spans multiple nodes is called a spanning
3965 	 * store and is handled early in the store call stack by the function
3966 	 * mas_is_span_wr().  When a spanning store is identified, the maple
3967 	 * state is duplicated.  The first maple state walks the left tree path
3968 	 * to ``index``, the duplicate walks the right tree path to ``last``.
3969 	 * The data in the two nodes are combined into a single node, two nodes,
3970 	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
3971 	 * written to the last entry of a node is considered a spanning store as
3972 	 * a rebalance is required for the operation to complete and an overflow
3973 	 * of data may happen.
3974 	 */
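	/*
	 * Sketch of the duplication (indices assumed): storing over
	 * [120, 250] where 120 and 250 fall in different leaves.  l_mas
	 * walks down to the leaf containing 120 and r_mas to the slot just
	 * after 250; the surviving data from both paths is gathered into a
	 * big node and redistributed by mas_spanning_rebalance() below.
	 */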
3975 	mas = wr_mas->mas;
3976 	trace_ma_op(__func__, mas);
3977 
3978 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
3979 		return mas_new_root(mas, wr_mas->entry);
3980 	/*
3981 	 * Node rebalancing may occur due to this store, so there may be three new
3982 	 * entries per level plus a new root.
3983 	 */
3984 	height = mas_mt_height(mas);
3985 	mas_node_count(mas, 1 + height * 3);
3986 	if (mas_is_err(mas))
3987 		return 0;
3988 
3989 	/*
3990 	 * Set up right side.  Need to get to the next offset after the spanning
3991 	 * store to ensure it's not NULL and to combine both the next node and
3992 	 * the node with the start together.
3993 	 */
3994 	r_mas = *mas;
3995 	/* Avoid overflow, walk to next slot in the tree. */
3996 	if (r_mas.last + 1)
3997 		r_mas.last++;
3998 
3999 	r_mas.index = r_mas.last;
4000 	mas_wr_walk_index(&r_wr_mas);
4001 	r_mas.last = r_mas.index = mas->last;
4002 
4003 	/* Set up left side. */
4004 	l_mas = *mas;
4005 	mas_wr_walk_index(&l_wr_mas);
4006 
4007 	if (!wr_mas->entry) {
4008 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4009 		mas->offset = l_mas.offset;
4010 		mas->index = l_mas.index;
4011 		mas->last = l_mas.last = r_mas.last;
4012 	}
4013 
4014 	/* expanding NULLs may make this cover the entire range */
4015 	if (!l_mas.index && r_mas.last == ULONG_MAX) {
4016 		mas_set_range(mas, 0, ULONG_MAX);
4017 		return mas_new_root(mas, wr_mas->entry);
4018 	}
4019 
4020 	memset(&b_node, 0, sizeof(struct maple_big_node));
4021 	/* Copy l_mas and store the value in b_node. */
4022 	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4023 	/* Copy r_mas into b_node. */
4024 	if (r_mas.offset <= r_wr_mas.node_end)
4025 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4026 			   &b_node, b_node.b_end + 1);
4027 	else
4028 		b_node.b_end++;
4029 
4030 	/* Stop spanning searches by searching for just index. */
4031 	l_mas.index = l_mas.last = mas->index;
4032 
4033 	mast.bn = &b_node;
4034 	mast.orig_l = &l_mas;
4035 	mast.orig_r = &r_mas;
4036 	/* Combine l_mas and r_mas and split them up evenly again. */
4037 	return mas_spanning_rebalance(mas, &mast, height + 1);
4038 }
4039 
4040 /*
4041  * mas_wr_node_store() - Attempt to store the value in a node
4042  * @wr_mas: The maple write state
4043  * @new_end: The end of the node after the modification
4044  *
4044  * Attempts to reuse the node, but may allocate.
4045  *
4046  * Return: True if stored, false otherwise
4047  */
4048 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
4049 				     unsigned char new_end)
4050 {
4051 	struct ma_state *mas = wr_mas->mas;
4052 	void __rcu **dst_slots;
4053 	unsigned long *dst_pivots;
4054 	unsigned char dst_offset, offset_end = wr_mas->offset_end;
4055 	struct maple_node reuse, *newnode;
4056 	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
4057 	bool in_rcu = mt_in_rcu(mas->tree);
4058 
4059 	/* Not enough data would remain after the store; let the slow path rebalance. */
4060 	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4061 	    !(mas->mas_flags & MA_STATE_BULK))
4062 		return false;
4063 
4064 	if (mas->last == wr_mas->end_piv)
4065 		offset_end++; /* don't copy this offset */
4066 	else if (unlikely(wr_mas->r_max == ULONG_MAX))
4067 		mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4068 
4069 	/* set up node. */
4070 	if (in_rcu) {
4071 		mas_node_count(mas, 1);
4072 		if (mas_is_err(mas))
4073 			return false;
4074 
4075 		newnode = mas_pop_node(mas);
4076 	} else {
4077 		memset(&reuse, 0, sizeof(struct maple_node));
4078 		newnode = &reuse;
4079 	}
4080 
4081 	newnode->parent = mas_mn(mas)->parent;
4082 	dst_pivots = ma_pivots(newnode, wr_mas->type);
4083 	dst_slots = ma_slots(newnode, wr_mas->type);
4084 	/* Copy from start to insert point */
4085 	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
4086 	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);
4087 
4088 	/* Handle insert of new range starting after old range */
4089 	if (wr_mas->r_min < mas->index) {
4090 		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
4091 		dst_pivots[mas->offset++] = mas->index - 1;
4092 	}
4093 
4094 	/* Store the new entry and range end. */
4095 	if (mas->offset < node_pivots)
4096 		dst_pivots[mas->offset] = mas->last;
4097 	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);
4098 
4099 	/*
4100 	 * This range wrote to the end of the node, or it overwrote the rest
4101 	 * of the data.
4102 	 */
4103 	if (offset_end > wr_mas->node_end)
4104 		goto done;
4105 
4106 	dst_offset = mas->offset + 1;
4107 	/* Copy to the end of node if necessary. */
4108 	copy_size = wr_mas->node_end - offset_end + 1;
4109 	memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
4110 	       sizeof(void *) * copy_size);
4111 	memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
4112 	       sizeof(unsigned long) * (copy_size - 1));
4113 
4114 	if (new_end < node_pivots)
4115 		dst_pivots[new_end] = mas->max;
4116 
4117 done:
4118 	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4119 	if (in_rcu) {
4120 		mte_set_node_dead(mas->node);
4121 		mas->node = mt_mk_node(newnode, wr_mas->type);
4122 		mas_replace(mas, false);
4123 	} else {
4124 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4125 	}
4126 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4127 	mas_update_gap(mas);
4128 	return true;
4129 }
4130 
4131 /*
4132  * mas_wr_slot_store() - Attempt to store a value in a slot.
4133  * @wr_mas: the maple write state
4134  *
4135  * Return: True if stored, false otherwise
4136  */
4137 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4138 {
4139 	struct ma_state *mas = wr_mas->mas;
4140 	unsigned char offset = mas->offset;
4141 	void __rcu **slots = wr_mas->slots;
4142 	bool gap = false;
4143 
4144 	gap |= !mt_slot_locked(mas->tree, slots, offset);
4145 	gap |= !mt_slot_locked(mas->tree, slots, offset + 1);
4146 
4147 	if (wr_mas->offset_end - offset == 1) {
4148 		if (mas->index == wr_mas->r_min) {
4149 			/* Overwriting the range and a part of the next one */
4150 			rcu_assign_pointer(slots[offset], wr_mas->entry);
4151 			wr_mas->pivots[offset] = mas->last;
4152 		} else {
4153 			/* Overwriting a part of the range and the next one */
4154 			rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4155 			wr_mas->pivots[offset] = mas->index - 1;
4156 			mas->offset++; /* Keep mas accurate. */
4157 		}
4158 	} else if (!mt_in_rcu(mas->tree)) {
4159 		/*
4160 		 * Expand the range, only partially overwriting the previous and
4161 		 * next ranges
4162 		 */
4163 		gap |= !mt_slot_locked(mas->tree, slots, offset + 2);
4164 		rcu_assign_pointer(slots[offset + 1], wr_mas->entry);
4165 		wr_mas->pivots[offset] = mas->index - 1;
4166 		wr_mas->pivots[offset + 1] = mas->last;
4167 		mas->offset++; /* Keep mas accurate. */
4168 	} else {
4169 		return false;
4170 	}
4171 
4172 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4173 	/*
4174 	 * Only update gap when the new entry is empty or there is an empty
4175 	 * entry in the original two ranges.
4176 	 */
4177 	if (!wr_mas->entry || gap)
4178 		mas_update_gap(mas);
4179 
4180 	return true;
4181 }
4182 
4183 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4184 {
4185 	struct ma_state *mas = wr_mas->mas;
4186 
4187 	if (!wr_mas->slots[wr_mas->offset_end]) {
4188 		/* If this one is null, the next and prev are not */
4189 		mas->last = wr_mas->end_piv;
4190 	} else {
4191 		/* Check next slot(s) if we are overwriting the end */
4192 		if ((mas->last == wr_mas->end_piv) &&
4193 		    (wr_mas->node_end != wr_mas->offset_end) &&
4194 		    !wr_mas->slots[wr_mas->offset_end + 1]) {
4195 			wr_mas->offset_end++;
4196 			if (wr_mas->offset_end == wr_mas->node_end)
4197 				mas->last = mas->max;
4198 			else
4199 				mas->last = wr_mas->pivots[wr_mas->offset_end];
4200 			wr_mas->end_piv = mas->last;
4201 		}
4202 	}
4203 
4204 	if (!wr_mas->content) {
4205 		/* If this one is null, the next and prev are not */
4206 		mas->index = wr_mas->r_min;
4207 	} else {
4208 		/* Check prev slot if we are overwriting the start */
4209 		if (mas->index == wr_mas->r_min && mas->offset &&
4210 		    !wr_mas->slots[mas->offset - 1]) {
4211 			mas->offset--;
4212 			wr_mas->r_min = mas->index =
4213 				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4214 			wr_mas->r_max = wr_mas->pivots[mas->offset];
4215 		}
4216 	}
4217 }
4218 
4219 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4220 {
4221 	while ((wr_mas->offset_end < wr_mas->node_end) &&
4222 	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
4223 		wr_mas->offset_end++;
4224 
4225 	if (wr_mas->offset_end < wr_mas->node_end)
4226 		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
4227 	else
4228 		wr_mas->end_piv = wr_mas->mas->max;
4229 
4230 	if (!wr_mas->entry)
4231 		mas_wr_extend_null(wr_mas);
4232 }
4233 
4234 static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
4235 {
4236 	struct ma_state *mas = wr_mas->mas;
4237 	unsigned char new_end = wr_mas->node_end + 2;
4238 
4239 	new_end -= wr_mas->offset_end - mas->offset;
4240 	if (wr_mas->r_min == mas->index)
4241 		new_end--;
4242 
4243 	if (wr_mas->end_piv == mas->last)
4244 		new_end--;
4245 
4246 	return new_end;
4247 }
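
/*
 * Worked example for mas_wr_new_end() (values assumed): with node_end = 5
 * and a store covering offsets 2..3 (offset = 2, offset_end = 3),
 * new_end = 5 + 2 - (3 - 2) = 6.  If the store starts exactly at r_min the
 * leading pivot is unnecessary (new_end = 5), and if it also ends exactly
 * at end_piv the trailing pivot is too (new_end = 4: two old ranges
 * collapse into the one being written).
 */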
4248 
4249 /*
4250  * mas_wr_append() - Attempt to append the entry to the node.
4251  * @wr_mas: the maple write state
4252  * @new_end: The end of the node after the modification
4253  *
4253  * Return: True if appended, false otherwise
4254  */
4255 static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
4256 				 unsigned char new_end)
4257 {
4258 	unsigned char end = wr_mas->node_end;
4259 	struct ma_state *mas = wr_mas->mas;
4260 	unsigned char node_pivots = mt_pivots[wr_mas->type];
4261 
4262 	if (mas->offset != wr_mas->node_end)
4263 		return false;
4264 
4265 	if (new_end < node_pivots) {
4266 		wr_mas->pivots[new_end] = wr_mas->pivots[end];
4267 		ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4268 	}
4269 
4270 	if (new_end == wr_mas->node_end + 1) {
4271 		if (mas->last == wr_mas->r_max) {
4272 			/* Append to end of range */
4273 			rcu_assign_pointer(wr_mas->slots[new_end],
4274 					   wr_mas->entry);
4275 			wr_mas->pivots[end] = mas->index - 1;
4276 			mas->offset = new_end;
4277 		} else {
4278 			/* Append to start of range */
4279 			rcu_assign_pointer(wr_mas->slots[new_end],
4280 					   wr_mas->content);
4281 			wr_mas->pivots[end] = mas->last;
4282 			rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4283 		}
4284 	} else {
4285 		/* Append to the range without touching any boundaries. */
4286 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4287 		wr_mas->pivots[end + 1] = mas->last;
4288 		rcu_assign_pointer(wr_mas->slots[end + 1], wr_mas->entry);
4289 		wr_mas->pivots[end] = mas->index - 1;
4290 		mas->offset = end + 1;
4291 	}
4292 
4293 	if (!wr_mas->content || !wr_mas->entry)
4294 		mas_update_gap(mas);
4295 
4296 	return  true;
4297 }
4298 
4299 /*
4300  * mas_wr_bnode() - Slow path for a modification.
4301  * @wr_mas: The write maple state
4302  *
4303  * This is where split, rebalance end up.
4304  */
4305 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4306 {
4307 	struct maple_big_node b_node;
4308 
4309 	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4310 	memset(&b_node, 0, sizeof(struct maple_big_node));
4311 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4312 	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4313 }
4314 
4315 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4316 {
4317 	struct ma_state *mas = wr_mas->mas;
4318 	unsigned char new_end;
4319 
4320 	/* Direct replacement */
4321 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4322 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4323 		if (!!wr_mas->entry ^ !!wr_mas->content)
4324 			mas_update_gap(mas);
4325 		return;
4326 	}
4327 
4328 	/*
4329 	 * new_end exceeds the size of the maple node and cannot enter the fast
4330 	 * path.
4331 	 */
4332 	new_end = mas_wr_new_end(wr_mas);
4333 	if (new_end >= mt_slots[wr_mas->type])
4334 		goto slow_path;
4335 
4336 	/* Attempt to append */
4337 	if (mas_wr_append(wr_mas, new_end))
4338 		return;
4339 
4340 	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
4341 		return;
4342 
4343 	if (mas_wr_node_store(wr_mas, new_end))
4344 		return;
4345 
4346 	if (mas_is_err(mas))
4347 		return;
4348 
4349 slow_path:
4350 	mas_wr_bnode(wr_mas);
4351 }
4352 
4353 /*
4354  * mas_wr_store_entry() - Internal call to store a value
4355  * @wr_mas: The maple write state
4356  *
4357  * Return: The contents that were stored at the index.
4359  */
4360 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4361 {
4362 	struct ma_state *mas = wr_mas->mas;
4363 
4364 	wr_mas->content = mas_start(mas);
4365 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4366 		mas_store_root(mas, wr_mas->entry);
4367 		return wr_mas->content;
4368 	}
4369 
4370 	if (unlikely(!mas_wr_walk(wr_mas))) {
4371 		mas_wr_spanning_store(wr_mas);
4372 		return wr_mas->content;
4373 	}
4374 
4375 	/* At this point, we are at the leaf node that needs to be altered. */
4376 	mas_wr_end_piv(wr_mas);
4377 	/* New root for a single pointer */
4378 	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4379 		mas_new_root(mas, wr_mas->entry);
4380 		return wr_mas->content;
4381 	}
4382 
4383 	mas_wr_modify(wr_mas);
4384 	return wr_mas->content;
4385 }
4386 
4387 /**
4388  * mas_insert() - Internal call to insert a value
4389  * @mas: The maple state
4390  * @entry: The entry to store
4391  *
4392  * Return: %NULL or the contents that already exists at the requested index
4393  * otherwise.  The maple state needs to be checked for error conditions.
4394  */
4395 static inline void *mas_insert(struct ma_state *mas, void *entry)
4396 {
4397 	MA_WR_STATE(wr_mas, mas, entry);
4398 
4399 	/*
4400 	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4401 	 * tree.  If the insert fits exactly into an existing gap with a value
4402 	 * of NULL, then the slot only needs to be written with the new value.
4403 	 * If the range being inserted is adjacent to another range, then only a
4404 	 * single pivot needs to be inserted (as well as writing the entry).  If
4405 	 * the new range is within a gap but does not touch any other ranges,
4406 	 * then two pivots need to be inserted: the start - 1, and the end.  As
4407 	 * usual, the entry must be written.  Most operations require a new node
4408 	 * to be allocated and replace an existing node to ensure RCU safety,
4409 	 * when in RCU mode.  The exception to requiring a newly allocated node
4410 	 * is when inserting at the end of a node (appending).  When done
4411 	 * carefully, appending can reuse the node in place.
4412 	 */
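	/*
	 * Concrete illustration (ranges assumed): with an entry at [0, 9]
	 * and NULL over [10, 19], inserting at [10, 19] fills the gap
	 * exactly and adds 0 pivots; inserting at [10, 15] shares the gap's
	 * lower boundary and adds 1 pivot (15); inserting at [12, 15]
	 * touches neither boundary and adds 2 pivots (11 and 15).
	 */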
4413 	wr_mas.content = mas_start(mas);
4414 	if (wr_mas.content)
4415 		goto exists;
4416 
4417 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4418 		mas_store_root(mas, entry);
4419 		return NULL;
4420 	}
4421 
4422 	/* spanning writes always overwrite something */
4423 	if (!mas_wr_walk(&wr_mas))
4424 		goto exists;
4425 
4426 	/* At this point, we are at the leaf node that needs to be altered. */
4427 	wr_mas.offset_end = mas->offset;
4428 	wr_mas.end_piv = wr_mas.r_max;
4429 
4430 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4431 		goto exists;
4432 
4433 	if (!entry)
4434 		return NULL;
4435 
4436 	mas_wr_modify(&wr_mas);
4437 	return wr_mas.content;
4438 
4439 exists:
4440 	mas_set_err(mas, -EEXIST);
4441 	return wr_mas.content;
4442 
4443 }
4444 
4445 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4446 {
4447 retry:
4448 	mas_set(mas, index);
4449 	mas_state_walk(mas);
4450 	if (mas_is_start(mas))
4451 		goto retry;
4452 }
4453 
4454 static inline bool mas_rewalk_if_dead(struct ma_state *mas,
4455 		struct maple_node *node, const unsigned long index)
4456 {
4457 	if (unlikely(ma_dead_node(node))) {
4458 		mas_rewalk(mas, index);
4459 		return true;
4460 	}
4461 	return false;
4462 }
4463 
4464 /*
4465  * mas_prev_node() - Find the previous non-null entry at the same level in
4466  * the tree.
4467  * @mas: The maple state
4468  * @min: The lower limit to search
4469  *
4470  * The previous node value will be mas->node[mas->offset] or MAS_NONE.
4471  * Return: 1 if the node is dead, 0 otherwise.
4472  */
4473 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4474 {
4475 	enum maple_type mt;
4476 	int offset, level;
4477 	void __rcu **slots;
4478 	struct maple_node *node;
4479 	unsigned long *pivots;
4480 	unsigned long max;
4481 
4482 	node = mas_mn(mas);
4483 	if (!mas->min)
4484 		goto no_entry;
4485 
4486 	max = mas->min - 1;
4487 	if (max < min)
4488 		goto no_entry;
4489 
4490 	level = 0;
4491 	do {
4492 		if (ma_is_root(node))
4493 			goto no_entry;
4494 
4495 		/* Walk up. */
4496 		if (unlikely(mas_ascend(mas)))
4497 			return 1;
4498 		offset = mas->offset;
4499 		level++;
4500 		node = mas_mn(mas);
4501 	} while (!offset);
4502 
4503 	offset--;
4504 	mt = mte_node_type(mas->node);
4505 	while (level > 1) {
4506 		level--;
4507 		slots = ma_slots(node, mt);
4508 		mas->node = mas_slot(mas, slots, offset);
4509 		if (unlikely(ma_dead_node(node)))
4510 			return 1;
4511 
4512 		mt = mte_node_type(mas->node);
4513 		node = mas_mn(mas);
4514 		pivots = ma_pivots(node, mt);
4515 		offset = ma_data_end(node, mt, pivots, max);
4516 		if (unlikely(ma_dead_node(node)))
4517 			return 1;
4518 	}
4519 
4520 	slots = ma_slots(node, mt);
4521 	mas->node = mas_slot(mas, slots, offset);
4522 	pivots = ma_pivots(node, mt);
4523 	if (unlikely(ma_dead_node(node)))
4524 		return 1;
4525 
4526 	if (likely(offset))
4527 		mas->min = pivots[offset - 1] + 1;
4528 	mas->max = max;
4529 	mas->offset = mas_data_end(mas);
4530 	if (unlikely(mte_dead_node(mas->node)))
4531 		return 1;
4532 
4533 	return 0;
4534 
4535 no_entry:
4536 	if (unlikely(ma_dead_node(node)))
4537 		return 1;
4538 
4539 	mas->node = MAS_NONE;
4540 	return 0;
4541 }
4542 
4543 /*
4544  * mas_prev_slot() - Get the entry in the previous slot
4545  *
4546  * @mas: The maple state
4547  * @min: The minimum starting range
4548  * @empty: Can be empty
4548  *
4549  * Return: The entry in the previous slot which is possibly NULL
4550  */
4551 static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
4552 {
4553 	void *entry;
4554 	void __rcu **slots;
4555 	unsigned long pivot;
4556 	enum maple_type type;
4557 	unsigned long *pivots;
4558 	struct maple_node *node;
4559 	unsigned long save_point = mas->index;
4560 
4561 retry:
4562 	node = mas_mn(mas);
4563 	type = mte_node_type(mas->node);
4564 	pivots = ma_pivots(node, type);
4565 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4566 		goto retry;
4567 
4568 again:
4569 	if (mas->min <= min) {
4570 		pivot = mas_safe_min(mas, pivots, mas->offset);
4571 
4572 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4573 			goto retry;
4574 
4575 		if (pivot <= min)
4576 			return NULL;
4577 	}
4578 
4579 	if (likely(mas->offset)) {
4580 		mas->offset--;
4581 		mas->last = mas->index - 1;
4582 		mas->index = mas_safe_min(mas, pivots, mas->offset);
4583 	} else  {
4584 		if (mas_prev_node(mas, min)) {
4585 			mas_rewalk(mas, save_point);
4586 			goto retry;
4587 		}
4588 
4589 		if (mas_is_none(mas))
4590 			return NULL;
4591 
4592 		mas->last = mas->max;
4593 		node = mas_mn(mas);
4594 		type = mte_node_type(mas->node);
4595 		pivots = ma_pivots(node, type);
4596 		mas->index = pivots[mas->offset - 1] + 1;
4597 	}
4598 
4599 	slots = ma_slots(node, type);
4600 	entry = mas_slot(mas, slots, mas->offset);
4601 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4602 		goto retry;
4603 
4604 	if (likely(entry))
4605 		return entry;
4606 
4607 	if (!empty)
4608 		goto again;
4609 
4610 	return entry;
4611 }
4612 
4613 /*
4614  * mas_next_node() - Get the next node at the same level in the tree.
4615  * @mas: The maple state
4616  * @node: The maple node
4617  * @max: The maximum pivot value to check.
4617  *
4618  * The next value will be mas->node[mas->offset] or MAS_NONE.
4619  * Return: 1 on dead node, 0 otherwise.
4620  */
4621 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4622 				unsigned long max)
4623 {
4624 	unsigned long min;
4625 	unsigned long *pivots;
4626 	struct maple_enode *enode;
4627 	int level = 0;
4628 	unsigned char node_end;
4629 	enum maple_type mt;
4630 	void __rcu **slots;
4631 
4632 	if (mas->max >= max)
4633 		goto no_entry;
4634 
4635 	min = mas->max + 1;
4636 	level = 0;
4637 	do {
4638 		if (ma_is_root(node))
4639 			goto no_entry;
4640 
4641 		/* Walk up. */
4642 		if (unlikely(mas_ascend(mas)))
4643 			return 1;
4644 
4645 		level++;
4646 		node = mas_mn(mas);
4647 		mt = mte_node_type(mas->node);
4648 		pivots = ma_pivots(node, mt);
4649 		node_end = ma_data_end(node, mt, pivots, mas->max);
4650 		if (unlikely(ma_dead_node(node)))
4651 			return 1;
4652 
4653 	} while (unlikely(mas->offset == node_end));
4654 
4655 	slots = ma_slots(node, mt);
4656 	mas->offset++;
4657 	enode = mas_slot(mas, slots, mas->offset);
4658 	if (unlikely(ma_dead_node(node)))
4659 		return 1;
4660 
4661 	if (level > 1)
4662 		mas->offset = 0;
4663 
4664 	while (unlikely(level > 1)) {
4665 		level--;
4666 		mas->node = enode;
4667 		node = mas_mn(mas);
4668 		mt = mte_node_type(mas->node);
4669 		slots = ma_slots(node, mt);
4670 		enode = mas_slot(mas, slots, 0);
4671 		if (unlikely(ma_dead_node(node)))
4672 			return 1;
4673 	}
4674 
4675 	if (!mas->offset)
4676 		pivots = ma_pivots(node, mt);
4677 
4678 	mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
4679 	if (unlikely(ma_dead_node(node)))
4680 		return 1;
4681 
4682 	mas->node = enode;
4683 	mas->min = min;
4684 	return 0;
4685 
4686 no_entry:
4687 	if (unlikely(ma_dead_node(node)))
4688 		return 1;
4689 
4690 	mas->node = MAS_NONE;
4691 	return 0;
4692 }
4693 
4694 /*
4695  * mas_next_slot() - Get the entry in the next slot
4696  *
4697  * @mas: The maple state
4698  * @max: The maximum starting range
4699  * @empty: Can be empty
4700  *
4701  * Return: The entry in the next slot which is possibly NULL
4702  */
4703 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
4704 {
4705 	void __rcu **slots;
4706 	unsigned long *pivots;
4707 	unsigned long pivot;
4708 	enum maple_type type;
4709 	struct maple_node *node;
4710 	unsigned char data_end;
4711 	unsigned long save_point = mas->last;
4712 	void *entry;
4713 
4714 retry:
4715 	node = mas_mn(mas);
4716 	type = mte_node_type(mas->node);
4717 	pivots = ma_pivots(node, type);
4718 	data_end = ma_data_end(node, type, pivots, mas->max);
4719 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4720 		goto retry;
4721 
4722 again:
4723 	if (mas->max >= max) {
4724 		if (likely(mas->offset < data_end))
4725 			pivot = pivots[mas->offset];
4726 		else
4727 			return NULL; /* must be mas->max */
4728 
4729 		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4730 			goto retry;
4731 
4732 		if (pivot >= max)
4733 			return NULL;
4734 	}
4735 
4736 	if (likely(mas->offset < data_end)) {
4737 		mas->index = pivots[mas->offset] + 1;
4738 		mas->offset++;
4739 		if (likely(mas->offset < data_end))
4740 			mas->last = pivots[mas->offset];
4741 		else
4742 			mas->last = mas->max;
4743 	} else  {
4744 		if (mas_next_node(mas, node, max)) {
4745 			mas_rewalk(mas, save_point);
4746 			goto retry;
4747 		}
4748 
4749 		if (mas_is_none(mas))
4750 			return NULL;
4751 
4752 		mas->offset = 0;
4753 		mas->index = mas->min;
4754 		node = mas_mn(mas);
4755 		type = mte_node_type(mas->node);
4756 		pivots = ma_pivots(node, type);
4757 		mas->last = pivots[0];
4758 	}
4759 
4760 	slots = ma_slots(node, type);
4761 	entry = mt_slot(mas->tree, slots, mas->offset);
4762 	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
4763 		goto retry;
4764 
4765 	if (entry)
4766 		return entry;
4767 
4768 	if (!empty) {
4769 		if (!mas->offset)
4770 			data_end = 2;
4771 		goto again;
4772 	}
4773 
4774 	return entry;
4775 }
4776 
4777 /*
4778  * mas_next_entry() - Internal function to get the next entry.
4779  * @mas: The maple state
4780  * @limit: The maximum range start.
4781  *
4782  * Set the @mas->node to the next entry and the range_start to
4783  * the beginning value for the entry.  Does not check beyond @limit.
4784  * Sets @mas->index and @mas->last to the limit if it is hit.
4785  * Restarts on dead nodes.
4786  *
4787  * Return: the next entry or %NULL.
4788  */
4789 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4790 {
4791 	if (mas->last >= limit)
4792 		return NULL;
4793 
4794 	return mas_next_slot(mas, limit, false);
4795 }
4796 
4797 /*
4798  * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4799  * highest gap address of a given size in a given node and descend.
4800  * @mas: The maple state
4801  * @size: The needed size.
4802  * @gap_min: Pointer to store the minimum of the discovered gap
4803  * @gap_max: Pointer to store the maximum of the discovered gap
4804  *
4803  * Return: True if found in a leaf, false otherwise.
4804  *
4805  */
4806 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size,
4807 		unsigned long *gap_min, unsigned long *gap_max)
4808 {
4809 	enum maple_type type = mte_node_type(mas->node);
4810 	struct maple_node *node = mas_mn(mas);
4811 	unsigned long *pivots, *gaps;
4812 	void __rcu **slots;
4813 	unsigned long gap = 0;
4814 	unsigned long max, min;
4815 	unsigned char offset;
4816 
4817 	if (unlikely(mas_is_err(mas)))
4818 		return true;
4819 
4820 	if (ma_is_dense(type)) {
4821 		/* dense nodes. */
4822 		mas->offset = (unsigned char)(mas->index - mas->min);
4823 		return true;
4824 	}
4825 
4826 	pivots = ma_pivots(node, type);
4827 	slots = ma_slots(node, type);
4828 	gaps = ma_gaps(node, type);
4829 	offset = mas->offset;
4830 	min = mas_safe_min(mas, pivots, offset);
4831 	/* Skip out of bounds. */
4832 	while (mas->last < min)
4833 		min = mas_safe_min(mas, pivots, --offset);
4834 
4835 	max = mas_safe_pivot(mas, pivots, offset, type);
4836 	while (mas->index <= max) {
4837 		gap = 0;
4838 		if (gaps)
4839 			gap = gaps[offset];
4840 		else if (!mas_slot(mas, slots, offset))
4841 			gap = max - min + 1;
4842 
4843 		if (gap) {
4844 			if ((size <= gap) && (size <= mas->last - min + 1))
4845 				break;
4846 
4847 			if (!gaps) {
4848 				/* Skip the next slot, it cannot be a gap. */
4849 				if (offset < 2)
4850 					goto ascend;
4851 
4852 				offset -= 2;
4853 				max = pivots[offset];
4854 				min = mas_safe_min(mas, pivots, offset);
4855 				continue;
4856 			}
4857 		}
4858 
4859 		if (!offset)
4860 			goto ascend;
4861 
4862 		offset--;
4863 		max = min - 1;
4864 		min = mas_safe_min(mas, pivots, offset);
4865 	}
4866 
4867 	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
4868 		goto no_space;
4869 
4870 	if (unlikely(ma_is_leaf(type))) {
4871 		mas->offset = offset;
4872 		*gap_min = min;
4873 		*gap_max = min + gap - 1;
4874 		return true;
4875 	}
4876 
4877 	/* descend, only happens under lock. */
4878 	mas->node = mas_slot(mas, slots, offset);
4879 	mas->min = min;
4880 	mas->max = max;
4881 	mas->offset = mas_data_end(mas);
4882 	return false;
4883 
4884 ascend:
4885 	if (!mte_is_root(mas->node))
4886 		return false;
4887 
4888 no_space:
4889 	mas_set_err(mas, -EBUSY);
4890 	return false;
4891 }
4892 
4893 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
4894 {
4895 	enum maple_type type = mte_node_type(mas->node);
4896 	unsigned long pivot, min, gap = 0;
4897 	unsigned char offset, data_end;
4898 	unsigned long *gaps, *pivots;
4899 	void __rcu **slots;
4900 	struct maple_node *node;
4901 	bool found = false;
4902 
4903 	if (ma_is_dense(type)) {
4904 		mas->offset = (unsigned char)(mas->index - mas->min);
4905 		return true;
4906 	}
4907 
4908 	node = mas_mn(mas);
4909 	pivots = ma_pivots(node, type);
4910 	slots = ma_slots(node, type);
4911 	gaps = ma_gaps(node, type);
4912 	offset = mas->offset;
4913 	min = mas_safe_min(mas, pivots, offset);
4914 	data_end = ma_data_end(node, type, pivots, mas->max);
4915 	for (; offset <= data_end; offset++) {
4916 		pivot = mas_safe_pivot(mas, pivots, offset, type);
4917 
4918 		/* Not within lower bounds */
4919 		if (mas->index > pivot)
4920 			goto next_slot;
4921 
4922 		if (gaps)
4923 			gap = gaps[offset];
4924 		else if (!mas_slot(mas, slots, offset))
4925 			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
4926 		else
4927 			goto next_slot;
4928 
4929 		if (gap >= size) {
4930 			if (ma_is_leaf(type)) {
4931 				found = true;
4932 				goto done;
4933 			}
4934 			if (mas->index <= pivot) {
4935 				mas->node = mas_slot(mas, slots, offset);
4936 				mas->min = min;
4937 				mas->max = pivot;
4938 				offset = 0;
4939 				break;
4940 			}
4941 		}
4942 next_slot:
4943 		min = pivot + 1;
4944 		if (mas->last <= pivot) {
4945 			mas_set_err(mas, -EBUSY);
4946 			return true;
4947 		}
4948 	}
4949 
4950 	if (mte_is_root(mas->node))
4951 		found = true;
4952 done:
4953 	mas->offset = offset;
4954 	return found;
4955 }
4956 
4957 /**
4958  * mas_walk() - Search for @mas->index in the tree.
4959  * @mas: The maple state.
4960  *
4961  * mas->index and mas->last will be set to the range if there is a value.  If
4962  * mas->node is MAS_NONE, reset to MAS_START.
4963  *
4964  * Return: the entry at the location or %NULL.
4965  */
4966 void *mas_walk(struct ma_state *mas)
4967 {
4968 	void *entry;
4969 
4970 	if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas))
4971 		mas->node = MAS_START;
4972 retry:
4973 	entry = mas_state_walk(mas);
4974 	if (mas_is_start(mas)) {
4975 		goto retry;
4976 	} else if (mas_is_none(mas)) {
4977 		mas->index = 0;
4978 		mas->last = ULONG_MAX;
4979 	} else if (mas_is_ptr(mas)) {
4980 		if (!mas->index) {
4981 			mas->last = 0;
4982 			return entry;
4983 		}
4984 
4985 		mas->index = 1;
4986 		mas->last = ULONG_MAX;
4987 		mas->node = MAS_NONE;
4988 		return NULL;
4989 	}
4990 
4991 	return entry;
4992 }
4993 EXPORT_SYMBOL_GPL(mas_walk);
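
/*
 * Minimal usage sketch for mas_walk() (illustrative only, not part of the
 * implementation; the helper name and index are hypothetical).  The caller
 * must hold the RCU read lock or the tree lock across the walk.
 */
static __maybe_unused void *example_mas_walk(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 12, 12);	/* Look up index 12. */
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);
	/* On a hit, mas.index and mas.last now describe the matched range. */
	rcu_read_unlock();
	return entry;
}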
4994 
4995 static inline bool mas_rewind_node(struct ma_state *mas)
4996 {
4997 	unsigned char slot;
4998 
4999 	do {
5000 		if (mte_is_root(mas->node)) {
5001 			slot = mas->offset;
5002 			if (!slot)
5003 				return false;
5004 		} else {
5005 			mas_ascend(mas);
5006 			slot = mas->offset;
5007 		}
5008 	} while (!slot);
5009 
5010 	mas->offset = --slot;
5011 	return true;
5012 }
5013 
5014 /*
5015  * mas_skip_node() - Internal function.  Skip over a node.
5016  * @mas: The maple state.
5017  *
5018  * Return: true if there is another node, false otherwise.
5019  */
5020 static inline bool mas_skip_node(struct ma_state *mas)
5021 {
5022 	if (mas_is_err(mas))
5023 		return false;
5024 
5025 	do {
5026 		if (mte_is_root(mas->node)) {
5027 			if (mas->offset >= mas_data_end(mas)) {
5028 				mas_set_err(mas, -EBUSY);
5029 				return false;
5030 			}
5031 		} else {
5032 			mas_ascend(mas);
5033 		}
5034 	} while (mas->offset >= mas_data_end(mas));
5035 
5036 	mas->offset++;
5037 	return true;
5038 }
5039 
5040 /*
5041  * mas_awalk() - Allocation walk.  Search from low address to high, for a gap of
5042  * @size
5043  * @mas: The maple state
5044  * @size: The size of the gap required
5045  *
5046  * Search between @mas->index and @mas->last for a gap of @size.
5047  */
5048 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5049 {
5050 	struct maple_enode *last = NULL;
5051 
5052 	/*
5053 	 * There are 4 options:
5054 	 * go to child (descend)
5055 	 * go back to parent (ascend)
5056 	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5057 	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5058 	 */
5059 	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5060 		if (last == mas->node)
5061 			mas_skip_node(mas);
5062 		else
5063 			last = mas->node;
5064 	}
5065 }
5066 
5067 /*
5068  * mas_sparse_area() - Internal function.  Return upper or lower limit when
5069  * searching for a gap in an empty tree.
5070  * @mas: The maple state
5071  * @min: the minimum range
5072  * @max: The maximum range
5073  * @size: The size of the gap
5074  * @fwd: Searching forward or back
5075  */
5076 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min,
5077 				unsigned long max, unsigned long size, bool fwd)
5078 {
5079 	if (!unlikely(mas_is_none(mas)) && min == 0) {
5080 		min++;
5081 		/*
5082 		 * min was just increased; recheck that the remaining range
5083 		 * can still satisfy the requested size.
5084 		 */
5085 		if (min > max || max - min + 1 < size)
5086 			return -EBUSY;
5087 	}
5088 	/* mas_is_ptr: a single entry at index 0 was handled above. */
5089 
5090 	if (fwd) {
5091 		mas->index = min;
5092 		mas->last = min + size - 1;
5093 	} else {
5094 		mas->last = max;
5095 		mas->index = max - size + 1;
5096 	}
5097 	return 0;
5098 }
5099 
5100 /*
5101  * mas_empty_area() - Get the lowest address within the range that is
5102  * sufficient for the size requested.
5103  * @mas: The maple state
5104  * @min: The lowest value of the range
5105  * @max: The highest value of the range
5106  * @size: The size needed
5107  *
5108  * Return: 0 on success, -EINVAL on invalid request, -EBUSY when no gap is
5109  * found.
5110  */
5108 int mas_empty_area(struct ma_state *mas, unsigned long min,
5109 		unsigned long max, unsigned long size)
5110 {
5111 	unsigned char offset;
5112 	unsigned long *pivots;
5113 	enum maple_type mt;
5114 
5115 	if (min > max)
5116 		return -EINVAL;
5117 
5118 	if (size == 0 || max - min < size - 1)
5119 		return -EINVAL;
5120 
5121 	if (mas_is_start(mas))
5122 		mas_start(mas);
5123 	else if (mas->offset >= 2)
5124 		mas->offset -= 2;
5125 	else if (!mas_skip_node(mas))
5126 		return -EBUSY;
5127 
5128 	/* Empty set */
5129 	if (mas_is_none(mas) || mas_is_ptr(mas))
5130 		return mas_sparse_area(mas, min, max, size, true);
5131 
5132 	/* The start of the window can only be within these values */
5133 	mas->index = min;
5134 	mas->last = max;
5135 	mas_awalk(mas, size);
5136 
5137 	if (unlikely(mas_is_err(mas)))
5138 		return xa_err(mas->node);
5139 
5140 	offset = mas->offset;
5141 	if (unlikely(offset == MAPLE_NODE_SLOTS))
5142 		return -EBUSY;
5143 
5144 	mt = mte_node_type(mas->node);
5145 	pivots = ma_pivots(mas_mn(mas), mt);
5146 	min = mas_safe_min(mas, pivots, offset);
5147 	if (mas->index < min)
5148 		mas->index = min;
5149 	mas->last = mas->index + size - 1;
5150 	return 0;
5151 }
5152 EXPORT_SYMBOL_GPL(mas_empty_area);
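
/*
 * Minimal usage sketch for mas_empty_area() (illustrative only; the helper
 * name and limits are hypothetical): find the lowest 16-slot gap within
 * [0, 1023].  On success the gap is left in mas.index/mas.last.
 */
static __maybe_unused int example_find_gap(struct maple_tree *mt,
					   unsigned long *startp)
{
	MA_STATE(mas, mt, 0, 0);
	int ret;

	mas_lock(&mas);
	ret = mas_empty_area(&mas, 0, 1023, 16);
	if (!ret)
		*startp = mas.index;
	mas_unlock(&mas);
	return ret;
}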
5153 
5154 /*
5155  * mas_empty_area_rev() - Get the highest address within the range that is
5156  * sufficient for the size requested.
5157  * @mas: The maple state
5158  * @min: The lowest value of the range
5159  * @max: The highest value of the range
5160  * @size: The size needed
5161  *
5162  * Return: 0 on success, -EINVAL on invalid request, -EBUSY when no gap is
5163  * found.
5164  */
5162 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5163 		unsigned long max, unsigned long size)
5164 {
5165 	struct maple_enode *last = mas->node;
5166 
5167 	if (min > max)
5168 		return -EINVAL;
5169 
5170 	if (size == 0 || max - min < size - 1)
5171 		return -EINVAL;
5172 
5173 	if (mas_is_start(mas)) {
5174 		mas_start(mas);
5175 		mas->offset = mas_data_end(mas);
5176 	} else if (mas->offset >= 2) {
5177 		mas->offset -= 2;
5178 	} else if (!mas_rewind_node(mas)) {
5179 		return -EBUSY;
5180 	}
5181 
5182 	/* Empty set. */
5183 	if (mas_is_none(mas) || mas_is_ptr(mas))
5184 		return mas_sparse_area(mas, min, max, size, false);
5185 
5186 	/* The start of the window can only be within these values. */
5187 	mas->index = min;
5188 	mas->last = max;
5189 
5190 	while (!mas_rev_awalk(mas, size, &min, &max)) {
5191 		if (last == mas->node) {
5192 			if (!mas_rewind_node(mas))
5193 				return -EBUSY;
5194 		} else {
5195 			last = mas->node;
5196 		}
5197 	}
5198 
5199 	if (mas_is_err(mas))
5200 		return xa_err(mas->node);
5201 
5202 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5203 		return -EBUSY;
5204 
5205 	/* Trim the upper limit to the max. */
5206 	if (max < mas->last)
5207 		mas->last = max;
5208 
5209 	mas->index = mas->last - size + 1;
5210 	return 0;
5211 }
5212 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5213 
5214 /*
5215  * mte_dead_leaves() - Mark all leaves of a node as dead.
5216  * @enode: The maple encoded node
5217  * @mt: The maple tree
5218  * @slots: Pointer to the slot array
5219  *
5220  * Must hold the write lock.
5221  *
5222  * Return: The number of leaves marked as dead.
5223  */
5224 static inline
5225 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5226 			      void __rcu **slots)
5227 {
5228 	struct maple_node *node;
5229 	enum maple_type type;
5230 	void *entry;
5231 	int offset;
5232 
5233 	for (offset = 0; offset < mt_slot_count(enode); offset++) {
5234 		entry = mt_slot(mt, slots, offset);
5235 		type = mte_node_type(entry);
5236 		node = mte_to_node(entry);
5237 		/* Use both node and type to catch LE & BE metadata */
5238 		if (!node || !type)
5239 			break;
5240 
5241 		mte_set_node_dead(entry);
5242 		node->type = type;
5243 		rcu_assign_pointer(slots[offset], node);
5244 	}
5245 
5246 	return offset;
5247 }
5248 
5249 /**
5250  * mte_dead_walk() - Walk down a dead tree to just before the leaves
5251  * @enode: The maple encoded node
5252  * @offset: The starting offset
5253  *
5254  * Note: This can only be used from the RCU callback context.
5255  */
5256 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5257 {
5258 	struct maple_node *node, *next;
5259 	void __rcu **slots = NULL;
5260 
5261 	next = mte_to_node(*enode);
5262 	do {
5263 		*enode = ma_enode_ptr(next);
5264 		node = mte_to_node(*enode);
5265 		slots = ma_slots(node, node->type);
5266 		next = rcu_dereference_protected(slots[offset],
5267 					lock_is_held(&rcu_callback_map));
5268 		offset = 0;
5269 	} while (!ma_is_leaf(next->type));
5270 
5271 	return slots;
5272 }
5273 
5274 /**
5275  * mt_free_walk() - Walk & free a tree in the RCU callback context
5276  * @head: The RCU head that's within the node.
5277  *
5278  * Note: This can only be used from the RCU callback context.
5279  */
5280 static void mt_free_walk(struct rcu_head *head)
5281 {
5282 	void __rcu **slots;
5283 	struct maple_node *node, *start;
5284 	struct maple_enode *enode;
5285 	unsigned char offset;
5286 	enum maple_type type;
5287 
5288 	node = container_of(head, struct maple_node, rcu);
5289 
5290 	if (ma_is_leaf(node->type))
5291 		goto free_leaf;
5292 
5293 	start = node;
5294 	enode = mt_mk_node(node, node->type);
5295 	slots = mte_dead_walk(&enode, 0);
5296 	node = mte_to_node(enode);
5297 	do {
5298 		mt_free_bulk(node->slot_len, slots);
5299 		offset = node->parent_slot + 1;
5300 		enode = node->piv_parent;
5301 		if (mte_to_node(enode) == node)
5302 			goto free_leaf;
5303 
5304 		type = mte_node_type(enode);
5305 		slots = ma_slots(mte_to_node(enode), type);
5306 		if ((offset < mt_slots[type]) &&
5307 		    rcu_dereference_protected(slots[offset],
5308 					      lock_is_held(&rcu_callback_map)))
5309 			slots = mte_dead_walk(&enode, offset);
5310 		node = mte_to_node(enode);
5311 	} while ((node != start) || (node->slot_len < offset));
5312 
5313 	slots = ma_slots(node, node->type);
5314 	mt_free_bulk(node->slot_len, slots);
5315 
5316 free_leaf:
5317 	mt_free_rcu(&node->rcu);
5318 }
5319 
5320 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5321 	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5322 {
5323 	struct maple_node *node;
5324 	struct maple_enode *next = *enode;
5325 	void __rcu **slots = NULL;
5326 	enum maple_type type;
5327 	unsigned char next_offset = 0;
5328 
5329 	do {
5330 		*enode = next;
5331 		node = mte_to_node(*enode);
5332 		type = mte_node_type(*enode);
5333 		slots = ma_slots(node, type);
5334 		next = mt_slot_locked(mt, slots, next_offset);
5335 		if ((mte_dead_node(next)))
5336 			next = mt_slot_locked(mt, slots, ++next_offset);
5337 
5338 		mte_set_node_dead(*enode);
5339 		node->type = type;
5340 		node->piv_parent = prev;
5341 		node->parent_slot = offset;
5342 		offset = next_offset;
5343 		next_offset = 0;
5344 		prev = *enode;
5345 	} while (!mte_is_leaf(next));
5346 
5347 	return slots;
5348 }
5349 
5350 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5351 			    bool free)
5352 {
5353 	void __rcu **slots;
5354 	struct maple_node *node = mte_to_node(enode);
5355 	struct maple_enode *start;
5356 
5357 	if (mte_is_leaf(enode)) {
5358 		node->type = mte_node_type(enode);
5359 		goto free_leaf;
5360 	}
5361 
5362 	start = enode;
5363 	slots = mte_destroy_descend(&enode, mt, start, 0);
5364 	node = mte_to_node(enode); /* Updated in the above call. */
5365 	do {
5366 		enum maple_type type;
5367 		unsigned char offset;
5368 		struct maple_enode *parent, *tmp;
5369 
5370 		node->slot_len = mte_dead_leaves(enode, mt, slots);
5371 		if (free)
5372 			mt_free_bulk(node->slot_len, slots);
5373 		offset = node->parent_slot + 1;
5374 		enode = node->piv_parent;
5375 		if (mte_to_node(enode) == node)
5376 			goto free_leaf;
5377 
5378 		type = mte_node_type(enode);
5379 		slots = ma_slots(mte_to_node(enode), type);
5380 		if (offset >= mt_slots[type])
5381 			goto next;
5382 
5383 		tmp = mt_slot_locked(mt, slots, offset);
5384 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5385 			parent = enode;
5386 			enode = tmp;
5387 			slots = mte_destroy_descend(&enode, mt, parent, offset);
5388 		}
5389 next:
5390 		node = mte_to_node(enode);
5391 	} while (start != enode);
5392 
5393 	node = mte_to_node(enode);
5394 	node->slot_len = mte_dead_leaves(enode, mt, slots);
5395 	if (free)
5396 		mt_free_bulk(node->slot_len, slots);
5397 
5398 free_leaf:
5399 	if (free)
5400 		mt_free_rcu(&node->rcu);
5401 	else
5402 		mt_clear_meta(mt, node, node->type);
5403 }
5404 
5405 /*
5406  * mte_destroy_walk() - Free a tree or sub-tree.
5407  * @enode: the encoded maple node (maple_enode) to start
5408  * @mt: the tree to free - needed for node types.
5409  *
5410  * Must hold the write lock.
5411  */
5412 static inline void mte_destroy_walk(struct maple_enode *enode,
5413 				    struct maple_tree *mt)
5414 {
5415 	struct maple_node *node = mte_to_node(enode);
5416 
5417 	if (mt_in_rcu(mt)) {
5418 		mt_destroy_walk(enode, mt, false);
5419 		call_rcu(&node->rcu, mt_free_walk);
5420 	} else {
5421 		mt_destroy_walk(enode, mt, true);
5422 	}
5423 }
5424 
5425 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5426 {
5427 	if (mas_is_start(wr_mas->mas))
5428 		return;
5429 
5430 	if (unlikely(mas_is_paused(wr_mas->mas)))
5431 		goto reset;
5432 
5433 	if (unlikely(mas_is_none(wr_mas->mas)))
5434 		goto reset;
5435 
5436 	/*
5437 	 * A less strict version of mas_is_span_wr() where we allow spanning
5438 	 * writes within this node.  This is to stop partial walks in
5439 	 * mas_preallocate() from being reset.
5440 	 */
5441 	if (wr_mas->mas->last > wr_mas->mas->max)
5442 		goto reset;
5443 
5444 	if (wr_mas->entry)
5445 		return;
5446 
5447 	if (mte_is_leaf(wr_mas->mas->node) &&
5448 	    wr_mas->mas->last == wr_mas->mas->max)
5449 		goto reset;
5450 
5451 	return;
5452 
5453 reset:
5454 	mas_reset(wr_mas->mas);
5455 }
5456 
5457 /* Interface */
5458 
5459 /**
5460  * mas_store() - Store an @entry.
5461  * @mas: The maple state.
5462  * @entry: The entry to store.
5463  *
5464  * @mas->index and @mas->last are used to set the range for the @entry.
5465  * Note: The @mas should have preallocated nodes to ensure there is memory to
5466  * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5467  *
5468  * Return: the first entry between mas->index and mas->last or %NULL.
5469  */
5470 void *mas_store(struct ma_state *mas, void *entry)
5471 {
5472 	MA_WR_STATE(wr_mas, mas, entry);
5473 
5474 	trace_ma_write(__func__, mas, 0, entry);
5475 #ifdef CONFIG_DEBUG_MAPLE_TREE
5476 	if (MAS_WARN_ON(mas, mas->index > mas->last))
5477 		pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry);
5478 
5479 	if (mas->index > mas->last) {
5480 		mas_set_err(mas, -EINVAL);
5481 		return NULL;
5482 	}
5483 
5484 #endif
5485 
5486 	/*
5487 	 * Storing is the same operation as insert with the added caveat that it
5488 	 * can overwrite entries.  Although this seems simple enough, one may
5489 	 * want to examine what happens if a single store operation was to
5490 	 * overwrite multiple entries within a self-balancing B-Tree.
5491 	 */
5492 	mas_wr_store_setup(&wr_mas);
5493 	mas_wr_store_entry(&wr_mas);
5494 	return wr_mas.content;
5495 }
5496 EXPORT_SYMBOL_GPL(mas_store);
5497 
5498 /**
5499  * mas_store_gfp() - Store a value into the tree.
5500  * @mas: The maple state
5501  * @entry: The entry to store
5502  * @gfp: The GFP_FLAGS to use for allocations if necessary.
5503  *
5504  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5505  * be allocated.
5506  */
5507 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5508 {
5509 	MA_WR_STATE(wr_mas, mas, entry);
5510 
5511 	mas_wr_store_setup(&wr_mas);
5512 	trace_ma_write(__func__, mas, 0, entry);
5513 retry:
5514 	mas_wr_store_entry(&wr_mas);
5515 	if (unlikely(mas_nomem(mas, gfp)))
5516 		goto retry;
5517 
5518 	if (unlikely(mas_is_err(mas)))
5519 		return xa_err(mas->node);
5520 
5521 	return 0;
5522 }
5523 EXPORT_SYMBOL_GPL(mas_store_gfp);
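
/*
 * Minimal usage sketch for mas_store_gfp() (illustrative only; the helper
 * name, range and value are hypothetical).  GFP_KERNEL is safe here because
 * mas_nomem() drops the internal lock around any sleeping allocation.
 */
static __maybe_unused int example_store_range(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 10, 19);	/* Store over [10, 19]. */
	int ret;

	mas_lock(&mas);
	ret = mas_store_gfp(&mas, xa_mk_value(0xdead), GFP_KERNEL);
	mas_unlock(&mas);
	return ret;
}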
5524 
5525 /**
5526  * mas_store_prealloc() - Store a value into the tree using memory
5527  * preallocated in the maple state.
5528  * @mas: The maple state
5529  * @entry: The entry to store.
5530  */
5531 void mas_store_prealloc(struct ma_state *mas, void *entry)
5532 {
5533 	MA_WR_STATE(wr_mas, mas, entry);
5534 
5535 	mas_wr_store_setup(&wr_mas);
5536 	trace_ma_write(__func__, mas, 0, entry);
5537 	mas_wr_store_entry(&wr_mas);
5538 	MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas));
5539 	mas_destroy(mas);
5540 }
5541 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5542 
5543 /**
5544  * mas_preallocate() - Preallocate enough nodes for a store operation
5545  * @mas: The maple state
5546  * @entry: The entry that will be stored
5547  * @gfp: The GFP_FLAGS to use for allocations.
5548  *
5549  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5550  */
5551 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
5552 {
5553 	MA_WR_STATE(wr_mas, mas, entry);
5554 	unsigned char node_size;
5555 	int request = 1;
5556 	int ret;
5557 
5559 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
5560 		goto ask_now;
5561 
5562 	mas_wr_store_setup(&wr_mas);
5563 	wr_mas.content = mas_start(mas);
5564 	/* Root expand */
5565 	if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
5566 		goto ask_now;
5567 
5568 	if (unlikely(!mas_wr_walk(&wr_mas))) {
5569 		/* Spanning store, use worst case for now */
5570 		request = 1 + mas_mt_height(mas) * 3;
5571 		goto ask_now;
5572 	}
5573 
5574 	/* At this point, we are at the leaf node that needs to be altered. */
5575 	/* Exact fit, no nodes needed. */
5576 	if (wr_mas.r_min == mas->index && wr_mas.r_max == mas->last)
5577 		return 0;
5578 
5579 	mas_wr_end_piv(&wr_mas);
5580 	node_size = mas_wr_new_end(&wr_mas);
5581 	if (node_size >= mt_slots[wr_mas.type]) {
5582 		/* Split, worst case for now. */
5583 		request = 1 + mas_mt_height(mas) * 2;
5584 		goto ask_now;
5585 	}
5586 
5587 	/* New root needs a single node */
5588 	if (unlikely(mte_is_root(mas->node)))
5589 		goto ask_now;
5590 
5591 	/* Potential spanning rebalance collapsing a node, use worst-case */
5592 	if (node_size - 1 <= mt_min_slots[wr_mas.type])
5593 		request = mas_mt_height(mas) * 2 - 1;
5594 
5595 	/* Node store or slot store needs one node */
5596 ask_now:
5597 	mas_node_count_gfp(mas, request, gfp);
5598 	mas->mas_flags |= MA_STATE_PREALLOC;
5599 	if (likely(!mas_is_err(mas)))
5600 		return 0;
5601 
5602 	mas_set_alloc_req(mas, 0);
5603 	ret = xa_err(mas->node);
5604 	mas_reset(mas);
5605 	mas_destroy(mas);
5606 	mas_reset(mas);
5607 	return ret;
5608 }
5609 EXPORT_SYMBOL_GPL(mas_preallocate);
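
/*
 * Example (editorial sketch): the intended pairing of mas_preallocate()
 * with mas_store_prealloc().  This assumes writers are serialized by an
 * external sleepable lock (a tree initialized with MT_FLAGS_LOCK_EXTERN),
 * so a GFP_KERNEL preallocation may block safely; the tree, range, and
 * entry names are hypothetical.
 *
 *	MA_STATE(mas, &example_tree, index, last);
 *
 *	if (mas_preallocate(&mas, entry, GFP_KERNEL))
 *		return -ENOMEM;
 *	// Guaranteed to succeed; surplus nodes are freed via mas_destroy().
 *	mas_store_prealloc(&mas, entry);
 */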
5610 
5611 /*
5612  * mas_destroy() - destroy a maple state.
5613  * @mas: The maple state
5614  *
5615  * Upon completion, check the left-most node and rebalance against the node to
5616  * the right if necessary.  Frees any allocated nodes associated with this maple
5617  * state.
5618  */
5619 void mas_destroy(struct ma_state *mas)
5620 {
5621 	struct maple_alloc *node;
5622 	unsigned long total;
5623 
5624 	/*
5625 	 * When using mas_for_each() to insert an expected number of elements,
5626 	 * it is possible that the number inserted is less than the expected
5627 	 * number.  To fix an invalid final node, a check is performed here to
5628 	 * rebalance the previous node with the final node.
5629 	 */
5630 	if (mas->mas_flags & MA_STATE_REBALANCE) {
5631 		unsigned char end;
5632 
5633 		mas_start(mas);
5634 		mtree_range_walk(mas);
5635 		end = mas_data_end(mas) + 1;
5636 		if (end < mt_min_slot_count(mas->node) - 1)
5637 			mas_destroy_rebalance(mas, end);
5638 
5639 		mas->mas_flags &= ~MA_STATE_REBALANCE;
5640 	}
5641 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5642 
5643 	total = mas_allocated(mas);
5644 	while (total) {
5645 		node = mas->alloc;
5646 		mas->alloc = node->slot[0];
5647 		if (node->node_count > 1) {
5648 			size_t count = node->node_count - 1;
5649 
5650 			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5651 			total -= count;
5652 		}
5653 		kmem_cache_free(maple_node_cache, node);
5654 		total--;
5655 	}
5656 
5657 	mas->alloc = NULL;
5658 }
5659 EXPORT_SYMBOL_GPL(mas_destroy);
5660 
5661 /*
5662  * mas_expected_entries() - Set the expected number of entries that will be inserted.
5663  * @mas: The maple state
5664  * @nr_entries: The number of expected entries.
5665  *
5666  * This will attempt to pre-allocate enough nodes to store the expected number
5667  * of entries.  The allocations will occur using the bulk allocator interface
5668  * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5669  * to ensure any unused nodes are freed.
5670  *
5671  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5672  */
5673 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5674 {
5675 	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5676 	struct maple_enode *enode = mas->node;
5677 	int nr_nodes;
5678 	int ret;
5679 
5680 	/*
5681 	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5682 	 * forking a process and duplicating the VMAs from one tree to a new
5683 	 * tree.  When such a situation arises, it is known that the new tree is
5684 	 * not going to be used until the entire tree is populated.  For
5685 	 * performance reasons, it is best to use a bulk load with RCU disabled.
5686 	 * This allows for optimistic splitting that favours the left and reuse
5687 	 * of nodes during the operation.
5688 	 */
5689 
5690 	/* Optimize splitting for bulk insert in-order */
5691 	mas->mas_flags |= MA_STATE_BULK;
5692 
5693 	/*
5694 	 * Avoid overflow, assume a gap between each entry and a trailing null.
5695 	 * If this is wrong, it just means allocation can happen during
5696 	 * insertion of entries.
5697 	 */
5698 	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5699 	if (!mt_is_alloc(mas->tree))
5700 		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5701 
5702 	/* Leaves; reduce slots to keep space for expansion */
5703 	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5704 	/* Internal nodes */
5705 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5706 	/* Add working room for split (2 nodes) + new parents */
5707 	mas_node_count(mas, nr_nodes + 3);
5708 
5709 	/* Detect if allocations run out */
5710 	mas->mas_flags |= MA_STATE_PREALLOC;
5711 
5712 	if (!mas_is_err(mas))
5713 		return 0;
5714 
5715 	ret = xa_err(mas->node);
5716 	mas->node = enode;
5717 	mas_destroy(mas);
5718 	return ret;
5719 
5720 }
5721 EXPORT_SYMBOL_GPL(mas_expected_entries);
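
/*
 * Example (editorial sketch): bulk-loading a fresh, unshared tree, the
 * fork-style duplication case described above.  The entry count and the
 * ranges are hypothetical; mas_destroy() both rebalances a short final
 * node and frees the unused preallocations.
 *
 *	MA_STATE(mas, &new_tree, 0, 0);
 *	unsigned long i;
 *	int ret;
 *
 *	mas_lock(&mas);
 *	ret = mas_expected_entries(&mas, 1000);
 *	if (ret)
 *		goto out;
 *	// Stores must arrive in ascending order for bulk mode.
 *	for (i = 0; i < 1000; i++) {
 *		mas_set_range(&mas, i * 10, i * 10 + 5);
 *		mas_store(&mas, xa_mk_value(i));
 *	}
 * out:
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */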
5722 
5723 static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
5724 		void **entry)
5725 {
5726 	bool was_none = mas_is_none(mas);
5727 
5728 	if (mas_is_none(mas) || mas_is_paused(mas))
5729 		mas->node = MAS_START;
5730 
5731 	if (mas_is_start(mas))
5732 		*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5733 
5734 	if (mas_is_ptr(mas)) {
5735 		*entry = NULL;
5736 		if (was_none && mas->index == 0) {
5737 			mas->index = mas->last = 0;
5738 			return true;
5739 		}
5740 		mas->index = 1;
5741 		mas->last = ULONG_MAX;
5742 		mas->node = MAS_NONE;
5743 		return true;
5744 	}
5745 
5746 	if (mas_is_none(mas))
5747 		return true;
5748 	return false;
5749 }
5750 
5751 /**
5752  * mas_next() - Get the next entry.
5753  * @mas: The maple state
5754  * @max: The maximum index to check.
5755  *
5756  * Returns the next entry after @mas->index.
5757  * Must hold rcu_read_lock or the write lock.
5758  * Can return the zero entry.
5759  *
5760  * Return: The next entry or %NULL
5761  */
5762 void *mas_next(struct ma_state *mas, unsigned long max)
5763 {
5764 	void *entry = NULL;
5765 
5766 	if (mas_next_setup(mas, max, &entry))
5767 		return entry;
5768 
5769 	/* Retries on dead nodes handled by mas_next_slot */
5770 	return mas_next_slot(mas, max, false);
5771 }
5772 EXPORT_SYMBOL_GPL(mas_next);
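
/*
 * Example (editorial sketch): walking entries in ascending order with
 * mas_next() under the RCU read lock.  The tree name is hypothetical;
 * entries must not be dereferenced after rcu_read_unlock() unless they
 * have their own lifetime guarantees.
 *
 *	MA_STATE(mas, &example_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		pr_info("[%lx-%lx] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */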
5773 
5774 /**
5775  * mas_next_range() - Advance the maple state to the next range
5776  * @mas: The maple state
5777  * @max: The maximum index to check.
5778  *
5779  * Sets @mas->index and @mas->last to the range.
5780  * Must hold rcu_read_lock or the write lock.
5781  * Can return the zero entry.
5782  *
5783  * Return: The next entry or %NULL
5784  */
5785 void *mas_next_range(struct ma_state *mas, unsigned long max)
5786 {
5787 	void *entry = NULL;
5788 
5789 	if (mas_next_setup(mas, max, &entry))
5790 		return entry;
5791 
5792 	/* Retries on dead nodes handled by mas_next_slot */
5793 	return mas_next_slot(mas, max, true);
5794 }
5795 EXPORT_SYMBOL_GPL(mas_next_range);
5796 
5797 /**
5798  * mt_next() - get the next value in the maple tree
5799  * @mt: The maple tree
5800  * @index: The start index
5801  * @max: The maximum index to check
5802  *
5803  * Takes RCU read lock internally to protect the search, which does not
5804  * protect the returned pointer after dropping RCU read lock.
5805  * See also: Documentation/core-api/maple_tree.rst
5806  *
5807  * Return: The entry higher than @index or %NULL if nothing is found.
5808  */
5809 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5810 {
5811 	void *entry = NULL;
5812 	MA_STATE(mas, mt, index, index);
5813 
5814 	rcu_read_lock();
5815 	entry = mas_next(&mas, max);
5816 	rcu_read_unlock();
5817 	return entry;
5818 }
5819 EXPORT_SYMBOL_GPL(mt_next);
5820 
5821 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
5822 		void **entry)
5823 {
5824 	if (mas->index <= min)
5825 		goto none;
5826 
5827 	if (mas_is_none(mas) || mas_is_paused(mas))
5828 		mas->node = MAS_START;
5829 
5830 	if (mas_is_start(mas)) {
5831 		mas_walk(mas);
5832 		if (!mas->index)
5833 			goto none;
5834 	}
5835 
5836 	if (unlikely(mas_is_ptr(mas))) {
5837 		if (!mas->index)
5838 			goto none;
5839 		mas->index = mas->last = 0;
5840 		*entry = mas_root(mas);
5841 		return true;
5842 	}
5843 
5844 	if (mas_is_none(mas)) {
5845 		if (mas->index) {
5846 			/* Walked to out-of-range pointer? */
5847 			mas->index = mas->last = 0;
5848 			mas->node = MAS_ROOT;
5849 			*entry = mas_root(mas);
5850 			return true;
5851 		}
5852 		return true;
5853 	}
5854 
5855 	return false;
5856 
5857 none:
5858 	mas->node = MAS_NONE;
5859 	return true;
5860 }
5861 
5862 /**
5863  * mas_prev() - Get the previous entry
5864  * @mas: The maple state
5865  * @min: The minimum value to check.
5866  *
5867  * Must hold rcu_read_lock or the write lock.
5868  * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on
5869  * non-searchable nodes.
5870  *
5871  * Return: the previous value or %NULL.
5872  */
5873 void *mas_prev(struct ma_state *mas, unsigned long min)
5874 {
5875 	void *entry = NULL;
5876 
5877 	if (mas_prev_setup(mas, min, &entry))
5878 		return entry;
5879 
5880 	return mas_prev_slot(mas, min, false);
5881 }
5882 EXPORT_SYMBOL_GPL(mas_prev);
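
/*
 * Example (editorial sketch): the reverse of the mas_next() loop shown
 * earlier, scanning downwards from a hypothetical starting index.
 *
 *	MA_STATE(mas, &example_tree, ULONG_MAX, ULONG_MAX);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_prev(&mas, 0)) != NULL)
 *		pr_info("[%lx-%lx] -> %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */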
5883 
5884 /**
5885  * mas_prev_range() - Advance to the previous range
5886  * @mas: The maple state
5887  * @min: The minimum value to check.
5888  *
5889  * Sets @mas->index and @mas->last to the range.
5890  * Must hold rcu_read_lock or the write lock.
5891  * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on
5892  * non-searchable nodes.
5893  *
5894  * Return: the previous value or %NULL.
5895  */
5896 void *mas_prev_range(struct ma_state *mas, unsigned long min)
5897 {
5898 	void *entry = NULL;
5899 
5900 	if (mas_prev_setup(mas, min, &entry))
5901 		return entry;
5902 
5903 	return mas_prev_slot(mas, min, true);
5904 }
5905 EXPORT_SYMBOL_GPL(mas_prev_range);
5906 
5907 /**
5908  * mt_prev() - get the previous value in the maple tree
5909  * @mt: The maple tree
5910  * @index: The start index
5911  * @min: The minimum index to check
5912  *
5913  * Takes RCU read lock internally to protect the search, which does not
5914  * protect the returned pointer after dropping RCU read lock.
5915  * See also: Documentation/core-api/maple_tree.rst
5916  *
5917  * Return: The entry before @index or %NULL if nothing is found.
5918  */
5919 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
5920 {
5921 	void *entry = NULL;
5922 	MA_STATE(mas, mt, index, index);
5923 
5924 	rcu_read_lock();
5925 	entry = mas_prev(&mas, min);
5926 	rcu_read_unlock();
5927 	return entry;
5928 }
5929 EXPORT_SYMBOL_GPL(mt_prev);
5930 
5931 /**
5932  * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
5933  * @mas: The maple state to pause
5934  *
5935  * Some users need to pause a walk and drop the lock they're holding in
5936  * order to yield to a higher priority thread or carry out an operation
5937  * on an entry.  Those users should call this function before they drop
5938  * the lock.  It resets the @mas to be suitable for the next iteration
5939  * of the loop after the user has reacquired the lock.  If most entries
5940  * found during a walk require you to call mas_pause(), the mt_for_each()
5941  * iterator may be more appropriate.
5942  *
5943  */
5944 void mas_pause(struct ma_state *mas)
5945 {
5946 	mas->node = MAS_PAUSE;
5947 }
5948 EXPORT_SYMBOL_GPL(mas_pause);
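
/*
 * Example (editorial sketch): dropping the lock mid-iteration.  After
 * mas_pause(), the state is safely restarted once the lock is retaken,
 * so the loop continues from the next index rather than repeating
 * entries.  The tree name is hypothetical.
 *
 *	MA_STATE(mas, &example_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */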
5949 
5950 /**
5951  * mas_find_setup() - Internal function to set up mas_find*().
5952  * @mas: The maple state
5953  * @max: The maximum index
5954  * @entry: Pointer to the entry
5955  *
5956  * Return: true if @entry is the answer, false otherwise.
5957  */
5958 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
5959 		void **entry)
5960 {
5961 	*entry = NULL;
5962 
5963 	if (unlikely(mas_is_none(mas))) {
5964 		if (unlikely(mas->last >= max))
5965 			return true;
5966 
5967 		mas->index = mas->last;
5968 		mas->node = MAS_START;
5969 	} else if (unlikely(mas_is_paused(mas))) {
5970 		if (unlikely(mas->last >= max))
5971 			return true;
5972 
5973 		mas->node = MAS_START;
5974 		mas->index = ++mas->last;
5975 	} else if (unlikely(mas_is_ptr(mas)))
5976 		goto ptr_out_of_range;
5977 
5978 	if (unlikely(mas_is_start(mas))) {
5979 		/* First run or continue */
5980 		if (mas->index > max)
5981 			return true;
5982 
5983 		*entry = mas_walk(mas);
5984 		if (*entry)
5985 			return true;
5986 
5987 	}
5988 
5989 	if (unlikely(!mas_searchable(mas))) {
5990 		if (unlikely(mas_is_ptr(mas)))
5991 			goto ptr_out_of_range;
5992 
5993 		return true;
5994 	}
5995 
5996 	if (mas->index == max)
5997 		return true;
5998 
5999 	return false;
6000 
6001 ptr_out_of_range:
6002 	mas->node = MAS_NONE;
6003 	mas->index = 1;
6004 	mas->last = ULONG_MAX;
6005 	return true;
6006 }
6007 
6008 /**
6009  * mas_find() - On the first call, find the entry at or after mas->index up to
6010  * %max.  Otherwise, find the entry after mas->index.
6011  * @mas: The maple state
6012  * @max: The maximum value to check.
6013  *
6014  * Must hold rcu_read_lock or the write lock.
6015  * If an entry exists, last and index are updated accordingly.
6016  * May set @mas->node to MAS_NONE.
6017  *
6018  * Return: The entry or %NULL.
6019  */
6020 void *mas_find(struct ma_state *mas, unsigned long max)
6021 {
6022 	void *entry = NULL;
6023 
6024 	if (mas_find_setup(mas, max, &entry))
6025 		return entry;
6026 
6027 	/* Retries on dead nodes handled by mas_next_slot */
6028 	return mas_next_slot(mas, max, false);
6029 }
6030 EXPORT_SYMBOL_GPL(mas_find);
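
/*
 * Example (editorial sketch): mas_find() is the workhorse behind the
 * mas_for_each() iterator, which is the usual way to consume it.  The
 * handle_entry() callback below is hypothetical.
 *
 *	MA_STATE(mas, &example_tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX)
 *		handle_entry(entry, mas.index, mas.last);
 *	rcu_read_unlock();
 */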
6031 
6032 /**
6033  * mas_find_range() - On the first call, find the entry at or after
6034  * mas->index up to %max.  Otherwise, advance to the slot after mas->index.
6035  * @mas: The maple state
6036  * @max: The maximum value to check.
6037  *
6038  * Must hold rcu_read_lock or the write lock.
6039  * If an entry exists, last and index are updated accordingly.
6040  * May set @mas->node to MAS_NONE.
6041  *
6042  * Return: The entry or %NULL.
6043  */
6044 void *mas_find_range(struct ma_state *mas, unsigned long max)
6045 {
6046 	void *entry;
6047 
6048 	if (mas_find_setup(mas, max, &entry))
6049 		return entry;
6050 
6051 	/* Retries on dead nodes handled by mas_next_slot */
6052 	return mas_next_slot(mas, max, true);
6053 }
6054 EXPORT_SYMBOL_GPL(mas_find_range);
6055 
6056 /**
6057  * mas_find_rev_setup() - Internal function to set up mas_find_*_rev()
6058  * @mas: The maple state
6059  * @min: The minimum index
6060  * @entry: Pointer to the entry
6061  *
6062  * Return: true if @entry is the answer, false otherwise.
6063  */
6064 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
6065 		void **entry)
6066 {
6067 	*entry = NULL;
6068 
6069 	if (unlikely(mas_is_none(mas))) {
6070 		if (mas->index <= min)
6071 			goto none;
6072 
6073 		mas->last = mas->index;
6074 		mas->node = MAS_START;
6075 	}
6076 
6077 	if (unlikely(mas_is_paused(mas))) {
6078 		if (unlikely(mas->index <= min)) {
6079 			mas->node = MAS_NONE;
6080 			return true;
6081 		}
6082 		mas->node = MAS_START;
6083 		mas->last = --mas->index;
6084 	}
6085 
6086 	if (unlikely(mas_is_start(mas))) {
6087 		/* First run or continue */
6088 		if (mas->index < min)
6089 			return true;
6090 
6091 		*entry = mas_walk(mas);
6092 		if (*entry)
6093 			return true;
6094 	}
6095 
6096 	if (unlikely(!mas_searchable(mas))) {
6097 		if (mas_is_ptr(mas))
6098 			goto none;
6099 
6100 		if (mas_is_none(mas)) {
6101 			/*
6102 			 * Walked to the location and found nothing, so the
6103 			 * previous location is 0.
6104 			 */
6105 			mas->last = mas->index = 0;
6106 			mas->node = MAS_ROOT;
6107 			*entry = mas_root(mas);
6108 			return true;
6109 		}
6110 	}
6111 
6112 	if (mas->index < min)
6113 		return true;
6114 
6115 	return false;
6116 
6117 none:
6118 	mas->node = MAS_NONE;
6119 	return true;
6120 }
6121 
6122 /**
6123  * mas_find_rev() - On the first call, find the first non-null entry at or
6124  * below mas->index down to %min.  Otherwise, find the first non-null entry
6125  * below mas->index down to %min.
6126  * @mas: The maple state
6127  * @min: The minimum value to check.
6128  *
6129  * Must hold rcu_read_lock or the write lock.
6130  * If an entry exists, last and index are updated accordingly.
6131  * May set @mas->node to MAS_NONE.
6132  *
6133  * Return: The entry or %NULL.
6134  */
6135 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6136 {
6137 	void *entry;
6138 
6139 	if (mas_find_rev_setup(mas, min, &entry))
6140 		return entry;
6141 
6142 	/* Retries on dead nodes handled by mas_prev_slot */
6143 	return mas_prev_slot(mas, min, false);
6144 
6145 }
6146 EXPORT_SYMBOL_GPL(mas_find_rev);
6147 
6148 /**
6149  * mas_find_range_rev() - On the first call, find the first non-null entry
6150  * at or below mas->index down to %min.  Otherwise, advance to the previous
6151  * slot before mas->index down to %min.
6152  * @mas: The maple state
6153  * @min: The minimum value to check.
6154  *
6155  * Must hold rcu_read_lock or the write lock.
6156  * If an entry exists, last and index are updated accordingly.
6157  * May set @mas->node to MAS_NONE.
6158  *
6159  * Return: The entry or %NULL.
6160  */
6161 void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
6162 {
6163 	void *entry;
6164 
6165 	if (mas_find_rev_setup(mas, min, &entry))
6166 		return entry;
6167 
6168 	/* Retries on dead nodes handled by mas_prev_slot */
6169 	return mas_prev_slot(mas, min, true);
6170 }
6171 EXPORT_SYMBOL_GPL(mas_find_range_rev);
6172 
6173 /**
6174  * mas_erase() - Find the range in which index resides and erase the entire
6175  * range.
6176  * @mas: The maple state
6177  *
6178  * Must hold the write lock.
6179  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6180  * erases that range.
6181  *
6182  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6183  */
6184 void *mas_erase(struct ma_state *mas)
6185 {
6186 	void *entry;
6187 	MA_WR_STATE(wr_mas, mas, NULL);
6188 
6189 	if (mas_is_none(mas) || mas_is_paused(mas))
6190 		mas->node = MAS_START;
6191 
6192 	/* Retry unnecessary when holding the write lock. */
6193 	entry = mas_state_walk(mas);
6194 	if (!entry)
6195 		return NULL;
6196 
6197 write_retry:
6198 	/* Must reset to ensure spanning writes of last slot are detected */
6199 	mas_reset(mas);
6200 	mas_wr_store_setup(&wr_mas);
6201 	mas_wr_store_entry(&wr_mas);
6202 	if (mas_nomem(mas, GFP_KERNEL))
6203 		goto write_retry;
6204 
6205 	return entry;
6206 }
6207 EXPORT_SYMBOL_GPL(mas_erase);
6208 
6209 /**
6210  * mas_nomem() - Check if there was an error allocating and do the allocation
6211  * if necessary.  If there are allocations, then free them.
6212  * @mas: The maple state
6213  * @gfp: The GFP_FLAGS to use for allocations
6214  * Return: true on allocation, false otherwise.
6215  */
6216 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6217 	__must_hold(mas->tree->ma_lock)
6218 {
6219 	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6220 		mas_destroy(mas);
6221 		return false;
6222 	}
6223 
6224 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6225 		mtree_unlock(mas->tree);
6226 		mas_alloc_nodes(mas, gfp);
6227 		mtree_lock(mas->tree);
6228 	} else {
6229 		mas_alloc_nodes(mas, gfp);
6230 	}
6231 
6232 	if (!mas_allocated(mas))
6233 		return false;
6234 
6235 	mas->node = MAS_START;
6236 	return true;
6237 }
6238 
6239 void __init maple_tree_init(void)
6240 {
6241 	maple_node_cache = kmem_cache_create("maple_node",
6242 			sizeof(struct maple_node), sizeof(struct maple_node),
6243 			SLAB_PANIC, NULL);
6244 }
6245 
6246 /**
6247  * mtree_load() - Load a value stored in a maple tree
6248  * @mt: The maple tree
6249  * @index: The index to load
6250  *
6251  * Return: the entry or %NULL
6252  */
6253 void *mtree_load(struct maple_tree *mt, unsigned long index)
6254 {
6255 	MA_STATE(mas, mt, index, index);
6256 	void *entry;
6257 
6258 	trace_ma_read(__func__, &mas);
6259 	rcu_read_lock();
6260 retry:
6261 	entry = mas_start(&mas);
6262 	if (unlikely(mas_is_none(&mas)))
6263 		goto unlock;
6264 
6265 	if (unlikely(mas_is_ptr(&mas))) {
6266 		if (index)
6267 			entry = NULL;
6268 
6269 		goto unlock;
6270 	}
6271 
6272 	entry = mtree_lookup_walk(&mas);
6273 	if (!entry && unlikely(mas_is_start(&mas)))
6274 		goto retry;
6275 unlock:
6276 	rcu_read_unlock();
6277 	if (xa_is_zero(entry))
6278 		return NULL;
6279 
6280 	return entry;
6281 }
6282 EXPORT_SYMBOL(mtree_load);
6283 
6284 /**
6285  * mtree_store_range() - Store an entry at a given range.
6286  * @mt: The maple tree
6287  * @index: The start of the range
6288  * @last: The end of the range
6289  * @entry: The entry to store
6290  * @gfp: The GFP_FLAGS to use for allocations
6291  *
6292  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6293  * be allocated.
6294  */
6295 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6296 		unsigned long last, void *entry, gfp_t gfp)
6297 {
6298 	MA_STATE(mas, mt, index, last);
6299 	MA_WR_STATE(wr_mas, &mas, entry);
6300 
6301 	trace_ma_write(__func__, &mas, 0, entry);
6302 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6303 		return -EINVAL;
6304 
6305 	if (index > last)
6306 		return -EINVAL;
6307 
6308 	mtree_lock(mt);
6309 retry:
6310 	mas_wr_store_entry(&wr_mas);
6311 	if (mas_nomem(&mas, gfp))
6312 		goto retry;
6313 
6314 	mtree_unlock(mt);
6315 	if (mas_is_err(&mas))
6316 		return xa_err(mas.node);
6317 
6318 	return 0;
6319 }
6320 EXPORT_SYMBOL(mtree_store_range);
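
/*
 * Example (editorial sketch): the simple API handles its own locking, so
 * a range store and a lookup need no maple state at all.  The tree, range
 * and value below are hypothetical.
 *
 *	static DEFINE_MTREE(example_tree);
 *
 *	int ret = mtree_store_range(&example_tree, 0x1000, 0x1fff,
 *				    xa_mk_value(7), GFP_KERNEL);
 *	if (!ret)
 *		// Any index inside the range returns the same entry.
 *		WARN_ON(mtree_load(&example_tree, 0x1234) != xa_mk_value(7));
 */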
6321 
6322 /**
6323  * mtree_store() - Store an entry at a given index.
6324  * @mt: The maple tree
6325  * @index: The index to store the value
6326  * @entry: The entry to store
6327  * @gfp: The GFP_FLAGS to use for allocations
6328  *
6329  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6330  * be allocated.
6331  */
6332 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6333 		 gfp_t gfp)
6334 {
6335 	return mtree_store_range(mt, index, index, entry, gfp);
6336 }
6337 EXPORT_SYMBOL(mtree_store);
6338 
6339 /**
6340  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6341  * @mt: The maple tree
6342  * @first: The start of the range
6343  * @last: The end of the range
6344  * @entry: The entry to store
6345  * @gfp: The GFP_FLAGS to use for allocations.
6346  *
6347  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6348  * request, -ENOMEM if memory could not be allocated.
6349  */
6350 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6351 		unsigned long last, void *entry, gfp_t gfp)
6352 {
6353 	MA_STATE(ms, mt, first, last);
6354 
6355 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6356 		return -EINVAL;
6357 
6358 	if (first > last)
6359 		return -EINVAL;
6360 
6361 	mtree_lock(mt);
6362 retry:
6363 	mas_insert(&ms, entry);
6364 	if (mas_nomem(&ms, gfp))
6365 		goto retry;
6366 
6367 	mtree_unlock(mt);
6368 	if (mas_is_err(&ms))
6369 		return xa_err(ms.node);
6370 
6371 	return 0;
6372 }
6373 EXPORT_SYMBOL(mtree_insert_range);
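
/*
 * Example (editorial sketch): unlike mtree_store_range(), an insert
 * refuses to overwrite.  A second insert overlapping the same
 * hypothetical range fails with -EEXIST while the first entry remains
 * intact.
 *
 *	int ret;
 *
 *	ret = mtree_insert_range(&example_tree, 100, 199, first, GFP_KERNEL);
 *	// ret == 0: the range was empty and is now occupied.
 *	ret = mtree_insert_range(&example_tree, 150, 160, second, GFP_KERNEL);
 *	// ret == -EEXIST: [150, 160] overlaps the existing entry.
 */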
6374 
6375 /**
6376  * mtree_insert() - Insert an entry at a given index if there is no value.
6377  * @mt: The maple tree
6378  * @index: The index to store the value
6379  * @entry: The entry to store
6380  * @gfp: The GFP_FLAGS to use for allocations.
6381  *
6382  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6383  * request, -ENOMEM if memory could not be allocated.
6384  */
6385 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6386 		 gfp_t gfp)
6387 {
6388 	return mtree_insert_range(mt, index, index, entry, gfp);
6389 }
6390 EXPORT_SYMBOL(mtree_insert);
6391 
6392 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6393 		void *entry, unsigned long size, unsigned long min,
6394 		unsigned long max, gfp_t gfp)
6395 {
6396 	int ret = 0;
6397 
6398 	MA_STATE(mas, mt, 0, 0);
6399 	if (!mt_is_alloc(mt))
6400 		return -EINVAL;
6401 
6402 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6403 		return -EINVAL;
6404 
6405 	mtree_lock(mt);
6406 retry:
6407 	ret = mas_empty_area(&mas, min, max, size);
6408 	if (ret)
6409 		goto unlock;
6410 
6411 	mas_insert(&mas, entry);
6412 	/*
6413 	 * mas_nomem() may release the lock, causing the allocated area
6414 	 * to be unavailable, so try to allocate a free area again.
6415 	 */
6416 	if (mas_nomem(&mas, gfp))
6417 		goto retry;
6418 
6419 	if (mas_is_err(&mas))
6420 		ret = xa_err(mas.node);
6421 	else
6422 		*startp = mas.index;
6423 
6424 unlock:
6425 	mtree_unlock(mt);
6426 	return ret;
6427 }
6428 EXPORT_SYMBOL(mtree_alloc_range);
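
/*
 * Example (editorial sketch): allocating a free span from a tree created
 * with MT_FLAGS_ALLOC_RANGE, which maintains the per-node gap metadata
 * the empty-area search relies on.  The window, size and pointer are
 * hypothetical.
 *
 *	struct maple_tree mt;
 *	unsigned long start;
 *	int ret;
 *
 *	mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE);
 *	ret = mtree_alloc_range(&mt, &start, ptr, 16, 0, 1023, GFP_KERNEL);
 *	if (!ret)
 *		// A 16-index span within [0, 1023] now holds ptr.
 *		pr_info("allocated at %lx\n", start);
 */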
6429 
6430 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6431 		void *entry, unsigned long size, unsigned long min,
6432 		unsigned long max, gfp_t gfp)
6433 {
6434 	int ret = 0;
6435 
6436 	MA_STATE(mas, mt, 0, 0);
6437 	if (!mt_is_alloc(mt))
6438 		return -EINVAL;
6439 
6440 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6441 		return -EINVAL;
6442 
6443 	mtree_lock(mt);
6444 retry:
6445 	ret = mas_empty_area_rev(&mas, min, max, size);
6446 	if (ret)
6447 		goto unlock;
6448 
6449 	mas_insert(&mas, entry);
6450 	/*
6451 	 * mas_nomem() may release the lock, causing the allocated area
6452 	 * to be unavailable, so try to allocate a free area again.
6453 	 */
6454 	if (mas_nomem(&mas, gfp))
6455 		goto retry;
6456 
6457 	if (mas_is_err(&mas))
6458 		ret = xa_err(mas.node);
6459 	else
6460 		*startp = mas.index;
6461 
6462 unlock:
6463 	mtree_unlock(mt);
6464 	return ret;
6465 }
6466 EXPORT_SYMBOL(mtree_alloc_rrange);
6467 
6468 /**
6469  * mtree_erase() - Find an index and erase the entire range.
6470  * @mt: The maple tree
6471  * @index: The index to erase
6472  *
6473  * Erasing is the same as a walk to an entry then a store of a NULL to that
6474  * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6475  *
6476  * Return: The entry stored at the @index or %NULL
6477  */
6478 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6479 {
6480 	void *entry = NULL;
6481 
6482 	MA_STATE(mas, mt, index, index);
6483 	trace_ma_op(__func__, &mas);
6484 
6485 	mtree_lock(mt);
6486 	entry = mas_erase(&mas);
6487 	mtree_unlock(mt);
6488 
6489 	return entry;
6490 }
6491 EXPORT_SYMBOL(mtree_erase);
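
/*
 * Example (editorial sketch): erasing through the simple API.  Because
 * the whole range containing the index is cleared, erasing any index
 * inside a stored range removes all of it.  The index is hypothetical.
 *
 *	void *old = mtree_erase(&example_tree, 0x1234);
 *	// old is the entry that covered 0x1234, or NULL if none did;
 *	// the entire range that contained 0x1234 is now empty.
 */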
6492 
6493 /**
6494  * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6495  * @mt: The maple tree
6496  *
6497  * Note: Does not handle locking.
6498  */
6499 void __mt_destroy(struct maple_tree *mt)
6500 {
6501 	void *root = mt_root_locked(mt);
6502 
6503 	rcu_assign_pointer(mt->ma_root, NULL);
6504 	if (xa_is_node(root))
6505 		mte_destroy_walk(root, mt);
6506 
6507 	mt->ma_flags = 0;
6508 }
6509 EXPORT_SYMBOL_GPL(__mt_destroy);
6510 
6511 /**
6512  * mtree_destroy() - Destroy a maple tree
6513  * @mt: The maple tree
6514  *
6515  * Frees all resources used by the tree.  Handles locking.
6516  */
6517 void mtree_destroy(struct maple_tree *mt)
6518 {
6519 	mtree_lock(mt);
6520 	__mt_destroy(mt);
6521 	mtree_unlock(mt);
6522 }
6523 EXPORT_SYMBOL(mtree_destroy);
6524 
6525 /**
6526  * mt_find() - Search from the start up until an entry is found.
6527  * @mt: The maple tree
6528  * @index: Pointer which contains the start location of the search
6529  * @max: The maximum value of the search range
6530  *
6531  * Takes RCU read lock internally to protect the search, which does not
6532  * protect the returned pointer after dropping RCU read lock.
6533  * See also: Documentation/core-api/maple_tree.rst
6534  *
6535  * If an entry is found, @index is updated to point to the next possible
6536  * entry, independent of whether the found entry occupies a single index
6537  * or a range of indices.
6538  *
6539  * Return: The entry at or after the @index or %NULL
6540  */
6541 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6542 {
6543 	MA_STATE(mas, mt, *index, *index);
6544 	void *entry;
6545 #ifdef CONFIG_DEBUG_MAPLE_TREE
6546 	unsigned long copy = *index;
6547 #endif
6548 
6549 	trace_ma_read(__func__, &mas);
6550 
6551 	if ((*index) > max)
6552 		return NULL;
6553 
6554 	rcu_read_lock();
6555 retry:
6556 	entry = mas_state_walk(&mas);
6557 	if (mas_is_start(&mas))
6558 		goto retry;
6559 
6560 	if (unlikely(xa_is_zero(entry)))
6561 		entry = NULL;
6562 
6563 	if (entry)
6564 		goto unlock;
6565 
6566 	while (mas_searchable(&mas) && (mas.last < max)) {
6567 		entry = mas_next_entry(&mas, max);
6568 		if (likely(entry && !xa_is_zero(entry)))
6569 			break;
6570 	}
6571 
6572 	if (unlikely(xa_is_zero(entry)))
6573 		entry = NULL;
6574 unlock:
6575 	rcu_read_unlock();
6576 	if (likely(entry)) {
6577 		*index = mas.last + 1;
6578 #ifdef CONFIG_DEBUG_MAPLE_TREE
6579 		if (MT_WARN_ON(mt, (*index) && ((*index) <= copy)))
6580 			pr_err("index not increased! %lx <= %lx\n",
6581 			       *index, copy);
6582 #endif
6583 	}
6584 
6585 	return entry;
6586 }
6587 EXPORT_SYMBOL(mt_find);
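
/*
 * Example (editorial sketch): mt_find() and mt_find_after() underpin the
 * mt_for_each() iterator, which advances @index past each found range and
 * can therefore be restarted or resumed with no maple state to maintain.
 * The tree name is hypothetical.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mt_for_each(&example_tree, entry, index, ULONG_MAX)
 *		pr_info("found %p, next search starts at %lx\n", entry, index);
 */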
6588 
6589 /**
6590  * mt_find_after() - Search from the start up until an entry is found.
6591  * @mt: The maple tree
6592  * @index: Pointer which contains the start location of the search
6593  * @max: The maximum value to check
6594  *
6595  * Same as mt_find() except that it checks @index for 0 before
6596  * searching. If @index == 0, the search is aborted. This covers a wrap
6597  * around of @index to 0 in an iterator loop.
6598  *
6599  * Return: The entry at or after the @index or %NULL
6600  */
6601 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6602 		    unsigned long max)
6603 {
6604 	if (!(*index))
6605 		return NULL;
6606 
6607 	return mt_find(mt, index, max);
6608 }
6609 EXPORT_SYMBOL(mt_find_after);
6610 
6611 #ifdef CONFIG_DEBUG_MAPLE_TREE
6612 atomic_t maple_tree_tests_run;
6613 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6614 atomic_t maple_tree_tests_passed;
6615 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6616 
6617 #ifndef __KERNEL__
6618 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6619 void mt_set_non_kernel(unsigned int val)
6620 {
6621 	kmem_cache_set_non_kernel(maple_node_cache, val);
6622 }
6623 
6624 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6625 unsigned long mt_get_alloc_size(void)
6626 {
6627 	return kmem_cache_get_alloc(maple_node_cache);
6628 }
6629 
6630 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6631 void mt_zero_nr_tallocated(void)
6632 {
6633 	kmem_cache_zero_nr_tallocated(maple_node_cache);
6634 }
6635 
6636 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6637 unsigned int mt_nr_tallocated(void)
6638 {
6639 	return kmem_cache_nr_tallocated(maple_node_cache);
6640 }
6641 
6642 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6643 unsigned int mt_nr_allocated(void)
6644 {
6645 	return kmem_cache_nr_allocated(maple_node_cache);
6646 }
6647 
6648 /*
6649  * mas_dead_node() - Check if the maple state is pointing to a dead node.
6650  * @mas: The maple state
6651  * @index: The index to restore in @mas.
6652  *
6653  * Used in test code.
6654  * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6655  */
6656 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6657 {
6658 	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6659 		return 0;
6660 
6661 	if (likely(!mte_dead_node(mas->node)))
6662 		return 0;
6663 
6664 	mas_rewalk(mas, index);
6665 	return 1;
6666 }
6667 
6668 void mt_cache_shrink(void)
6669 {
6670 }
6671 #else
6672 /*
6673  * mt_cache_shrink() - For testing, don't use this.
6674  *
6675  * Certain testcases can trigger an OOM when combined with other memory
6676  * debugging configuration options.  This function is used to reduce the
6677  * possibility of an out-of-memory event due to kmem_cache objects remaining
6678  * around for longer than usual.
6679  */
6680 void mt_cache_shrink(void)
6681 {
6682 	kmem_cache_shrink(maple_node_cache);
6683 
6684 }
6685 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6686 
6687 #endif /* not defined __KERNEL__ */
6688 /*
6689  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6690  * @mas: The maple state
6691  * @offset: The offset into the slot array to fetch.
6692  *
6693  * Return: The entry stored at @offset.
6694  */
6695 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6696 		unsigned char offset)
6697 {
6698 	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6699 			offset);
6700 }
6701 
6702 /* Depth first search, post-order */
6703 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6704 {
6705 
6706 	struct maple_enode *p = MAS_NONE, *mn = mas->node;
6707 	unsigned long p_min, p_max;
6708 
6709 	mas_next_node(mas, mas_mn(mas), max);
6710 	if (!mas_is_none(mas))
6711 		return;
6712 
6713 	if (mte_is_root(mn))
6714 		return;
6715 
6716 	mas->node = mn;
6717 	mas_ascend(mas);
6718 	do {
6719 		p = mas->node;
6720 		p_min = mas->min;
6721 		p_max = mas->max;
6722 		mas_prev_node(mas, 0);
6723 	} while (!mas_is_none(mas));
6724 
6725 	mas->node = p;
6726 	mas->max = p_max;
6727 	mas->min = p_min;
6728 }
6729 
6730 /* Tree validations */
6731 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6732 		unsigned long min, unsigned long max, unsigned int depth,
6733 		enum mt_dump_format format);
6734 static void mt_dump_range(unsigned long min, unsigned long max,
6735 			  unsigned int depth, enum mt_dump_format format)
6736 {
6737 	static const char spaces[] = "                                ";
6738 
6739 	switch (format) {
6740 	case mt_dump_hex:
6741 		if (min == max)
6742 			pr_info("%.*s%lx: ", depth * 2, spaces, min);
6743 		else
6744 			pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
6745 		break;
6746 	default:
6747 	case mt_dump_dec:
6748 		if (min == max)
6749 			pr_info("%.*s%lu: ", depth * 2, spaces, min);
6750 		else
6751 			pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6752 	}
6753 }
6754 
6755 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6756 			  unsigned int depth, enum mt_dump_format format)
6757 {
6758 	mt_dump_range(min, max, depth, format);
6759 
6760 	if (xa_is_value(entry))
6761 		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6762 				xa_to_value(entry), entry);
6763 	else if (xa_is_zero(entry))
6764 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
6765 	else if (mt_is_reserved(entry))
6766 		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6767 	else
6768 		pr_cont("%p\n", entry);
6769 }
6770 
6771 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6772 		unsigned long min, unsigned long max, unsigned int depth,
6773 		enum mt_dump_format format)
6774 {
6775 	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6776 	bool leaf = mte_is_leaf(entry);
6777 	unsigned long first = min;
6778 	int i;
6779 
6780 	pr_cont(" contents: ");
6781 	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) {
6782 		switch (format) {
6783 		case mt_dump_hex:
6784 			pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
6785 			break;
6786 		default:
6787 		case mt_dump_dec:
6788 			pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6789 		}
6790 	}
6791 	pr_cont("%p\n", node->slot[i]);
6792 	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6793 		unsigned long last = max;
6794 
6795 		if (i < (MAPLE_RANGE64_SLOTS - 1))
6796 			last = node->pivot[i];
6797 		else if (!node->slot[i] && max != mt_node_max(entry))
6798 			break;
6799 		if (last == 0 && i > 0)
6800 			break;
6801 		if (leaf)
6802 			mt_dump_entry(mt_slot(mt, node->slot, i),
6803 					first, last, depth + 1, format);
6804 		else if (node->slot[i])
6805 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6806 					first, last, depth + 1, format);
6807 
6808 		if (last == max)
6809 			break;
6810 		if (last > max) {
6811 			switch(format) {
6812 			case mt_dump_hex:
6813 				pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
6814 					node, last, max, i);
6815 				break;
6816 			default:
6817 			case mt_dump_dec:
6818 				pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6819 					node, last, max, i);
6820 			}
6821 		}
6822 		first = last + 1;
6823 	}
6824 }
6825 
6826 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6827 	unsigned long min, unsigned long max, unsigned int depth,
6828 	enum mt_dump_format format)
6829 {
6830 	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6831 	bool leaf = mte_is_leaf(entry);
6832 	unsigned long first = min;
6833 	int i;
6834 
6835 	pr_cont(" contents: ");
6836 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6837 		pr_cont("%lu ", node->gap[i]);
6838 	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6839 	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6840 		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6841 	pr_cont("%p\n", node->slot[i]);
6842 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6843 		unsigned long last = max;
6844 
6845 		if (i < (MAPLE_ARANGE64_SLOTS - 1))
6846 			last = node->pivot[i];
6847 		else if (!node->slot[i])
6848 			break;
6849 		if (last == 0 && i > 0)
6850 			break;
6851 		if (leaf)
6852 			mt_dump_entry(mt_slot(mt, node->slot, i),
6853 					first, last, depth + 1, format);
6854 		else if (node->slot[i])
6855 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6856 					first, last, depth + 1, format);
6857 
6858 		if (last == max)
6859 			break;
6860 		if (last > max) {
6861 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6862 					node, last, max, i);
6863 			break;
6864 		}
6865 		first = last + 1;
6866 	}
6867 }
6868 
6869 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6870 		unsigned long min, unsigned long max, unsigned int depth,
6871 		enum mt_dump_format format)
6872 {
6873 	struct maple_node *node = mte_to_node(entry);
6874 	unsigned int type = mte_node_type(entry);
6875 	unsigned int i;
6876 
6877 	mt_dump_range(min, max, depth, format);
6878 
6879 	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6880 			node ? node->parent : NULL);
6881 	switch (type) {
6882 	case maple_dense:
6883 		pr_cont("\n");
6884 		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6885 			if (min + i > max)
6886 				pr_cont("OUT OF RANGE: ");
6887 			mt_dump_entry(mt_slot(mt, node->slot, i),
6888 					min + i, min + i, depth, format);
6889 		}
6890 		break;
6891 	case maple_leaf_64:
6892 	case maple_range_64:
6893 		mt_dump_range64(mt, entry, min, max, depth, format);
6894 		break;
6895 	case maple_arange_64:
6896 		mt_dump_arange64(mt, entry, min, max, depth, format);
6897 		break;
6898 
6899 	default:
6900 		pr_cont(" UNKNOWN TYPE\n");
6901 	}
6902 }
6903 
6904 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format)
6905 {
6906 	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6907 
6908 	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6909 		 mt, mt->ma_flags, mt_height(mt), entry);
6910 	if (!xa_is_node(entry))
6911 		mt_dump_entry(entry, 0, 0, 0, format);
6912 	else if (entry)
6913 		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format);
6914 }
6915 EXPORT_SYMBOL_GPL(mt_dump);
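
/*
 * Example (editorial sketch): dumping a tree while debugging.  Output
 * goes to the kernel log; the caller should hold the lock or be in an
 * RCU read-side section so the dumped structure is stable.
 *
 *	mt_dump(&example_tree, mt_dump_hex);	// hex indices
 *	mt_dump(&example_tree, mt_dump_dec);	// decimal indices
 */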
6916 
6917 /*
6918  * Calculate the maximum gap in a node and check if that's what is reported in
6919  * the parent (unless root).
6920  */
6921 static void mas_validate_gaps(struct ma_state *mas)
6922 {
6923 	struct maple_enode *mte = mas->node;
6924 	struct maple_node *p_mn, *node = mte_to_node(mte);
6925 	enum maple_type mt = mte_node_type(mas->node);
6926 	unsigned long gap = 0, max_gap = 0;
6927 	unsigned long p_end, p_start = mas->min;
6928 	unsigned char p_slot, offset;
6929 	unsigned long *gaps = NULL;
6930 	unsigned long *pivots = ma_pivots(node, mt);
6931 	unsigned int i;
6932 
6933 	if (ma_is_dense(mt)) {
6934 		for (i = 0; i < mt_slot_count(mte); i++) {
6935 			if (mas_get_slot(mas, i)) {
6936 				if (gap > max_gap)
6937 					max_gap = gap;
6938 				gap = 0;
6939 				continue;
6940 			}
6941 			gap++;
6942 		}
6943 		goto counted;
6944 	}
6945 
6946 	gaps = ma_gaps(node, mt);
6947 	for (i = 0; i < mt_slot_count(mte); i++) {
6948 		p_end = mas_safe_pivot(mas, pivots, i, mt);
6949 
6950 		if (!gaps) {
6951 			if (!mas_get_slot(mas, i))
6952 				gap = p_end - p_start + 1;
6953 		} else {
6954 			void *entry = mas_get_slot(mas, i);
6955 
6956 			gap = gaps[i];
6957 			MT_BUG_ON(mas->tree, !entry);
6958 
6959 			if (gap > p_end - p_start + 1) {
6960 				pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
6961 				       mas_mn(mas), i, gap, p_end, p_start,
6962 				       p_end - p_start + 1);
6963 				MT_BUG_ON(mas->tree, gap > p_end - p_start + 1);
6964 			}
6965 		}
6966 
6967 		if (gap > max_gap)
6968 			max_gap = gap;
6969 
6970 		p_start = p_end + 1;
6971 		if (p_end >= mas->max)
6972 			break;
6973 	}
6974 
6975 counted:
6976 	if (mt == maple_arange_64) {
6977 		offset = ma_meta_gap(node, mt);
6978 		if (offset > i) {
6979 			pr_err("gap offset %p[%u] is invalid\n", node, offset);
6980 			MT_BUG_ON(mas->tree, 1);
6981 		}
6982 
6983 		if (gaps[offset] != max_gap) {
6984 			pr_err("gap %p[%u] is not the largest gap %lu\n",
6985 			       node, offset, max_gap);
6986 			MT_BUG_ON(mas->tree, 1);
6987 		}
6988 
6989 		MT_BUG_ON(mas->tree, !gaps);
6990 		for (i++ ; i < mt_slot_count(mte); i++) {
6991 			if (gaps[i] != 0) {
6992 				pr_err("gap %p[%u] beyond node limit != 0\n",
6993 				       node, i);
6994 				MT_BUG_ON(mas->tree, 1);
6995 			}
6996 		}
6997 	}
6998 
6999 	if (mte_is_root(mte))
7000 		return;
7001 
7002 	p_slot = mte_parent_slot(mas->node);
7003 	p_mn = mte_parent(mte);
7004 	MT_BUG_ON(mas->tree, max_gap > mas->max);
7005 	if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) {
7006 		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7007 		mt_dump(mas->tree, mt_dump_hex);
7008 		MT_BUG_ON(mas->tree, 1);
7009 	}
7010 }
7011 
7012 static void mas_validate_parent_slot(struct ma_state *mas)
7013 {
7014 	struct maple_node *parent;
7015 	struct maple_enode *node;
7016 	enum maple_type p_type;
7017 	unsigned char p_slot;
7018 	void __rcu **slots;
7019 	int i;
7020 
7021 	if (mte_is_root(mas->node))
7022 		return;
7023 
7024 	p_slot = mte_parent_slot(mas->node);
7025 	p_type = mas_parent_type(mas, mas->node);
7026 	parent = mte_parent(mas->node);
7027 	slots = ma_slots(parent, p_type);
7028 	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7029 
7030 	/* Check prev/next parent slot for duplicate node entry */
7031 
7032 	for (i = 0; i < mt_slots[p_type]; i++) {
7033 		node = mas_slot(mas, slots, i);
7034 		if (i == p_slot) {
7035 			if (node != mas->node)
7036 				pr_err("parent %p[%u] does not have %p\n",
7037 					parent, i, mas_mn(mas));
7038 			MT_BUG_ON(mas->tree, node != mas->node);
7039 		} else if (node == mas->node) {
7040 			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7041 			       mas_mn(mas), parent, i, p_slot);
7042 			MT_BUG_ON(mas->tree, node == mas->node);
7043 		}
7044 	}
7045 }
7046 
7047 static void mas_validate_child_slot(struct ma_state *mas)
7048 {
7049 	enum maple_type type = mte_node_type(mas->node);
7050 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7051 	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7052 	struct maple_enode *child;
7053 	unsigned char i;
7054 
7055 	if (mte_is_leaf(mas->node))
7056 		return;
7057 
7058 	for (i = 0; i < mt_slots[type]; i++) {
7059 		child = mas_slot(mas, slots, i);
7060 
7061 		if (!child) {
7062 			pr_err("Non-leaf node lacks child at %p[%u]\n",
7063 			       mas_mn(mas), i);
7064 			MT_BUG_ON(mas->tree, 1);
7065 		}
7066 
7067 		if (mte_parent_slot(child) != i) {
7068 			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7069 			       mas_mn(mas), i, mte_to_node(child),
7070 			       mte_parent_slot(child));
7071 			MT_BUG_ON(mas->tree, 1);
7072 		}
7073 
7074 		if (mte_parent(child) != mte_to_node(mas->node)) {
7075 			pr_err("child %p has parent %p not %p\n",
7076 			       mte_to_node(child), mte_parent(child),
7077 			       mte_to_node(mas->node));
7078 			MT_BUG_ON(mas->tree, 1);
7079 		}
7080 
7081 		if (i < mt_pivots[type] && pivots[i] == mas->max)
7082 			break;
7083 	}
7084 }
7085 
7086 /*
7087  * Validate all pivots are within mas->min and mas->max, check metadata ends
7088  * where the maximum ends and ensure there is no slots or pivots set outside of
7089  * the end of the data.
7090  */
7091 static void mas_validate_limits(struct ma_state *mas)
7092 {
7093 	int i;
7094 	unsigned long prev_piv = 0;
7095 	enum maple_type type = mte_node_type(mas->node);
7096 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7097 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7098 
7099 	for (i = 0; i < mt_slots[type]; i++) {
7100 		unsigned long piv;
7101 
7102 		piv = mas_safe_pivot(mas, pivots, i, type);
7103 
7104 		if (!piv && (i != 0)) {
7105 			pr_err("Missing node limit pivot at %p[%u]",
7106 			       mas_mn(mas), i);
7107 			MAS_WARN_ON(mas, 1);
7108 		}
7109 
7110 		if (prev_piv > piv) {
7111 			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7112 				mas_mn(mas), i, piv, prev_piv);
7113 			MAS_WARN_ON(mas, piv < prev_piv);
7114 		}
7115 
7116 		if (piv < mas->min) {
7117 			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7118 				piv, mas->min);
7119 			MAS_WARN_ON(mas, piv < mas->min);
7120 		}
7121 		if (piv > mas->max) {
7122 			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7123 				piv, mas->max);
7124 			MAS_WARN_ON(mas, piv > mas->max);
7125 		}
7126 		prev_piv = piv;
7127 		if (piv == mas->max)
7128 			break;
7129 	}
7130 
7131 	if (mas_data_end(mas) != i) {
7132 		pr_err("node%p: data_end %u != the last slot offset %u\n",
7133 		       mas_mn(mas), mas_data_end(mas), i);
7134 		MT_BUG_ON(mas->tree, 1);
7135 	}
7136 
7137 	for (i += 1; i < mt_slots[type]; i++) {
7138 		void *entry = mas_slot(mas, slots, i);
7139 
7140 		if (entry && (i != mt_slots[type] - 1)) {
7141 			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7142 			       i, entry);
7143 			MT_BUG_ON(mas->tree, entry != NULL);
7144 		}
7145 
7146 		if (i < mt_pivots[type]) {
7147 			unsigned long piv = pivots[i];
7148 
7149 			if (!piv)
7150 				continue;
7151 
7152 			pr_err("%p[%u] should not have piv %lu\n",
7153 			       mas_mn(mas), i, piv);
7154 			MAS_WARN_ON(mas, i < mt_pivots[type] - 1);
7155 		}
7156 	}
7157 }
7158 
7159 static void mt_validate_nulls(struct maple_tree *mt)
7160 {
7161 	void *entry, *last = (void *)1;
7162 	unsigned char offset = 0;
7163 	void __rcu **slots;
7164 	MA_STATE(mas, mt, 0, 0);
7165 
7166 	mas_start(&mas);
7167 	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7168 		return;
7169 
7170 	while (!mte_is_leaf(mas.node))
7171 		mas_descend(&mas);
7172 
7173 	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7174 	do {
7175 		entry = mas_slot(&mas, slots, offset);
7176 		if (!last && !entry) {
7177 			pr_err("Sequential nulls end at %p[%u]\n",
7178 				mas_mn(&mas), offset);
7179 		}
7180 		MT_BUG_ON(mt, !last && !entry);
7181 		last = entry;
7182 		if (offset == mas_data_end(&mas)) {
7183 			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7184 			if (mas_is_none(&mas))
7185 				return;
7186 			offset = 0;
7187 			slots = ma_slots(mte_to_node(mas.node),
7188 					 mte_node_type(mas.node));
7189 		} else {
7190 			offset++;
7191 		}
7192 
7193 	} while (!mas_is_none(&mas));
7194 }
7195 
7196 /*
7197  * Validate a maple tree by checking:
7198  * 1. The limits (pivots are within mas->min to mas->max)
7199  * 2. The gap is correctly set in the parents
7200  */
7201 void mt_validate(struct maple_tree *mt)
7202 {
7203 	unsigned char end;
7204 
7205 	MA_STATE(mas, mt, 0, 0);
7206 	rcu_read_lock();
7207 	mas_start(&mas);
7208 	if (!mas_searchable(&mas))
7209 		goto done;
7210 
7211 	while (!mte_is_leaf(mas.node))
7212 		mas_descend(&mas);
7213 
7214 	while (!mas_is_none(&mas)) {
7215 		MAS_WARN_ON(&mas, mte_dead_node(mas.node));
7216 		end = mas_data_end(&mas);
7217 		if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
7218 				(mas.max != ULONG_MAX))) {
7219 			pr_err("Invalid size %u of %p\n", end, mas_mn(&mas));
7220 		}
7221 
7222 		mas_validate_parent_slot(&mas);
7223 		mas_validate_limits(&mas);
7224 		mas_validate_child_slot(&mas);
7225 		if (mt_is_alloc(mt))
7226 			mas_validate_gaps(&mas);
7227 		mas_dfs_postorder(&mas, ULONG_MAX);
7228 	}
7229 	mt_validate_nulls(mt);
7230 done:
7231 	rcu_read_unlock();
7232 
7233 }
7234 EXPORT_SYMBOL_GPL(mt_validate);
7235 
7236 void mas_dump(const struct ma_state *mas)
7237 {
7238 	pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
7239 	if (mas_is_none(mas))
7240 		pr_err("(MAS_NONE) ");
7241 	else if (mas_is_ptr(mas))
7242 		pr_err("(MAS_ROOT) ");
7243 	else if (mas_is_start(mas))
7244 		 pr_err("(MAS_START) ");
7245 	else if (mas_is_paused(mas))
7246 		pr_err("(MAS_PAUSED) ");
7247 
7248 	pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
7249 	pr_err("     min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
7250 	       mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
7251 	if (mas->index > mas->last)
7252 		pr_err("Check index & last\n");
7253 }
7254 EXPORT_SYMBOL_GPL(mas_dump);
7255 
7256 void mas_wr_dump(const struct ma_wr_state *wr_mas)
7257 {
7258 	pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
7259 	       wr_mas->node, wr_mas->r_min, wr_mas->r_max);
7260 	pr_err("        type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
7261 	       wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
7262 	       wr_mas->end_piv);
7263 }
7264 EXPORT_SYMBOL_GPL(mas_wr_dump);
7265 
7266 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7267