1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Maple Tree implementation
4  * Copyright (c) 2018-2022 Oracle Corporation
5  * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
6  *	    Matthew Wilcox <willy@infradead.org>
7  */
8 
9 /*
10  * DOC: Interesting implementation details of the Maple Tree
11  *
12  * Each node type has a number of slots for entries and a number of slots for
13  * pivots.  In the case of dense nodes, the pivots are implied by the position
14  * and are simply the slot index + the minimum of the node.
15  *
16  * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
17  * indicate that the tree is specifying ranges.  Pivots may appear in the
18  * subtree with an entry attached to the value, whereas keys are unique to a
19  * specific position of a B-tree.  Pivot values are inclusive of the slot with
20  * the same index.
21  *
22  *
23  * The following illustrates the layout of a range64 node's slots and pivots.
24  *
25  *
26  *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
27  *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
28  *           │   │   │   │     │    │    │    │    └─ Implied maximum
29  *           │   │   │   │     │    │    │    └─ Pivot 14
30  *           │   │   │   │     │    │    └─ Pivot 13
31  *           │   │   │   │     │    └─ Pivot 12
32  *           │   │   │   │     └─ Pivot 11
33  *           │   │   │   └─ Pivot 2
34  *           │   │   └─ Pivot 1
35  *           │   └─ Pivot 0
36  *           └─  Implied minimum
37  *
38  * Slot contents:
39  *  Internal (non-leaf) nodes contain pointers to other nodes.
40  *  Leaf nodes contain entries.
41  *
42  * The location of interest is often referred to as an offset.  All offsets have
43  * a slot, but the last offset has an implied pivot from the node above (or
44  * ULONG_MAX for the root node).
45  *
46  * Ranges complicate certain write activities.  When modifying any of
47  * the B-tree variants, it is known that one entry will either be added or
48  * deleted.  When modifying the Maple Tree, one store operation may overwrite
49  * the entire data set, or one half of the tree, or the middle half of the tree.
50  *
51  */
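
/*
 * Editorial example (not in the original source): a leaf range64 node
 * covering 1-100 might hold
 *
 *	pivot:  10   20   35  (implied 100)
 *	slot:    A    B  NULL      C
 *
 * encoding 1-10 => A, 11-20 => B, 21-35 => empty, and 36-100 => C, since
 * each pivot is the inclusive upper bound of the slot with the same index.
 */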
52 
53 
54 #include <linux/maple_tree.h>
55 #include <linux/xarray.h>
56 #include <linux/types.h>
57 #include <linux/export.h>
58 #include <linux/slab.h>
59 #include <linux/limits.h>
60 #include <asm/barrier.h>
61 
62 #define CREATE_TRACE_POINTS
63 #include <trace/events/maple_tree.h>
64 
65 #define MA_ROOT_PARENT 1
66 
67 /*
68  * Maple state flags
69  * * MA_STATE_BULK		- Bulk insert mode
70  * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
71  * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
72  */
73 #define MA_STATE_BULK		1
74 #define MA_STATE_REBALANCE	2
75 #define MA_STATE_PREALLOC	4
76 
77 #define ma_parent_ptr(x) ((struct maple_pnode *)(x))
78 #define ma_mnode_ptr(x) ((struct maple_node *)(x))
79 #define ma_enode_ptr(x) ((struct maple_enode *)(x))
80 static struct kmem_cache *maple_node_cache;
81 
82 #ifdef CONFIG_DEBUG_MAPLE_TREE
83 static const unsigned long mt_max[] = {
84 	[maple_dense]		= MAPLE_NODE_SLOTS,
85 	[maple_leaf_64]		= ULONG_MAX,
86 	[maple_range_64]	= ULONG_MAX,
87 	[maple_arange_64]	= ULONG_MAX,
88 };
89 #define mt_node_max(x) mt_max[mte_node_type(x)]
90 #endif
91 
92 static const unsigned char mt_slots[] = {
93 	[maple_dense]		= MAPLE_NODE_SLOTS,
94 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
95 	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
96 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
97 };
98 #define mt_slot_count(x) mt_slots[mte_node_type(x)]
99 
100 static const unsigned char mt_pivots[] = {
101 	[maple_dense]		= 0,
102 	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
103 	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
104 	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
105 };
106 #define mt_pivot_count(x) mt_pivots[mte_node_type(x)]
107 
108 static const unsigned char mt_min_slots[] = {
109 	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
110 	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
111 	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
112 	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
113 };
114 #define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]
115 
116 #define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
117 #define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)
118 
119 struct maple_big_node {
120 	struct maple_pnode *parent;
121 	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
122 	union {
123 		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
124 		struct {
125 			unsigned long padding[MAPLE_BIG_NODE_GAPS];
126 			unsigned long gap[MAPLE_BIG_NODE_GAPS];
127 		};
128 	};
129 	unsigned char b_end;
130 	enum maple_type type;
131 };
132 
133 /*
134  * The maple_subtree_state is used to build a tree to replace a segment of an
135  * existing tree in a more atomic way.  Any walkers of the older tree will hit a
136  * dead node and restart on updates.
137  */
138 struct maple_subtree_state {
139 	struct ma_state *orig_l;	/* Original left side of subtree */
140 	struct ma_state *orig_r;	/* Original right side of subtree */
141 	struct ma_state *l;		/* New left side of subtree */
142 	struct ma_state *m;		/* New middle of subtree (rare) */
143 	struct ma_state *r;		/* New right side of subtree */
144 	struct ma_topiary *free;	/* nodes to be freed */
145 	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
146 	struct maple_big_node *bn;
147 };
148 
149 #ifdef CONFIG_KASAN_STACK
150 /* Prevent mas_wr_bnode() from exceeding the stack frame limit */
151 #define noinline_for_kasan noinline_for_stack
152 #else
153 #define noinline_for_kasan inline
154 #endif
155 
156 /* Functions */
157 static inline struct maple_node *mt_alloc_one(gfp_t gfp)
158 {
159 	return kmem_cache_alloc(maple_node_cache, gfp);
160 }
161 
162 static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
163 {
164 	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
165 }
166 
167 static inline void mt_free_bulk(size_t size, void __rcu **nodes)
168 {
169 	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
170 }
171 
172 static void mt_free_rcu(struct rcu_head *head)
173 {
174 	struct maple_node *node = container_of(head, struct maple_node, rcu);
175 
176 	kmem_cache_free(maple_node_cache, node);
177 }
178 
179 /*
180  * ma_free_rcu() - Use rcu callback to free a maple node
181  * @node: The node to free
182  *
183  * The maple tree uses the parent pointer to indicate this node is no longer in
184  * use and will be freed.
185  */
186 static void ma_free_rcu(struct maple_node *node)
187 {
188 	WARN_ON(node->parent != ma_parent_ptr(node));
189 	call_rcu(&node->rcu, mt_free_rcu);
190 }
191 
192 static void mas_set_height(struct ma_state *mas)
193 {
194 	unsigned int new_flags = mas->tree->ma_flags;
195 
196 	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
197 	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
198 	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
199 	mas->tree->ma_flags = new_flags;
200 }
201 
202 static unsigned int mas_mt_height(struct ma_state *mas)
203 {
204 	return mt_height(mas->tree);
205 }
206 
207 static inline enum maple_type mte_node_type(const struct maple_enode *entry)
208 {
209 	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
210 		MAPLE_NODE_TYPE_MASK;
211 }
212 
213 static inline bool ma_is_dense(const enum maple_type type)
214 {
215 	return type < maple_leaf_64;
216 }
217 
218 static inline bool ma_is_leaf(const enum maple_type type)
219 {
220 	return type < maple_range_64;
221 }
222 
223 static inline bool mte_is_leaf(const struct maple_enode *entry)
224 {
225 	return ma_is_leaf(mte_node_type(entry));
226 }
227 
228 /*
229  * We also reserve values with the bottom two bits set to '10' which are
230  * below 4096.
231  */
232 static inline bool mt_is_reserved(const void *entry)
233 {
234 	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
235 		xa_is_internal(entry);
236 }
237 
238 static inline void mas_set_err(struct ma_state *mas, long err)
239 {
240 	mas->node = MA_ERROR(err);
241 }
242 
243 static inline bool mas_is_ptr(struct ma_state *mas)
244 {
245 	return mas->node == MAS_ROOT;
246 }
247 
248 static inline bool mas_is_start(struct ma_state *mas)
249 {
250 	return mas->node == MAS_START;
251 }
252 
253 bool mas_is_err(struct ma_state *mas)
254 {
255 	return xa_is_err(mas->node);
256 }
257 
258 static inline bool mas_searchable(struct ma_state *mas)
259 {
260 	if (mas_is_none(mas))
261 		return false;
262 
263 	if (mas_is_ptr(mas))
264 		return false;
265 
266 	return true;
267 }
268 
269 static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
270 {
271 	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
272 }
273 
274 /*
275  * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
276  * @entry: The maple encoded node
277  *
278  * Return: a maple topiary pointer
279  */
280 static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
281 {
282 	return (struct maple_topiary *)
283 		((unsigned long)entry & ~MAPLE_NODE_MASK);
284 }
285 
286 /*
287  * mas_mn() - Get the maple state node.
288  * @mas: The maple state
289  *
290  * Return: the maple node (not encoded - bare pointer).
291  */
292 static inline struct maple_node *mas_mn(const struct ma_state *mas)
293 {
294 	return mte_to_node(mas->node);
295 }
296 
297 /*
298  * mte_set_node_dead() - Set a maple encoded node as dead.
299  * @mn: The maple encoded node.
300  */
301 static inline void mte_set_node_dead(struct maple_enode *mn)
302 {
303 	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
304 	smp_wmb(); /* Needed for RCU */
305 }
306 
307 /* Bit 1 indicates the root is a node */
308 #define MAPLE_ROOT_NODE			0x02
309 /* maple_type is stored in bits 3-6 */
310 #define MAPLE_ENODE_TYPE_SHIFT		0x03
311 /* Bit 2 means a NULL somewhere below */
312 #define MAPLE_ENODE_NULL		0x04
313 
314 static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
315 					     enum maple_type type)
316 {
317 	return (void *)((unsigned long)node |
318 			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
319 }
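
/*
 * Worked example (editorial, assuming the enum order maple_dense,
 * maple_leaf_64, maple_range_64, maple_arange_64): a node at address 0x1000
 * encoded as maple_range_64 (value 2) becomes
 *
 *	0x1000 | (2 << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL == 0x1014
 *
 * mte_node_type() shifts and masks the low bits to recover the type, and
 * mte_to_node() masks off MAPLE_NODE_MASK to recover the bare pointer.
 */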
320 
321 static inline void *mte_mk_root(const struct maple_enode *node)
322 {
323 	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
324 }
325 
326 static inline void *mte_safe_root(const struct maple_enode *node)
327 {
328 	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
329 }
330 
331 static inline void *mte_set_full(const struct maple_enode *node)
332 {
333 	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
334 }
335 
336 static inline void *mte_clear_full(const struct maple_enode *node)
337 {
338 	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
339 }
340 
341 static inline bool mte_has_null(const struct maple_enode *node)
342 {
343 	return (unsigned long)node & MAPLE_ENODE_NULL;
344 }
345 
346 static inline bool ma_is_root(struct maple_node *node)
347 {
348 	return ((unsigned long)node->parent & MA_ROOT_PARENT);
349 }
350 
351 static inline bool mte_is_root(const struct maple_enode *node)
352 {
353 	return ma_is_root(mte_to_node(node));
354 }
355 
356 static inline bool mas_is_root_limits(const struct ma_state *mas)
357 {
358 	return !mas->min && mas->max == ULONG_MAX;
359 }
360 
361 static inline bool mt_is_alloc(struct maple_tree *mt)
362 {
363 	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
364 }
365 
366 /*
367  * The Parent Pointer
368  * Excluding root, the parent pointer is 256B aligned like all other tree nodes.
369  * When storing 32 or 64 bit values, the offset can fit into 5 bits.  The 16
370  * bit values need an extra bit to store the offset.  This extra bit comes from
371  * a reuse of the last bit in the node type.  This is possible by using bit 1 to
372  * indicate if bit 2 is part of the type or the slot.
373  *
374  * Note types:
375  *  0b??1 = Root
376  *  0b?00 = 16 bit nodes
377  *  0b010 = 32 bit nodes
378  *  0b110 = 64 bit nodes
379  *
380  * Slot size and alignment
381  *  0b??1 : Root
382  *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
383  *  0b010 : 32 bit values, type in 0-2, slot in 3-7
384  *  0b110 : 64 bit values, type in 0-2, slot in 3-7
385  */
386 
387 #define MAPLE_PARENT_ROOT		0x01
388 
389 #define MAPLE_PARENT_SLOT_SHIFT		0x03
390 #define MAPLE_PARENT_SLOT_MASK		0xF8
391 
392 #define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
393 #define MAPLE_PARENT_16B_SLOT_MASK	0xFC
394 
395 #define MAPLE_PARENT_RANGE64		0x06
396 #define MAPLE_PARENT_RANGE32		0x04
397 #define MAPLE_PARENT_NOT_RANGE16	0x02
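
/*
 * Worked example (editorial): placing a child in slot 5 of a maple_range_64
 * parent P stores
 *
 *	P | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 == P | 0x2e
 *
 * To decode, bit 1 (MAPLE_PARENT_NOT_RANGE16) is set, so the shift is 3 and
 * the slot is (0x2e & MAPLE_PARENT_16B_SLOT_MASK) >> 3 == 0x2c >> 3 == 5.
 */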
398 
399 /*
400  * mte_parent_shift() - Get the parent shift for the slot storage.
401  * @parent: The parent pointer cast as an unsigned long
402  * Return: The shift into that pointer to the start of the slot
403  */
404 static inline unsigned long mte_parent_shift(unsigned long parent)
405 {
406 	/* Note bit 1 == 0 means 16B */
407 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
408 		return MAPLE_PARENT_SLOT_SHIFT;
409 
410 	return MAPLE_PARENT_16B_SLOT_SHIFT;
411 }
412 
413 /*
414  * mte_parent_slot_mask() - Get the slot mask for the parent.
415  * @parent: The parent pointer cast as an unsigned long.
416  * Return: The slot mask for that parent.
417  */
418 static inline unsigned long mte_parent_slot_mask(unsigned long parent)
419 {
420 	/* Note bit 1 == 0 means 16B */
421 	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
422 		return MAPLE_PARENT_SLOT_MASK;
423 
424 	return MAPLE_PARENT_16B_SLOT_MASK;
425 }
426 
427 /*
428  * mte_parent_enum() - Return the maple_type of the parent from the stored
429  * parent type.
430  * @p_enode: The parent pointer cast as a maple_enode
431  * @mt: The maple tree, used to check for an allocation (arange) tree
432  * Return: The node->parent maple_type
433  */
434 static inline
435 enum maple_type mte_parent_enum(struct maple_enode *p_enode,
436 				struct maple_tree *mt)
437 {
438 	unsigned long p_type;
439 
440 	p_type = (unsigned long)p_enode;
441 	if (p_type & MAPLE_PARENT_ROOT)
442 		return 0; /* Validated in the caller. */
443 
444 	p_type &= MAPLE_NODE_MASK;
445 	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));
446 
447 	switch (p_type) {
448 	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
449 		if (mt_is_alloc(mt))
450 			return maple_arange_64;
451 		return maple_range_64;
452 	}
453 
454 	return 0;
455 }
456 
457 static inline
458 enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
459 {
460 	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
461 }
462 
463 /*
464  * mte_set_parent() - Set the parent node and encode the slot
465  * @enode: The encoded maple node.
466  * @parent: The encoded maple node that is the parent of @enode.
467  * @slot: The slot that @enode resides in @parent.
468  *
469  * Slot number is encoded in the enode->parent bits 3-6 or 2-6, depending on the
470  * parent type.
471  */
472 static inline
473 void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
474 		    unsigned char slot)
475 {
476 	unsigned long val = (unsigned long)parent;
477 	unsigned long shift;
478 	unsigned long type;
479 	enum maple_type p_type = mte_node_type(parent);
480 
481 	BUG_ON(p_type == maple_dense);
482 	BUG_ON(p_type == maple_leaf_64);
483 
484 	switch (p_type) {
485 	case maple_range_64:
486 	case maple_arange_64:
487 		shift = MAPLE_PARENT_SLOT_SHIFT;
488 		type = MAPLE_PARENT_RANGE64;
489 		break;
490 	default:
491 	case maple_dense:
492 	case maple_leaf_64:
493 		shift = type = 0;
494 		break;
495 	}
496 
497 	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
498 	val |= (slot << shift) | type;
499 	mte_to_node(enode)->parent = ma_parent_ptr(val);
500 }
501 
502 /*
503  * mte_parent_slot() - get the parent slot of @enode.
504  * @enode: The encoded maple node.
505  *
506  * Return: The slot in the parent node where @enode resides.
507  */
508 static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
509 {
510 	unsigned long val = (unsigned long)mte_to_node(enode)->parent;
511 
512 	if (val & MA_ROOT_PARENT)
513 		return 0;
514 
515 	/*
516 	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
517 	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
518 	 */
519 	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
520 }
521 
522 /*
523  * mte_parent() - Get the parent of @node.
524  * @node: The encoded maple node.
525  *
526  * Return: The parent maple node.
527  */
528 static inline struct maple_node *mte_parent(const struct maple_enode *enode)
529 {
530 	return (void *)((unsigned long)
531 			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
532 }
533 
534 /*
535  * ma_dead_node() - check if the @node is dead.
536  * @node: The maple node
537  *
538  * Return: true if dead, false otherwise.
539  */
540 static inline bool ma_dead_node(const struct maple_node *node)
541 {
542 	struct maple_node *parent;
543 
544 	/* Do not reorder reads from the node prior to the parent check */
545 	smp_rmb();
546 	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
547 	return (parent == node);
548 }
549 
550 /*
551  * mte_dead_node() - check if the @enode is dead.
552  * @enode: The encoded maple node
553  *
554  * Return: true if dead, false otherwise.
555  */
556 static inline bool mte_dead_node(const struct maple_enode *enode)
557 {
558 	struct maple_node *parent, *node;
559 
560 	node = mte_to_node(enode);
561 	/* Do not reorder reads from the node prior to the parent check */
562 	smp_rmb();
563 	parent = mte_parent(enode);
564 	return (parent == node);
565 }
566 
567 /*
568  * mas_allocated() - Get the number of nodes allocated in a maple state.
569  * @mas: The maple state
570  *
571  * The ma_state alloc member is overloaded to hold a pointer to the first
572  * allocated node or to the number of requested nodes to allocate.  If bit 0 is
573  * set, then the alloc contains the number of requested nodes.  If there is an
574  * allocated node, then the total allocated nodes is in that node.
575  *
576  * Return: The total number of nodes allocated
577  */
578 static inline unsigned long mas_allocated(const struct ma_state *mas)
579 {
580 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
581 		return 0;
582 
583 	return mas->alloc->total;
584 }
585 
586 /*
587  * mas_set_alloc_req() - Set the requested number of allocations.
588  * @mas: the maple state
589  * @count: the number of allocations.
590  *
591  * The requested number of allocations is either in the first allocated node,
592  * located in @mas->alloc->request_count, or directly in @mas->alloc if there is
593  * no allocated node.  Set the request either in the node or do the necessary
594  * encoding to store in @mas->alloc directly.
595  */
596 static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
597 {
598 	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
599 		if (!count)
600 			mas->alloc = NULL;
601 		else
602 			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
603 		return;
604 	}
605 
606 	mas->alloc->request_count = count;
607 }
608 
609 /*
610  * mas_alloc_req() - get the requested number of allocations.
611  * @mas: The maple state
612  *
613  * The alloc count is either stored directly in @mas, or in
614  * @mas->alloc->request_count if there is at least one node allocated.  Decode
615  * the request count if it's stored directly in @mas->alloc.
616  *
617  * Return: The allocation request count.
618  */
619 static inline unsigned int mas_alloc_req(const struct ma_state *mas)
620 {
621 	if ((unsigned long)mas->alloc & 0x1)
622 		return (unsigned long)(mas->alloc) >> 1;
623 	else if (mas->alloc)
624 		return mas->alloc->request_count;
625 	return 0;
626 }
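
/*
 * Encoding example (editorial): after mas_set_alloc_req(mas, 3) on a state
 * with no allocations, mas->alloc == (struct maple_alloc *)0x7, that is
 * (3 << 1) | 1.  The set bit 0 marks the value as a request count rather
 * than a node pointer, so mas_allocated() reads 0 and mas_alloc_req()
 * reads 3.
 */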
627 
628 /*
629  * ma_pivots() - Get a pointer to the maple node pivots.
630  * @node: The maple node
631  * @type: The node type
632  *
633  * In the event of a dead node, this array may be %NULL
634  *
635  * Return: A pointer to the maple node pivots
636  */
637 static inline unsigned long *ma_pivots(struct maple_node *node,
638 					   enum maple_type type)
639 {
640 	switch (type) {
641 	case maple_arange_64:
642 		return node->ma64.pivot;
643 	case maple_range_64:
644 	case maple_leaf_64:
645 		return node->mr64.pivot;
646 	case maple_dense:
647 		return NULL;
648 	}
649 	return NULL;
650 }
651 
652 /*
653  * ma_gaps() - Get a pointer to the maple node gaps.
654  * @node: The maple node
655  * @type: The node type
656  *
657  * Return: A pointer to the maple node gaps
658  */
659 static inline unsigned long *ma_gaps(struct maple_node *node,
660 				     enum maple_type type)
661 {
662 	switch (type) {
663 	case maple_arange_64:
664 		return node->ma64.gap;
665 	case maple_range_64:
666 	case maple_leaf_64:
667 	case maple_dense:
668 		return NULL;
669 	}
670 	return NULL;
671 }
672 
673 /*
674  * mte_pivot() - Get the pivot at @piv of the maple encoded node.
675  * @mn: The maple encoded node.
676  * @piv: The pivot.
677  *
678  * Return: the pivot at @piv of @mn.
679  */
680 static inline unsigned long mte_pivot(const struct maple_enode *mn,
681 				 unsigned char piv)
682 {
683 	struct maple_node *node = mte_to_node(mn);
684 	enum maple_type type = mte_node_type(mn);
685 
686 	if (piv >= mt_pivots[type]) {
687 		WARN_ON(1);
688 		return 0;
689 	}
690 	switch (type) {
691 	case maple_arange_64:
692 		return node->ma64.pivot[piv];
693 	case maple_range_64:
694 	case maple_leaf_64:
695 		return node->mr64.pivot[piv];
696 	case maple_dense:
697 		return 0;
698 	}
699 	return 0;
700 }
701 
702 /*
703  * mas_safe_pivot() - get the pivot at @piv or mas->max.
704  * @mas: The maple state
705  * @pivots: The pointer to the maple node pivots
706  * @piv: The pivot to fetch
707  * @type: The maple node type
708  *
709  * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
710  * otherwise.
711  */
712 static inline unsigned long
713 mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
714 	       unsigned char piv, enum maple_type type)
715 {
716 	if (piv >= mt_pivots[type])
717 		return mas->max;
718 
719 	return pivots[piv];
720 }
721 
722 /*
723  * mas_safe_min() - Return the minimum for a given offset.
724  * @mas: The maple state
725  * @pivots: The pointer to the maple node pivots
726  * @offset: The offset into the pivot array
727  *
728  * Return: The minimum range value that is contained in @offset.
729  */
730 static inline unsigned long
731 mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
732 {
733 	if (likely(offset))
734 		return pivots[offset - 1] + 1;
735 
736 	return mas->min;
737 }
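
/*
 * Example (editorial): in a node with pivots {10, 20, ...}, offset 1 covers
 * the range 11-20: mas_safe_min() returns pivots[0] + 1 == 11 and
 * mas_safe_pivot() returns pivots[1] == 20 (or mas->max once @piv runs off
 * the end of the pivot array).
 */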
738 
739 /*
740  * mas_logical_pivot() - Get the logical pivot of a given offset.
741  * @mas: The maple state
742  * @pivots: The pointer to the maple node pivots
743  * @offset: The offset into the pivot array
744  * @type: The maple node type
745  *
746  * When there is no value at a pivot (beyond the end of the data), then the
747  * pivot is actually @mas->max.
748  *
749  * Return: the logical pivot of a given @offset.
750  */
751 static inline unsigned long
752 mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
753 		  unsigned char offset, enum maple_type type)
754 {
755 	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);
756 
757 	if (likely(lpiv))
758 		return lpiv;
759 
760 	if (likely(offset))
761 		return mas->max;
762 
763 	return lpiv;
764 }
765 
766 /*
767  * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
768  * @mn: The encoded maple node
769  * @piv: The pivot offset
770  * @val: The value of the pivot
771  */
772 static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
773 				unsigned long val)
774 {
775 	struct maple_node *node = mte_to_node(mn);
776 	enum maple_type type = mte_node_type(mn);
777 
778 	BUG_ON(piv >= mt_pivots[type]);
779 	switch (type) {
780 	default:
781 	case maple_range_64:
782 	case maple_leaf_64:
783 		node->mr64.pivot[piv] = val;
784 		break;
785 	case maple_arange_64:
786 		node->ma64.pivot[piv] = val;
787 		break;
788 	case maple_dense:
789 		break;
790 	}
791 
792 }
793 
794 /*
795  * ma_slots() - Get a pointer to the maple node slots.
796  * @mn: The maple node
797  * @mt: The maple node type
798  *
799  * Return: A pointer to the maple node slots
800  */
801 static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
802 {
803 	switch (mt) {
804 	default:
805 	case maple_arange_64:
806 		return mn->ma64.slot;
807 	case maple_range_64:
808 	case maple_leaf_64:
809 		return mn->mr64.slot;
810 	case maple_dense:
811 		return mn->slot;
812 	}
813 }
814 
815 static inline bool mt_locked(const struct maple_tree *mt)
816 {
817 	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
818 		lockdep_is_held(&mt->ma_lock);
819 }
820 
821 static inline void *mt_slot(const struct maple_tree *mt,
822 		void __rcu **slots, unsigned char offset)
823 {
824 	return rcu_dereference_check(slots[offset], mt_locked(mt));
825 }
826 
827 static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
828 				   unsigned char offset)
829 {
830 	return rcu_dereference_protected(slots[offset], mt_locked(mt));
831 }

832 /*
833  * mas_slot_locked() - Get the slot value when holding the maple tree lock.
834  * @mas: The maple state
835  * @slots: The pointer to the slots
836  * @offset: The offset into the slots array to fetch
837  *
838  * Return: The entry stored in @slots at the @offset.
839  */
840 static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
841 				       unsigned char offset)
842 {
843 	return mt_slot_locked(mas->tree, slots, offset);
844 }
845 
846 /*
847  * mas_slot() - Get the slot value when not holding the maple tree lock.
848  * @mas: The maple state
849  * @slots: The pointer to the slots
850  * @offset: The offset into the slots array to fetch
851  *
852  * Return: The entry stored in @slots at the @offset
853  */
854 static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
855 			     unsigned char offset)
856 {
857 	return mt_slot(mas->tree, slots, offset);
858 }
859 
860 /*
861  * mas_root() - Get the maple tree root.
862  * @mas: The maple state.
863  *
864  * Return: The pointer to the root of the tree
865  */
866 static inline void *mas_root(struct ma_state *mas)
867 {
868 	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
869 }
870 
871 static inline void *mt_root_locked(struct maple_tree *mt)
872 {
873 	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
874 }
875 
876 /*
877  * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
878  * @mas: The maple state.
879  *
880  * Return: The pointer to the root of the tree
881  */
882 static inline void *mas_root_locked(struct ma_state *mas)
883 {
884 	return mt_root_locked(mas->tree);
885 }
886 
887 static inline struct maple_metadata *ma_meta(struct maple_node *mn,
888 					     enum maple_type mt)
889 {
890 	switch (mt) {
891 	case maple_arange_64:
892 		return &mn->ma64.meta;
893 	default:
894 		return &mn->mr64.meta;
895 	}
896 }
897 
898 /*
899  * ma_set_meta() - Set the metadata information of a node.
900  * @mn: The maple node
901  * @mt: The maple node type
902  * @offset: The offset of the highest sub-gap in this node.
903  * @end: The end of the data in this node.
904  */
905 static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
906 			       unsigned char offset, unsigned char end)
907 {
908 	struct maple_metadata *meta = ma_meta(mn, mt);
909 
910 	meta->gap = offset;
911 	meta->end = end;
912 }
913 
914 /*
915  * mt_clear_meta() - clear the metadata information of a node, if it exists
916  * @mt: The maple tree
917  * @mn: The maple node
918  * @type: The maple node type
921  */
922 static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
923 				  enum maple_type type)
924 {
925 	struct maple_metadata *meta;
926 	unsigned long *pivots;
927 	void __rcu **slots;
928 	void *next;
929 
930 	switch (type) {
931 	case maple_range_64:
932 		pivots = mn->mr64.pivot;
933 		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
934 			slots = mn->mr64.slot;
935 			next = mt_slot_locked(mt, slots,
936 					      MAPLE_RANGE64_SLOTS - 1);
937 			if (unlikely((mte_to_node(next) &&
938 				      mte_node_type(next))))
939 				return; /* no metadata, could be node */
940 		}
941 		fallthrough;
942 	case maple_arange_64:
943 		meta = ma_meta(mn, type);
944 		break;
945 	default:
946 		return;
947 	}
948 
949 	meta->gap = 0;
950 	meta->end = 0;
951 }
952 
953 /*
954  * ma_meta_end() - Get the data end of a node from the metadata
955  * @mn: The maple node
956  * @mt: The maple node type
957  */
958 static inline unsigned char ma_meta_end(struct maple_node *mn,
959 					enum maple_type mt)
960 {
961 	struct maple_metadata *meta = ma_meta(mn, mt);
962 
963 	return meta->end;
964 }
965 
966 /*
967  * ma_meta_gap() - Get the largest gap location of a node from the metadata
968  * @mn: The maple node
969  * @mt: The maple node type
970  */
971 static inline unsigned char ma_meta_gap(struct maple_node *mn,
972 					enum maple_type mt)
973 {
974 	BUG_ON(mt != maple_arange_64);
975 
976 	return mn->ma64.meta.gap;
977 }
978 
979 /*
980  * ma_set_meta_gap() - Set the largest gap location in a node's metadata
981  * @mn: The maple node
982  * @mt: The maple node type
983  * @offset: The location of the largest gap.
984  */
985 static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
986 				   unsigned char offset)
987 {
989 	struct maple_metadata *meta = ma_meta(mn, mt);
990 
991 	meta->gap = offset;
992 }
993 
994 /*
995  * mat_add() - Add a @dead_enode to the ma_topiary list of dead nodes.
996  * @mat: The ma_topiary, a linked list of dead nodes.
997  * @dead_enode: The node to be marked as dead and added to the tail of the list
998  *
999  * Add the @dead_enode to the linked list in @mat.
1000  */
1001 static inline void mat_add(struct ma_topiary *mat,
1002 			   struct maple_enode *dead_enode)
1003 {
1004 	mte_set_node_dead(dead_enode);
1005 	mte_to_mat(dead_enode)->next = NULL;
1006 	if (!mat->tail) {
1007 		mat->tail = mat->head = dead_enode;
1008 		return;
1009 	}
1010 
1011 	mte_to_mat(mat->tail)->next = dead_enode;
1012 	mat->tail = dead_enode;
1013 }
1014 
1015 static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
1016 static inline void mas_free(struct ma_state *mas, struct maple_enode *used);
1017 
1018 /*
1019  * mas_mat_free() - Free all nodes in a dead list.
1020  * @mas: The maple state
1021  * @mat: The ma_topiary linked list of dead nodes to free.
1022  *
1023  * Free walk a dead list.
1024  */
1025 static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
1026 {
1027 	struct maple_enode *next;
1028 
1029 	while (mat->head) {
1030 		next = mte_to_mat(mat->head)->next;
1031 		mas_free(mas, mat->head);
1032 		mat->head = next;
1033 	}
1034 }
1035 
1036 /*
1037  * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
1038  * @mas: The maple state
1039  * @mat: The ma_topiary linked list of dead nodes to free.
1040  *
1041  * Destroy walk a dead list.
1042  */
1043 static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
1044 {
1045 	struct maple_enode *next;
1046 
1047 	while (mat->head) {
1048 		next = mte_to_mat(mat->head)->next;
1049 		mte_destroy_walk(mat->head, mat->mtree);
1050 		mat->head = next;
1051 	}
1052 }

1053 /*
1054  * mas_descend() - Descend into the slot stored in the ma_state.
1055  * @mas: The maple state.
1056  *
1057  * Note: Not RCU safe, only use in write side or debug code.
1058  */
1059 static inline void mas_descend(struct ma_state *mas)
1060 {
1061 	enum maple_type type;
1062 	unsigned long *pivots;
1063 	struct maple_node *node;
1064 	void __rcu **slots;
1065 
1066 	node = mas_mn(mas);
1067 	type = mte_node_type(mas->node);
1068 	pivots = ma_pivots(node, type);
1069 	slots = ma_slots(node, type);
1070 
1071 	if (mas->offset)
1072 		mas->min = pivots[mas->offset - 1] + 1;
1073 	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
1074 	mas->node = mas_slot(mas, slots, mas->offset);
1075 }
1076 
1077 /*
1078  * mte_set_gap() - Set a maple node gap.
1079  * @mn: The encoded maple node
1080  * @gap: The offset of the gap to set
1081  * @val: The gap value
1082  */
1083 static inline void mte_set_gap(const struct maple_enode *mn,
1084 				 unsigned char gap, unsigned long val)
1085 {
1086 	switch (mte_node_type(mn)) {
1087 	default:
1088 		break;
1089 	case maple_arange_64:
1090 		mte_to_node(mn)->ma64.gap[gap] = val;
1091 		break;
1092 	}
1093 }
1094 
1095 /*
1096  * mas_ascend() - Walk up a level of the tree.
1097  * @mas: The maple state
1098  *
1099  * Sets the @mas->max and @mas->min to the correct values when walking up.  This
1100  * may cause several levels of walking up to find the correct min and max.
1101  * May find a dead node which will cause a premature return.
1102  * Return: 1 on dead node, 0 otherwise
1103  */
1104 static int mas_ascend(struct ma_state *mas)
1105 {
1106 	struct maple_enode *p_enode; /* parent enode. */
1107 	struct maple_enode *a_enode; /* ancestor enode. */
1108 	struct maple_node *a_node; /* ancestor node. */
1109 	struct maple_node *p_node; /* parent node. */
1110 	unsigned char a_slot;
1111 	enum maple_type a_type;
1112 	unsigned long min, max;
1113 	unsigned long *pivots;
1114 	unsigned char offset;
1115 	bool set_max = false, set_min = false;
1116 
1117 	a_node = mas_mn(mas);
1118 	if (ma_is_root(a_node)) {
1119 		mas->offset = 0;
1120 		return 0;
1121 	}
1122 
1123 	p_node = mte_parent(mas->node);
1124 	if (unlikely(a_node == p_node))
1125 		return 1;
1126 	a_type = mas_parent_enum(mas, mas->node);
1127 	offset = mte_parent_slot(mas->node);
1128 	a_enode = mt_mk_node(p_node, a_type);
1129 
1130 	/* Check to make sure all parent information is still accurate */
1131 	if (p_node != mte_parent(mas->node))
1132 		return 1;
1133 
1134 	mas->node = a_enode;
1135 	mas->offset = offset;
1136 
1137 	if (mte_is_root(a_enode)) {
1138 		mas->max = ULONG_MAX;
1139 		mas->min = 0;
1140 		return 0;
1141 	}
1142 
1143 	min = 0;
1144 	max = ULONG_MAX;
1145 	do {
1146 		p_enode = a_enode;
1147 		a_type = mas_parent_enum(mas, p_enode);
1148 		a_node = mte_parent(p_enode);
1149 		a_slot = mte_parent_slot(p_enode);
1150 		a_enode = mt_mk_node(a_node, a_type);
1151 		pivots = ma_pivots(a_node, a_type);
1152 
1153 		if (unlikely(ma_dead_node(a_node)))
1154 			return 1;
1155 
1156 		if (!set_min && a_slot) {
1157 			set_min = true;
1158 			min = pivots[a_slot - 1] + 1;
1159 		}
1160 
1161 		if (!set_max && a_slot < mt_pivots[a_type]) {
1162 			set_max = true;
1163 			max = pivots[a_slot];
1164 		}
1165 
1166 		if (unlikely(ma_dead_node(a_node)))
1167 			return 1;
1168 
1169 		if (unlikely(ma_is_root(a_node)))
1170 			break;
1171 
1172 	} while (!set_min || !set_max);
1173 
1174 	mas->max = max;
1175 	mas->min = min;
1176 	return 0;
1177 }
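
/*
 * Example (editorial): a node in slot 0 of a parent with pivots {100, 200}
 * gets max == 100 immediately, but its minimum is not visible in that
 * parent (there is no pivot to the left of slot 0), so the loop keeps
 * ascending until a non-zero slot, or the root, supplies the minimum.
 */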
1178 
1179 /*
1180  * mas_pop_node() - Get a previously allocated maple node from the maple state.
1181  * @mas: The maple state
1182  *
1183  * Return: A pointer to a maple node.
1184  */
1185 static inline struct maple_node *mas_pop_node(struct ma_state *mas)
1186 {
1187 	struct maple_alloc *ret, *node = mas->alloc;
1188 	unsigned long total = mas_allocated(mas);
1189 	unsigned int req = mas_alloc_req(mas);
1190 
1191 	/* nothing or a request pending. */
1192 	if (WARN_ON(!total))
1193 		return NULL;
1194 
1195 	if (total == 1) {
1196 		/* single allocation in this ma_state */
1197 		mas->alloc = NULL;
1198 		ret = node;
1199 		goto single_node;
1200 	}
1201 
1202 	if (node->node_count == 1) {
1203 		/* Single allocation in this node. */
1204 		mas->alloc = node->slot[0];
1205 		mas->alloc->total = node->total - 1;
1206 		ret = node;
1207 		goto new_head;
1208 	}
1209 	node->total--;
1210 	ret = node->slot[--node->node_count];
1211 	node->slot[node->node_count] = NULL;
1212 
1213 single_node:
1214 new_head:
1215 	if (req) {
1216 		req++;
1217 		mas_set_alloc_req(mas, req);
1218 	}
1219 
1220 	memset(ret, 0, sizeof(*ret));
1221 	return (struct maple_node *)ret;
1222 }
1223 
1224 /*
1225  * mas_push_node() - Push a node back on the maple state allocation.
1226  * @mas: The maple state
1227  * @used: The used maple node
1228  *
1229  * Stores the maple node back into @mas->alloc for reuse.  Updates allocated and
1230  * requested node count as necessary.
1231  */
1232 static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
1233 {
1234 	struct maple_alloc *reuse = (struct maple_alloc *)used;
1235 	struct maple_alloc *head = mas->alloc;
1236 	unsigned long count;
1237 	unsigned int requested = mas_alloc_req(mas);
1238 
1239 	count = mas_allocated(mas);
1240 
1241 	reuse->request_count = 0;
1242 	reuse->node_count = 0;
1243 	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
1244 		head->slot[head->node_count++] = reuse;
1245 		head->total++;
1246 		goto done;
1247 	}
1248 
1249 	reuse->total = 1;
1250 	if ((head) && !((unsigned long)head & 0x1)) {
1251 		reuse->slot[0] = head;
1252 		reuse->node_count = 1;
1253 		reuse->total += head->total;
1254 	}
1255 
1256 	mas->alloc = reuse;
1257 done:
1258 	if (requested > 1)
1259 		mas_set_alloc_req(mas, requested - 1);
1260 }
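
/*
 * Layout sketch (editorial): mas->alloc points at the head maple_alloc;
 * spare nodes sit in the head's slot[] array, and when a head fills up, the
 * old head is chained through slot[0] of the new one:
 *
 *	mas->alloc -> { total, node_count, slot[0..node_count-1] }
 *	                                    |
 *	                                    slot[0] -> next full maple_alloc
 *
 * mas_pop_node() takes the highest-numbered slot first; mas_push_node()
 * refills a slot or installs the returned node as the new head.
 */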
1261 
1262 /*
1263  * mas_alloc_nodes() - Allocate nodes into a maple state
1264  * @mas: The maple state
1265  * @gfp: The GFP Flags
1266  */
1267 static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
1268 {
1269 	struct maple_alloc *node;
1270 	unsigned long allocated = mas_allocated(mas);
1271 	unsigned int requested = mas_alloc_req(mas);
1272 	unsigned int count;
1273 	void **slots = NULL;
1274 	unsigned int max_req = 0;
1275 
1276 	if (!requested)
1277 		return;
1278 
1279 	mas_set_alloc_req(mas, 0);
1280 	if (mas->mas_flags & MA_STATE_PREALLOC) {
1281 		if (allocated)
1282 			return;
1283 		WARN_ON(!allocated);
1284 	}
1285 
1286 	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
1287 		node = (struct maple_alloc *)mt_alloc_one(gfp);
1288 		if (!node)
1289 			goto nomem_one;
1290 
1291 		if (allocated) {
1292 			node->slot[0] = mas->alloc;
1293 			node->node_count = 1;
1294 		} else {
1295 			node->node_count = 0;
1296 		}
1297 
1298 		mas->alloc = node;
1299 		node->total = ++allocated;
1300 		requested--;
1301 	}
1302 
1303 	node = mas->alloc;
1304 	node->request_count = 0;
1305 	while (requested) {
1306 		max_req = MAPLE_ALLOC_SLOTS;
1307 		if (node->node_count) {
1308 			unsigned int offset = node->node_count;
1309 
1310 			slots = (void **)&node->slot[offset];
1311 			max_req -= offset;
1312 		} else {
1313 			slots = (void **)&node->slot;
1314 		}
1315 
1316 		max_req = min(requested, max_req);
1317 		count = mt_alloc_bulk(gfp, max_req, slots);
1318 		if (!count)
1319 			goto nomem_bulk;
1320 
1321 		node->node_count += count;
1322 		allocated += count;
1323 		node = node->slot[0];
1324 		node->node_count = 0;
1325 		node->request_count = 0;
1326 		requested -= count;
1327 	}
1328 	mas->alloc->total = allocated;
1329 	return;
1330 
1331 nomem_bulk:
1332 	/* Clean up potential freed allocations on bulk failure */
1333 	memset(slots, 0, max_req * sizeof(unsigned long));
1334 nomem_one:
1335 	mas_set_alloc_req(mas, requested);
1336 	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
1337 		mas->alloc->total = allocated;
1338 	mas_set_err(mas, -ENOMEM);
1339 }
1340 
1341 /*
1342  * mas_free() - Free an encoded maple node
1343  * @mas: The maple state
1344  * @used: The encoded maple node to free.
1345  *
1346  * Uses rcu free if necessary, pushes @used back on the maple state allocations
1347  * otherwise.
1348  */
1349 static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
1350 {
1351 	struct maple_node *tmp = mte_to_node(used);
1352 
1353 	if (mt_in_rcu(mas->tree))
1354 		ma_free_rcu(tmp);
1355 	else
1356 		mas_push_node(mas, tmp);
1357 }
1358 
1359 /*
1360  * mas_node_count_gfp() - Check if enough nodes are allocated and request more
1361  * if there are not enough nodes.
1362  * @mas: The maple state
1363  * @count: The number of nodes needed
1364  * @gfp: The GFP flags
1365  */
1366 static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
1367 {
1368 	unsigned long allocated = mas_allocated(mas);
1369 
1370 	if (allocated < count) {
1371 		mas_set_alloc_req(mas, count - allocated);
1372 		mas_alloc_nodes(mas, gfp);
1373 	}
1374 }
1375 
1376 /*
1377  * mas_node_count() - Check if enough nodes are allocated and request more if
1378  * there are not enough nodes.
1379  * @mas: The maple state
1380  * @count: The number of nodes needed
1381  *
1382  * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
1383  */
1384 static void mas_node_count(struct ma_state *mas, int count)
1385 {
1386 	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
1387 }
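
/*
 * Usage sketch (editorial, hypothetical caller): a write path reserves a
 * worst-case node count up front and checks for allocation failure:
 *
 *	mas_node_count(&mas, 1 + mas_mt_height(&mas) * 3);
 *	if (mas_is_err(&mas))
 *		return xa_err(mas.node);
 *
 * Unused nodes remain in mas->alloc for later operations or for
 * mas_destroy() to release.
 */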
1388 
1389 /*
1390  * mas_start() - Sets up maple state for operations.
1391  * @mas: The maple state.
1392  *
1393  * If mas->node == MAS_START, then set the min, max and depth to
1394  * defaults.
1395  *
1396  * Return:
1397  * - If mas->node is an error or not MAS_START, return NULL.
1398  * - If it's an empty tree:     NULL & mas->node == MAS_NONE
1399  * - If it's a single entry:    The entry & mas->node == MAS_ROOT
1400  * - If it's a tree:            NULL & mas->node == safe root node.
1401  */
1402 static inline struct maple_enode *mas_start(struct ma_state *mas)
1403 {
1404 	if (likely(mas_is_start(mas))) {
1405 		struct maple_enode *root;
1406 
1407 		mas->min = 0;
1408 		mas->max = ULONG_MAX;
1409 		mas->depth = 0;
1410 
1411 retry:
1412 		root = mas_root(mas);
1413 		/* Tree with nodes */
1414 		if (likely(xa_is_node(root))) {
1415 			mas->depth = 1;
1416 			mas->node = mte_safe_root(root);
1417 			mas->offset = 0;
1418 			if (mte_dead_node(mas->node))
1419 				goto retry;
1420 
1421 			return NULL;
1422 		}
1423 
1424 		/* empty tree */
1425 		if (unlikely(!root)) {
1426 			mas->node = MAS_NONE;
1427 			mas->offset = MAPLE_NODE_SLOTS;
1428 			return NULL;
1429 		}
1430 
1431 		/* Single entry tree */
1432 		mas->node = MAS_ROOT;
1433 		mas->offset = MAPLE_NODE_SLOTS;
1434 
1435 		/* The single entry is stored at index 0 only */
1436 		if (mas->index > 0)
1437 			return NULL;
1438 
1439 		return root;
1440 	}
1441 
1442 	return NULL;
1443 }
1444 
1445 /*
1446  * ma_data_end() - Find the end of the data in a node.
1447  * @node: The maple node
1448  * @type: The maple node type
1449  * @pivots: The array of pivots in the node
1450  * @max: The maximum value in the node
1451  *
1452  * Uses metadata to find the end of the data when possible.
1453  * Return: The zero indexed last slot with data (may be null).
1454  */
1455 static inline unsigned char ma_data_end(struct maple_node *node,
1456 					enum maple_type type,
1457 					unsigned long *pivots,
1458 					unsigned long max)
1459 {
1460 	unsigned char offset;
1461 
1462 	if (!pivots)
1463 		return 0;
1464 
1465 	if (type == maple_arange_64)
1466 		return ma_meta_end(node, type);
1467 
1468 	offset = mt_pivots[type] - 1;
1469 	if (likely(!pivots[offset]))
1470 		return ma_meta_end(node, type);
1471 
1472 	if (likely(pivots[offset] == max))
1473 		return offset;
1474 
1475 	return mt_pivots[type];
1476 }
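
/*
 * Example (editorial, assuming a 16-slot maple_range_64 node where
 * mt_pivots[type] == 15): with pivots {5, 10, 0, ...} the last pivot is
 * zero, so the end comes from the metadata.  If pivots[14] == max, the end
 * is offset 14; if pivots[14] is non-zero but below max, every slot is in
 * use and the end is mt_pivots[type], i.e. slot 15.
 */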
1477 
1478 /*
1479  * mas_data_end() - Find the end of the data (slot).
1480  * @mas: the maple state
1481  *
1482  * This method is optimized to check the metadata of a node if the node type
1483  * supports data end metadata.
1484  *
1485  * Return: The zero indexed last slot with data (may be null).
1486  */
1487 static inline unsigned char mas_data_end(struct ma_state *mas)
1488 {
1489 	enum maple_type type;
1490 	struct maple_node *node;
1491 	unsigned char offset;
1492 	unsigned long *pivots;
1493 
1494 	type = mte_node_type(mas->node);
1495 	node = mas_mn(mas);
1496 	if (type == maple_arange_64)
1497 		return ma_meta_end(node, type);
1498 
1499 	pivots = ma_pivots(node, type);
1500 	if (unlikely(ma_dead_node(node)))
1501 		return 0;
1502 
1503 	offset = mt_pivots[type] - 1;
1504 	if (likely(!pivots[offset]))
1505 		return ma_meta_end(node, type);
1506 
1507 	if (likely(pivots[offset] == mas->max))
1508 		return offset;
1509 
1510 	return mt_pivots[type];
1511 }
1512 
1513 /*
1514  * mas_leaf_max_gap() - Returns the largest gap in a leaf node
1515  * @mas: The maple state
1516  *
1517  * Return: The maximum gap in the leaf.
1518  */
1519 static unsigned long mas_leaf_max_gap(struct ma_state *mas)
1520 {
1521 	enum maple_type mt;
1522 	unsigned long pstart, gap, max_gap;
1523 	struct maple_node *mn;
1524 	unsigned long *pivots;
1525 	void __rcu **slots;
1526 	unsigned char i;
1527 	unsigned char max_piv;
1528 
1529 	mt = mte_node_type(mas->node);
1530 	mn = mas_mn(mas);
1531 	slots = ma_slots(mn, mt);
1532 	max_gap = 0;
1533 	if (unlikely(ma_is_dense(mt))) {
1534 		gap = 0;
1535 		for (i = 0; i < mt_slots[mt]; i++) {
1536 			if (slots[i]) {
1537 				if (gap > max_gap)
1538 					max_gap = gap;
1539 				gap = 0;
1540 			} else {
1541 				gap++;
1542 			}
1543 		}
1544 		if (gap > max_gap)
1545 			max_gap = gap;
1546 		return max_gap;
1547 	}
1548 
1549 	/*
1550 	 * Checking the first implied pivot optimizes the loop below, and slot 1
1551 	 * may be skipped if there is a gap in slot 0.
1552 	 */
1553 	pivots = ma_pivots(mn, mt);
1554 	if (likely(!slots[0])) {
1555 		max_gap = pivots[0] - mas->min + 1;
1556 		i = 2;
1557 	} else {
1558 		i = 1;
1559 	}
1560 
1561 	/* reduce max_piv as the special case is checked before the loop */
1562 	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
1563 	/*
1564 	 * Check the end implied pivot, which can only be a gap on the
1565 	 * right-most node.
1566 	 */
1567 	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
1568 		gap = ULONG_MAX - pivots[max_piv];
1569 		if (gap > max_gap)
1570 			max_gap = gap;
1571 	}
1572 
1573 	for (; i <= max_piv; i++) {
1574 		/* data == no gap. */
1575 		if (likely(slots[i]))
1576 			continue;
1577 
1578 		pstart = pivots[i - 1];
1579 		gap = pivots[i] - pstart;
1580 		if (gap > max_gap)
1581 			max_gap = gap;
1582 
1583 		/* There cannot be two gaps in a row. */
1584 		i++;
1585 	}
1586 	return max_gap;
1587 }
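
/*
 * Worked example (editorial): a leaf with mas->min == 1, slots {A, NULL, B}
 * and pivots {10, 20, 30} has a single gap, the empty range 11-20.  The loop
 * finds no entry at slot 1 and computes pivots[1] - pivots[0] == 10, the
 * number of vacant indices.  Gaps are never adjacent because neighbouring
 * empty ranges are stored as one NULL entry.
 */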
1588 
1589 /*
1590  * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
1591  * @node: The maple node
1592  * @gaps: The pointer to the gaps
1593  * @mt: The maple node type
1594  * @off: Pointer to store the offset location of the gap.
1595  *
1596  * Uses the metadata data end to scan backwards across set gaps.
1597  *
1598  * Return: The maximum gap value
1599  */
1600 static inline unsigned long
1601 ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
1602 	    unsigned char *off)
1603 {
1604 	unsigned char offset, i;
1605 	unsigned long max_gap = 0;
1606 
1607 	i = offset = ma_meta_end(node, mt);
1608 	do {
1609 		if (gaps[i] > max_gap) {
1610 			max_gap = gaps[i];
1611 			offset = i;
1612 		}
1613 	} while (i--);
1614 
1615 	*off = offset;
1616 	return max_gap;
1617 }
1618 
1619 /*
1620  * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
1621  * @mas: The maple state.
1622  *
1623  * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
1624  *
1625  * Return: The gap value.
1626  */
1627 static inline unsigned long mas_max_gap(struct ma_state *mas)
1628 {
1629 	unsigned long *gaps;
1630 	unsigned char offset;
1631 	enum maple_type mt;
1632 	struct maple_node *node;
1633 
1634 	mt = mte_node_type(mas->node);
1635 	if (ma_is_leaf(mt))
1636 		return mas_leaf_max_gap(mas);
1637 
1638 	node = mas_mn(mas);
1639 	offset = ma_meta_gap(node, mt);
1640 	if (offset == MAPLE_ARANGE64_META_MAX)
1641 		return 0;
1642 
1643 	gaps = ma_gaps(node, mt);
1644 	return gaps[offset];
1645 }
1646 
1647 /*
1648  * mas_parent_gap() - Set the parent gap and any gaps above, as needed
1649  * @mas: The maple state
1650  * @offset: The gap offset in the parent to set
1651  * @new: The new gap value.
1652  *
1653  * Set the parent gap then continue to set the gap upwards, using the metadata
1654  * of the parent to see if it is necessary to check the node above.
1655  */
1656 static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
1657 		unsigned long new)
1658 {
1659 	unsigned long meta_gap = 0;
1660 	struct maple_node *pnode;
1661 	struct maple_enode *penode;
1662 	unsigned long *pgaps;
1663 	unsigned char meta_offset;
1664 	enum maple_type pmt;
1665 
1666 	pnode = mte_parent(mas->node);
1667 	pmt = mas_parent_enum(mas, mas->node);
1668 	penode = mt_mk_node(pnode, pmt);
1669 	pgaps = ma_gaps(pnode, pmt);
1670 
1671 ascend:
1672 	meta_offset = ma_meta_gap(pnode, pmt);
1673 	if (meta_offset == MAPLE_ARANGE64_META_MAX)
1674 		meta_gap = 0;
1675 	else
1676 		meta_gap = pgaps[meta_offset];
1677 
1678 	pgaps[offset] = new;
1679 
1680 	if (meta_gap == new)
1681 		return;
1682 
1683 	if (offset != meta_offset) {
1684 		if (meta_gap > new)
1685 			return;
1686 
1687 		ma_set_meta_gap(pnode, pmt, offset);
1688 	} else if (new < meta_gap) {
1689 		meta_offset = 15;
1690 		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
1691 		ma_set_meta_gap(pnode, pmt, meta_offset);
1692 	}
1693 
1694 	if (ma_is_root(pnode))
1695 		return;
1696 
1697 	/* Go to the parent node. */
1698 	pnode = mte_parent(penode);
1699 	pmt = mas_parent_enum(mas, penode);
1700 	pgaps = ma_gaps(pnode, pmt);
1701 	offset = mte_parent_slot(penode);
1702 	penode = mt_mk_node(pnode, pmt);
1703 	goto ascend;
1704 }
1705 
1706 /*
1707  * mas_update_gap() - Update a node's gaps and propagate up if necessary.
1708  * @mas: The maple state.
1709  */
1710 static inline void mas_update_gap(struct ma_state *mas)
1711 {
1712 	unsigned char pslot;
1713 	unsigned long p_gap;
1714 	unsigned long max_gap;
1715 
1716 	if (!mt_is_alloc(mas->tree))
1717 		return;
1718 
1719 	if (mte_is_root(mas->node))
1720 		return;
1721 
1722 	max_gap = mas_max_gap(mas);
1723 
1724 	pslot = mte_parent_slot(mas->node);
1725 	p_gap = ma_gaps(mte_parent(mas->node),
1726 			mas_parent_enum(mas, mas->node))[pslot];
1727 
1728 	if (p_gap != max_gap)
1729 		mas_parent_gap(mas, pslot, max_gap);
1730 }
1731 
1732 /*
1733  * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
1734  * @parent with the slot encoded.
1735  * @mas: The maple state (for the tree)
1736  * @parent: The maple encoded node containing the children.
1737  */
1738 static inline void mas_adopt_children(struct ma_state *mas,
1739 		struct maple_enode *parent)
1740 {
1741 	enum maple_type type = mte_node_type(parent);
1742 	struct maple_node *node = mas_mn(mas);
1743 	void __rcu **slots = ma_slots(node, type);
1744 	unsigned long *pivots = ma_pivots(node, type);
1745 	struct maple_enode *child;
1746 	unsigned char offset;
1747 
1748 	offset = ma_data_end(node, type, pivots, mas->max);
1749 	do {
1750 		child = mas_slot_locked(mas, slots, offset);
1751 		mte_set_parent(child, parent, offset);
1752 	} while (offset--);
1753 }
1754 
1755 /*
1756  * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
1757  * parent encoding to locate the maple node in the tree.
1758  * @mas: The ma_state to use for operations.
1759  * @advanced: Boolean to adopt the child nodes and free the old node (false) or
1760  * leave the node (true) and handle the adoption and free elsewhere.
1761  */
1762 static inline void mas_replace(struct ma_state *mas, bool advanced)
1763 	__must_hold(mas->tree->lock)
1764 {
1765 	struct maple_node *mn = mas_mn(mas);
1766 	struct maple_enode *old_enode;
1767 	unsigned char offset = 0;
1768 	void __rcu **slots = NULL;
1769 
1770 	if (ma_is_root(mn)) {
1771 		old_enode = mas_root_locked(mas);
1772 	} else {
1773 		offset = mte_parent_slot(mas->node);
1774 		slots = ma_slots(mte_parent(mas->node),
1775 				 mas_parent_enum(mas, mas->node));
1776 		old_enode = mas_slot_locked(mas, slots, offset);
1777 	}
1778 
1779 	if (!advanced && !mte_is_leaf(mas->node))
1780 		mas_adopt_children(mas, mas->node);
1781 
1782 	if (mte_is_root(mas->node)) {
1783 		mn->parent = ma_parent_ptr(
1784 			      ((unsigned long)mas->tree | MA_ROOT_PARENT));
1785 		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
1786 		mas_set_height(mas);
1787 	} else {
1788 		rcu_assign_pointer(slots[offset], mas->node);
1789 	}
1790 
1791 	if (!advanced) {
1792 		mte_set_node_dead(old_enode);
1793 		mas_free(mas, old_enode);
1794 	}
1795 }
1796 
1797 /*
1798  * mas_new_child() - Find the new child of a node.
1799  * @mas: the maple state
1800  * @child: the maple state to store the child.
1801  */
1802 static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
1803 	__must_hold(mas->tree->lock)
1804 {
1805 	enum maple_type mt;
1806 	unsigned char offset;
1807 	unsigned char end;
1808 	unsigned long *pivots;
1809 	struct maple_enode *entry;
1810 	struct maple_node *node;
1811 	void __rcu **slots;
1812 
1813 	mt = mte_node_type(mas->node);
1814 	node = mas_mn(mas);
1815 	slots = ma_slots(node, mt);
1816 	pivots = ma_pivots(node, mt);
1817 	end = ma_data_end(node, mt, pivots, mas->max);
1818 	for (offset = mas->offset; offset <= end; offset++) {
1819 		entry = mas_slot_locked(mas, slots, offset);
1820 		if (mte_parent(entry) == node) {
1821 			*child = *mas;
1822 			mas->offset = offset + 1;
1823 			child->offset = offset;
1824 			mas_descend(child);
1825 			child->offset = 0;
1826 			return true;
1827 		}
1828 	}
1829 	return false;
1830 }
1831 
1832 /*
1833  * mab_shift_right() - Shift the data in mab right.  Note: does not clean out
1834  * the old data or set b_node->b_end.
1835  * @b_node: the maple_big_node
1836  * @shift: the shift count
1837  */
1838 static inline void mab_shift_right(struct maple_big_node *b_node,
1839 				 unsigned char shift)
1840 {
1841 	unsigned long size = b_node->b_end * sizeof(unsigned long);
1842 
1843 	memmove(b_node->pivot + shift, b_node->pivot, size);
1844 	memmove(b_node->slot + shift, b_node->slot, size);
1845 	if (b_node->type == maple_arange_64)
1846 		memmove(b_node->gap + shift, b_node->gap, size);
1847 }
1848 
1849 /*
1850  * mab_middle_node() - Check if a middle node is needed (unlikely)
1851  * @b_node: the maple_big_node that contains the data.
1853  * @split: the potential split location
1854  * @slot_count: the size that can be stored in a single node being considered.
1855  *
1856  * Return: true if a middle node is required.
1857  */
1858 static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
1859 				   unsigned char slot_count)
1860 {
1861 	unsigned char size = b_node->b_end;
1862 
1863 	if (size >= 2 * slot_count)
1864 		return true;
1865 
1866 	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
1867 		return true;
1868 
1869 	return false;
1870 }
1871 
1872 /*
1873  * mab_no_null_split() - ensure the split doesn't fall on a NULL
1874  * @b_node: the maple_big_node with the data
1875  * @split: the suggested split location
1876  * @slot_count: the number of slots in the node being considered.
1877  *
1878  * Return: the split location.
1879  */
1880 static inline int mab_no_null_split(struct maple_big_node *b_node,
1881 				    unsigned char split, unsigned char slot_count)
1882 {
1883 	if (!b_node->slot[split]) {
1884 		/*
1885 		 * If the split is less than the max slot && the right side will
1886 		 * still be sufficient, then increment the split on NULL.
1887 		 */
1888 		if ((split < slot_count - 1) &&
1889 		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
1890 			split++;
1891 		else
1892 			split--;
1893 	}
1894 	return split;
1895 }
1896 
1897 /*
1898  * mab_calc_split() - Calculate the split location and if there needs to be two
1899  * splits.
 * @mas: The maple state
1900  * @bn: The maple_big_node with the data
1901  * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum of the range covered by @bn
1902  *
1903  * Return: The first split location.  The middle split is set in @mid_split.
1904  */
1905 static inline int mab_calc_split(struct ma_state *mas,
1906 	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
1907 {
1908 	unsigned char b_end = bn->b_end;
1909 	int split = b_end / 2; /* Assume equal split. */
1910 	unsigned char slot_min, slot_count = mt_slots[bn->type];
1911 
1912 	/*
1913 	 * To support gap tracking, all NULL entries are kept together and a node
1914 	 * cannot end on a NULL entry, with the exception of the left-most leaf.
1915 	 * This limitation means the split must be checked for this condition and
1916 	 * adjusted to put more data in one direction or the other.
1917 	 */
1918 	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
1919 		*mid_split = 0;
1920 		split = b_end - mt_min_slots[bn->type];
1921 
1922 		if (!ma_is_leaf(bn->type))
1923 			return split;
1924 
1925 		mas->mas_flags |= MA_STATE_REBALANCE;
1926 		if (!bn->slot[split])
1927 			split--;
1928 		return split;
1929 	}
1930 
1931 	/*
1932 	 * Although extremely rare, it is possible to enter what is known as the
1933 	 * 3-way split scenario.  A 3-way split arises from a store of a range
1934 	 * that overwrites the end of one full node and the beginning of another,
1935 	 * leaving a set of entries that cannot be stored in 2 nodes.  Sometimes
1936 	 * the two nodes are located in different parents, which are also full.
1937 	 * This can carry upwards all the way to the root in the worst case.
1938 	 */
1939 	if (unlikely(mab_middle_node(bn, split, slot_count))) {
1940 		split = b_end / 3;
1941 		*mid_split = split * 2;
1942 	} else {
1943 		slot_min = mt_min_slots[bn->type];
1944 
1945 		*mid_split = 0;
1946 		/*
1947 		 * Avoid having a range less than the slot count unless it
1948 		 * causes one node to be deficient.
1949 		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
1950 		 */
1951 		while (((bn->pivot[split] - min) < slot_count - 1) &&
1952 		       (split < slot_count - 1) && (b_end - split > slot_min))
1953 			split++;
1954 	}
1955 
1956 	/* Avoid ending a node on a NULL entry */
1957 	split = mab_no_null_split(bn, split, slot_count);
1958 
1959 	if (unlikely(*mid_split))
1960 		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);
1961 
1962 	return split;
1963 }
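
/*
 * Worked example (editorial addition; values are hypothetical): for a
 * 16-slot node type with b_end = 32, mab_middle_node() is true, so
 * split = 32 / 3 = 10 and *mid_split = 20.  The data is later divided at
 * offsets 10 and 20 into three nodes, subject to mab_no_null_split()
 * nudging either split off a NULL entry.
 */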
1964 
1965 /*
1966  * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
1967  * and set @b_node->b_end to the next free slot.
1968  * @mas: The maple state
1969  * @mas_start: The starting slot to copy
1970  * @mas_end: The end slot to copy (inclusively)
1971  * @b_node: The maple_big_node to place the data
1972  * @mab_start: The starting location in maple_big_node to store the data.
1973  */
1974 static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
1975 			unsigned char mas_end, struct maple_big_node *b_node,
1976 			unsigned char mab_start)
1977 {
1978 	enum maple_type mt;
1979 	struct maple_node *node;
1980 	void __rcu **slots;
1981 	unsigned long *pivots, *gaps;
1982 	int i = mas_start, j = mab_start;
1983 	unsigned char piv_end;
1984 
1985 	node = mas_mn(mas);
1986 	mt = mte_node_type(mas->node);
1987 	pivots = ma_pivots(node, mt);
1988 	if (!i) {
1989 		b_node->pivot[j] = pivots[i++];
1990 		if (unlikely(i > mas_end))
1991 			goto complete;
1992 		j++;
1993 	}
1994 
1995 	piv_end = min(mas_end, mt_pivots[mt]);
1996 	for (; i < piv_end; i++, j++) {
1997 		b_node->pivot[j] = pivots[i];
1998 		if (unlikely(!b_node->pivot[j]))
1999 			break;
2000 
2001 		if (unlikely(mas->max == b_node->pivot[j]))
2002 			goto complete;
2003 	}
2004 
2005 	if (likely(i <= mas_end))
2006 		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);
2007 
2008 complete:
2009 	b_node->b_end = ++j;
2010 	j -= mab_start;
2011 	slots = ma_slots(node, mt);
2012 	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
2013 	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
2014 		gaps = ma_gaps(node, mt);
2015 		memcpy(b_node->gap + mab_start, gaps + mas_start,
2016 		       sizeof(unsigned long) * j);
2017 	}
2018 }
2019 
2020 /*
2021  * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
2022  * @mas: The maple state
2023  * @node: The maple node
2024  * @pivots: pointer to the maple node pivots
2025  * @mt: The maple type
2026  * @end: The assumed end
2027  *
2028  * Note, end may be incremented within this function but not modified at the
2029  * source.  This is fine since the metadata is the last thing to be stored in a
2030  * node during a write.
2031  */
2032 static inline void mas_leaf_set_meta(struct ma_state *mas,
2033 		struct maple_node *node, unsigned long *pivots,
2034 		enum maple_type mt, unsigned char end)
2035 {
2036 	/* There is no room for metadata already */
2037 	if (mt_pivots[mt] <= end)
2038 		return;
2039 
2040 	if (pivots[end] && pivots[end] < mas->max)
2041 		end++;
2042 
2043 	if (end < mt_slots[mt] - 1)
2044 		ma_set_meta(node, mt, 0, end);
2045 }
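
/*
 * Illustration (editorial addition, assuming maple_leaf_64 with 16 slots
 * and 15 pivots): with end = 13, pivots[13] = 50 and mas->max = 100,
 * slot 14 also holds data (covering 51-100), so end is bumped to 14;
 * 14 < 15, so the metadata fits and ma_set_meta() is called.  Had end
 * reached 15, the last slot would be in use and no metadata is stored.
 */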
2046 
2047 /*
2048  * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
2049  * @b_node: the maple_big_node that has the data
2050  * @mab_start: the start location in @b_node.
2051  * @mab_end: The end location in @b_node (inclusively)
2052  * @mas: The maple state with the maple encoded node.
2053  */
2054 static inline void mab_mas_cp(struct maple_big_node *b_node,
2055 			      unsigned char mab_start, unsigned char mab_end,
2056 			      struct ma_state *mas, bool new_max)
2057 {
2058 	int i, j = 0;
2059 	enum maple_type mt = mte_node_type(mas->node);
2060 	struct maple_node *node = mte_to_node(mas->node);
2061 	void __rcu **slots = ma_slots(node, mt);
2062 	unsigned long *pivots = ma_pivots(node, mt);
2063 	unsigned long *gaps = NULL;
2064 	unsigned char end;
2065 
2066 	if (mab_end - mab_start > mt_pivots[mt])
2067 		mab_end--;
2068 
2069 	if (!pivots[mt_pivots[mt] - 1])
2070 		slots[mt_pivots[mt]] = NULL;
2071 
2072 	i = mab_start;
2073 	do {
2074 		pivots[j++] = b_node->pivot[i++];
2075 	} while (i <= mab_end && likely(b_node->pivot[i]));
2076 
2077 	memcpy(slots, b_node->slot + mab_start,
2078 	       sizeof(void *) * (i - mab_start));
2079 
2080 	if (new_max)
2081 		mas->max = b_node->pivot[i - 1];
2082 
2083 	end = j - 1;
2084 	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
2085 		unsigned long max_gap = 0;
2086 		unsigned char offset = 15;
2087 
2088 		gaps = ma_gaps(node, mt);
2089 		do {
2090 			gaps[--j] = b_node->gap[--i];
2091 			if (gaps[j] > max_gap) {
2092 				offset = j;
2093 				max_gap = gaps[j];
2094 			}
2095 		} while (j);
2096 
2097 		ma_set_meta(node, mt, offset, end);
2098 	} else {
2099 		mas_leaf_set_meta(mas, node, pivots, mt, end);
2100 	}
2101 }
2102 
2103 /*
2104  * mas_descend_adopt() - Descend through a sub-tree and adopt children.
2105  * @mas: the maple state with the maple encoded node of the sub-tree.
2106  *
2107  * Descend through a sub-tree and adopt children who do not have the correct
2108  * parents set.  Follow the parents which have the correct parents as they are
2109  * the new entries which need to be followed to find other incorrectly set
2110  * parents.
2111  */
2112 static inline void mas_descend_adopt(struct ma_state *mas)
2113 {
2114 	struct ma_state list[3], next[3];
2115 	int i, n;
2116 
2117 	/*
2118 	 * At each level there may be up to 3 correct parent pointers, which
2119 	 * indicate the new nodes to walk to find other new nodes at lower levels.
2120 	 */
2121 
2122 	for (i = 0; i < 3; i++) {
2123 		list[i] = *mas;
2124 		list[i].offset = 0;
2125 		next[i].offset = 0;
2126 	}
2127 	next[0] = *mas;
2128 
2129 	while (!mte_is_leaf(list[0].node)) {
2130 		n = 0;
2131 		for (i = 0; i < 3; i++) {
2132 			if (mas_is_none(&list[i]))
2133 				continue;
2134 
2135 			if (i && list[i-1].node == list[i].node)
2136 				continue;
2137 
2138 			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
2139 				n++;
2140 
2141 			mas_adopt_children(&list[i], list[i].node);
2142 		}
2143 
2144 		while (n < 3)
2145 			next[n++].node = MAS_NONE;
2146 
2147 		/* descend by setting the list to the children */
2148 		for (i = 0; i < 3; i++)
2149 			list[i] = next[i];
2150 	}
2151 }
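
/*
 * Editorial note: the fan-out of 3 above follows from how a spanning store
 * rebuilds a subtree - each level produces at most a left, a middle (only
 * in the rare 3-way split) and a right new node, so tracking 3 states per
 * level is enough to reach every child with a stale parent pointer.
 */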
2152 
2153 /*
2154  * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2155  * @mas: The maple state
2156  * @end: The maple node end
2157  * @mt: The maple node type
2158  */
2159 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
2160 				      enum maple_type mt)
2161 {
2162 	if (!(mas->mas_flags & MA_STATE_BULK))
2163 		return;
2164 
2165 	if (mte_is_root(mas->node))
2166 		return;
2167 
2168 	if (end > mt_min_slots[mt]) {
2169 		mas->mas_flags &= ~MA_STATE_REBALANCE;
2170 		return;
2171 	}
2172 }
2173 
2174 /*
2175  * mas_store_b_node() - Store an @entry into the b_node while also copying the
2176  * data from a maple encoded node.
2177  * @wr_mas: the maple write state
2178  * @b_node: the maple_big_node to fill with data
2179  * @offset_end: the offset to end copying
2180  *
2181  * The actual end of the data stored is tracked in @b_node->b_end.
2182  */
2183 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
2184 		struct maple_big_node *b_node, unsigned char offset_end)
2185 {
2186 	unsigned char slot;
2187 	unsigned char b_end;
2188 	/* Possible underflow of piv will wrap back to 0 before use. */
2189 	unsigned long piv;
2190 	struct ma_state *mas = wr_mas->mas;
2191 
2192 	b_node->type = wr_mas->type;
2193 	b_end = 0;
2194 	slot = mas->offset;
2195 	if (slot) {
2196 		/* Copy start data up to insert. */
2197 		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
2198 		b_end = b_node->b_end;
2199 		piv = b_node->pivot[b_end - 1];
2200 	} else
2201 		piv = mas->min - 1;
2202 
2203 	if (piv + 1 < mas->index) {
2204 		/* Handle range starting after old range */
2205 		b_node->slot[b_end] = wr_mas->content;
2206 		if (!wr_mas->content)
2207 			b_node->gap[b_end] = mas->index - 1 - piv;
2208 		b_node->pivot[b_end++] = mas->index - 1;
2209 	}
2210 
2211 	/* Store the new entry. */
2212 	mas->offset = b_end;
2213 	b_node->slot[b_end] = wr_mas->entry;
2214 	b_node->pivot[b_end] = mas->last;
2215 
2216 	/* Appended. */
2217 	if (mas->last >= mas->max)
2218 		goto b_end;
2219 
2220 	/* Handle new range ending before old range ends */
2221 	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
2222 	if (piv > mas->last) {
2223 		if (piv == ULONG_MAX)
2224 			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);
2225 
2226 		if (offset_end != slot)
2227 			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
2228 							  offset_end);
2229 
2230 		b_node->slot[++b_end] = wr_mas->content;
2231 		if (!wr_mas->content)
2232 			b_node->gap[b_end] = piv - mas->last + 1;
2233 		b_node->pivot[b_end] = piv;
2234 	}
2235 
2236 	slot = offset_end + 1;
2237 	if (slot > wr_mas->node_end)
2238 		goto b_end;
2239 
2240 	/* Copy end data to the end of the node. */
2241 	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
2242 	b_node->b_end--;
2243 	return;
2244 
2245 b_end:
2246 	b_node->b_end = b_end;
2247 }
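
/*
 * Worked example (editorial addition; values are hypothetical): a leaf
 * holds ranges 0-9, 10-19 and 20-29, and an entry is stored over 15-24.
 * The slot for 0-9 is copied in unchanged, the old 10-19 content is kept
 * with its pivot truncated to 14, the new entry is stored with pivot 24,
 * and the old 20-29 content survives as 25-29 taken from @offset_end.
 */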
2248 
2249 /*
2250  * mas_prev_sibling() - Find the previous node with the same parent.
2251  * @mas: the maple state
2252  *
2253  * Return: True if there is a previous sibling, false otherwise.
2254  */
2255 static inline bool mas_prev_sibling(struct ma_state *mas)
2256 {
2257 	unsigned int p_slot = mte_parent_slot(mas->node);
2258 
2259 	if (mte_is_root(mas->node))
2260 		return false;
2261 
2262 	if (!p_slot)
2263 		return false;
2264 
2265 	mas_ascend(mas);
2266 	mas->offset = p_slot - 1;
2267 	mas_descend(mas);
2268 	return true;
2269 }
2270 
2271 /*
2272  * mas_next_sibling() - Find the next node with the same parent.
2273  * @mas: the maple state
2274  *
2275  * Return: true if there is a next sibling, false otherwise.
2276  */
2277 static inline bool mas_next_sibling(struct ma_state *mas)
2278 {
2279 	MA_STATE(parent, mas->tree, mas->index, mas->last);
2280 
2281 	if (mte_is_root(mas->node))
2282 		return false;
2283 
2284 	parent = *mas;
2285 	mas_ascend(&parent);
2286 	parent.offset = mte_parent_slot(mas->node) + 1;
2287 	if (parent.offset > mas_data_end(&parent))
2288 		return false;
2289 
2290 	*mas = parent;
2291 	mas_descend(mas);
2292 	return true;
2293 }
2294 
2295 /*
2296  * mte_node_or_node() - Return the encoded node or MAS_NONE.
2297  * @enode: The encoded maple node.
2298  *
2299  * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
2300  *
2301  * Return: @enode or MAS_NONE
2302  */
2303 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
2304 {
2305 	if (enode)
2306 		return enode;
2307 
2308 	return ma_enode_ptr(MAS_NONE);
2309 }
2310 
2311 /*
2312  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
2313  * @wr_mas: The maple write state
2314  *
2315  * Uses mas_slot_locked() and does not need to worry about dead nodes.
2316  */
2317 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
2318 {
2319 	struct ma_state *mas = wr_mas->mas;
2320 	unsigned char count;
2321 	unsigned char offset;
2322 	unsigned long index, min, max;
2323 
2324 	if (unlikely(ma_is_dense(wr_mas->type))) {
2325 		wr_mas->r_max = wr_mas->r_min = mas->index;
2326 		mas->offset = mas->index = mas->min;
2327 		return;
2328 	}
2329 
2330 	wr_mas->node = mas_mn(wr_mas->mas);
2331 	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
2332 	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
2333 					       wr_mas->pivots, mas->max);
2334 	offset = mas->offset;
2335 	min = mas_safe_min(mas, wr_mas->pivots, offset);
2336 	if (unlikely(offset == count))
2337 		goto max;
2338 
2339 	max = wr_mas->pivots[offset];
2340 	index = mas->index;
2341 	if (unlikely(index <= max))
2342 		goto done;
2343 
2344 	if (unlikely(!max && offset))
2345 		goto max;
2346 
2347 	min = max + 1;
2348 	while (++offset < count) {
2349 		max = wr_mas->pivots[offset];
2350 		if (index <= max)
2351 			goto done;
2352 		else if (unlikely(!max))
2353 			break;
2354 
2355 		min = max + 1;
2356 	}
2357 
2358 max:
2359 	max = mas->max;
2360 done:
2361 	wr_mas->r_max = max;
2362 	wr_mas->r_min = min;
2363 	wr_mas->offset_end = mas->offset = offset;
2364 }
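
/*
 * Worked example (editorial addition; values are hypothetical): for a node
 * with pivots {10, 20, 30} and mas->index = 25, the walk stops at offset 2
 * with r_min = 21 and r_max = 30 - the range of the slot a subsequent
 * write will modify.  If index were 35, the loop would run past the last
 * pivot and the max: label would set r_max to mas->max.
 */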
2365 
2366 /*
2367  * mas_topiary_range() - Add a range of slots to the topiary.
2368  * @mas: The maple state
2369  * @destroy: The topiary to add the slots (usually destroy)
2370  * @start: The starting slot inclusively
2371  * @end: The end slot inclusively
2372  */
2373 static inline void mas_topiary_range(struct ma_state *mas,
2374 	struct ma_topiary *destroy, unsigned char start, unsigned char end)
2375 {
2376 	void __rcu **slots;
2377 	unsigned char offset;
2378 
2379 	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
2380 	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
2381 	for (offset = start; offset <= end; offset++) {
2382 		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);
2383 
2384 		if (mte_dead_node(enode))
2385 			continue;
2386 
2387 		mat_add(destroy, enode);
2388 	}
2389 }
2390 
2391 /*
2392  * mast_topiary() - Add the portions of the tree to the removal list; either to
2393  * be freed or discarded (destroy walk).
2394  * @mast: The maple_subtree_state.
2395  */
2396 static inline void mast_topiary(struct maple_subtree_state *mast)
2397 {
2398 	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
2399 	unsigned char r_start, r_end;
2400 	unsigned char l_start, l_end;
2401 	void __rcu **l_slots, **r_slots;
2402 
2403 	wr_mas.type = mte_node_type(mast->orig_l->node);
2404 	mast->orig_l->index = mast->orig_l->last;
2405 	mas_wr_node_walk(&wr_mas);
2406 	l_start = mast->orig_l->offset + 1;
2407 	l_end = mas_data_end(mast->orig_l);
2408 	r_start = 0;
2409 	r_end = mast->orig_r->offset;
2410 
2411 	if (r_end)
2412 		r_end--;
2413 
2414 	l_slots = ma_slots(mas_mn(mast->orig_l),
2415 			   mte_node_type(mast->orig_l->node));
2416 
2417 	r_slots = ma_slots(mas_mn(mast->orig_r),
2418 			   mte_node_type(mast->orig_r->node));
2419 
2420 	if ((l_start < l_end) &&
2421 	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
2422 		l_start++;
2423 	}
2424 
2425 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
2426 		if (r_end)
2427 			r_end--;
2428 	}
2429 
2430 	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
2431 		return;
2432 
2433 	/* At the node where left and right sides meet, add the parts between */
2434 	if (mast->orig_l->node == mast->orig_r->node) {
2435 		return mas_topiary_range(mast->orig_l, mast->destroy,
2436 					     l_start, r_end);
2437 	}
2438 
2439 	/* mast->orig_r is different and consumed. */
2440 	if (mte_is_leaf(mast->orig_r->node))
2441 		return;
2442 
2443 	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
2444 		l_end--;
2445 
2447 	if (l_start <= l_end)
2448 		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);
2449 
2450 	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
2451 		r_start++;
2452 
2453 	if (r_start <= r_end)
2454 		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
2455 }
2456 
2457 /*
2458  * mast_rebalance_next() - Rebalance against the next node
2459  * @mast: The maple subtree state
2460  * Copies the data from the node to the right (next node) into @mast->bn.
2461  */
2462 static inline void mast_rebalance_next(struct maple_subtree_state *mast)
2463 {
2464 	unsigned char b_end = mast->bn->b_end;
2465 
2466 	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
2467 		   mast->bn, b_end);
2468 	mast->orig_r->last = mast->orig_r->max;
2469 }
2470 
2471 /*
2472  * mast_rebalance_prev() - Rebalance against the previous node
2473  * @mast: The maple subtree state
2474  * Copies the data from the node to the left (previous node) into @mast->bn.
2475  */
2476 static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
2477 {
2478 	unsigned char end = mas_data_end(mast->orig_l) + 1;
2479 	unsigned char b_end = mast->bn->b_end;
2480 
2481 	mab_shift_right(mast->bn, end);
2482 	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
2483 	mast->l->min = mast->orig_l->min;
2484 	mast->orig_l->index = mast->orig_l->min;
2485 	mast->bn->b_end = end + b_end;
2486 	mast->l->offset += end;
2487 }
2488 
2489 /*
2490  * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
2491  * the node to the right.  Check the nodes to the right, then the left, at
2492  * each level upwards until the root is reached.  Free and destroy as needed.
2493  * Data is copied into the @mast->bn.
2494  * @mast: The maple_subtree_state.
2495  */
2496 static inline
2497 bool mast_spanning_rebalance(struct maple_subtree_state *mast)
2498 {
2499 	struct ma_state r_tmp = *mast->orig_r;
2500 	struct ma_state l_tmp = *mast->orig_l;
2501 	struct maple_enode *ancestor = NULL;
2502 	unsigned char start, end;
2503 	unsigned char depth = 0;
2504 
2507 	do {
2508 		mas_ascend(mast->orig_r);
2509 		mas_ascend(mast->orig_l);
2510 		depth++;
2511 		if (!ancestor &&
2512 		    (mast->orig_r->node == mast->orig_l->node)) {
2513 			ancestor = mast->orig_r->node;
2514 			end = mast->orig_r->offset - 1;
2515 			start = mast->orig_l->offset + 1;
2516 		}
2517 
2518 		if (mast->orig_r->offset < mas_data_end(mast->orig_r)) {
2519 			if (!ancestor) {
2520 				ancestor = mast->orig_r->node;
2521 				start = 0;
2522 			}
2523 
2524 			mast->orig_r->offset++;
2525 			do {
2526 				mas_descend(mast->orig_r);
2527 				mast->orig_r->offset = 0;
2528 				depth--;
2529 			} while (depth);
2530 
2531 			mast_rebalance_next(mast);
2532 			do {
2533 				unsigned char l_off = 0;
2534 				struct maple_enode *child = r_tmp.node;
2535 
2536 				mas_ascend(&r_tmp);
2537 				if (ancestor == r_tmp.node)
2538 					l_off = start;
2539 
2540 				if (r_tmp.offset)
2541 					r_tmp.offset--;
2542 
2543 				if (l_off < r_tmp.offset)
2544 					mas_topiary_range(&r_tmp, mast->destroy,
2545 							  l_off, r_tmp.offset);
2546 
2547 				if (l_tmp.node != child)
2548 					mat_add(mast->free, child);
2549 
2550 			} while (r_tmp.node != ancestor);
2551 
2552 			*mast->orig_l = l_tmp;
2553 			return true;
2554 
2555 		} else if (mast->orig_l->offset != 0) {
2556 			if (!ancestor) {
2557 				ancestor = mast->orig_l->node;
2558 				end = mas_data_end(mast->orig_l);
2559 			}
2560 
2561 			mast->orig_l->offset--;
2562 			do {
2563 				mas_descend(mast->orig_l);
2564 				mast->orig_l->offset =
2565 					mas_data_end(mast->orig_l);
2566 				depth--;
2567 			} while (depth);
2568 
2569 			mast_rebalance_prev(mast);
2570 			do {
2571 				unsigned char r_off;
2572 				struct maple_enode *child = l_tmp.node;
2573 
2574 				mas_ascend(&l_tmp);
2575 				if (ancestor == l_tmp.node)
2576 					r_off = end;
2577 				else
2578 					r_off = mas_data_end(&l_tmp);
2579 
2580 				if (l_tmp.offset < r_off)
2581 					l_tmp.offset++;
2582 
2583 				if (l_tmp.offset < r_off)
2584 					mas_topiary_range(&l_tmp, mast->destroy,
2585 							  l_tmp.offset, r_off);
2586 
2587 				if (r_tmp.node != child)
2588 					mat_add(mast->free, child);
2589 
2590 			} while (l_tmp.node != ancestor);
2591 
2592 			*mast->orig_r = r_tmp;
2593 			return true;
2594 		}
2595 	} while (!mte_is_root(mast->orig_r->node));
2596 
2597 	*mast->orig_r = r_tmp;
2598 	*mast->orig_l = l_tmp;
2599 	return false;
2600 }
2601 
2602 /*
2603  * mast_ascend_free() - Add current original maple state nodes to the free list
2604  * and ascend.
2605  * @mast: the maple subtree state.
2606  *
2607  * Ascend the original left and right sides and add the previous nodes to the
2608  * free list.  Set the slots to point to the correct location in the new nodes.
2609  */
2610 static inline void
2611 mast_ascend_free(struct maple_subtree_state *mast)
2612 {
2613 	MA_WR_STATE(wr_mas, mast->orig_r,  NULL);
2614 	struct maple_enode *left = mast->orig_l->node;
2615 	struct maple_enode *right = mast->orig_r->node;
2616 
2617 	mas_ascend(mast->orig_l);
2618 	mas_ascend(mast->orig_r);
2619 	mat_add(mast->free, left);
2620 
2621 	if (left != right)
2622 		mat_add(mast->free, right);
2623 
2624 	mast->orig_r->offset = 0;
2625 	mast->orig_r->index = mast->r->max;
2626 	/* last should be larger than or equal to index */
2627 	if (mast->orig_r->last < mast->orig_r->index)
2628 		mast->orig_r->last = mast->orig_r->index;
2629 	/*
2630 	 * The node may not contain the value so set slot to ensure all
2631 	 * of the node's contents are freed or destroyed.
2632 	 */
2633 	wr_mas.type = mte_node_type(mast->orig_r->node);
2634 	mas_wr_node_walk(&wr_mas);
2635 	/* Set up the left side of things */
2636 	mast->orig_l->offset = 0;
2637 	mast->orig_l->index = mast->l->min;
2638 	wr_mas.mas = mast->orig_l;
2639 	wr_mas.type = mte_node_type(mast->orig_l->node);
2640 	mas_wr_node_walk(&wr_mas);
2641 
2642 	mast->bn->type = wr_mas.type;
2643 }
2644 
2645 /*
2646  * mas_new_ma_node() - Create and return a new maple node.  Helper function.
2647  * @mas: the maple state with the allocations.
2648  * @b_node: the maple_big_node with the type encoding.
2649  *
2650  * Use the node type from the maple_big_node to allocate a new node from the
2651  * ma_state.  This function exists mainly for code readability.
2652  *
2653  * Return: A new maple encoded node
2654  */
2655 static inline struct maple_enode
2656 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
2657 {
2658 	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
2659 }
2660 
2661 /*
2662  * mas_mab_to_node() - Set up right and middle nodes
2663  * @mas: the maple state that contains the allocations.
2664  * @b_node: the node which contains the data.
2665  * @left: the pointer which will have the left node
2666  * @right: the pointer which may have the right node
2667  * @middle: the pointer which may have the middle node (rare)
2668  * @mid_split: the split location for the middle node
2669  * @min: the minimum of the range being stored
2670  *
2671  * Return: the split of left.
2672  */
2673 static inline unsigned char mas_mab_to_node(struct ma_state *mas,
2674 	struct maple_big_node *b_node, struct maple_enode **left,
2675 	struct maple_enode **right, struct maple_enode **middle,
2676 	unsigned char *mid_split, unsigned long min)
2677 {
2678 	unsigned char split = 0;
2679 	unsigned char slot_count = mt_slots[b_node->type];
2680 
2681 	*left = mas_new_ma_node(mas, b_node);
2682 	*right = NULL;
2683 	*middle = NULL;
2684 	*mid_split = 0;
2685 
2686 	if (b_node->b_end < slot_count) {
2687 		split = b_node->b_end;
2688 	} else {
2689 		split = mab_calc_split(mas, b_node, mid_split, min);
2690 		*right = mas_new_ma_node(mas, b_node);
2691 	}
2692 
2693 	if (*mid_split)
2694 		*middle = mas_new_ma_node(mas, b_node);
2695 
2696 	return split;
2698 }
2699 
2700 /*
2701  * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
2702  * pointer.
2703  * @b_node: the big node to add the entry
2704  * @mas: the maple state to get the pivot (mas->max)
2705  * @entry: the entry to add, if NULL nothing happens.
2706  */
2707 static inline void mab_set_b_end(struct maple_big_node *b_node,
2708 				 struct ma_state *mas,
2709 				 void *entry)
2710 {
2711 	if (!entry)
2712 		return;
2713 
2714 	b_node->slot[b_node->b_end] = entry;
2715 	if (mt_is_alloc(mas->tree))
2716 		b_node->gap[b_node->b_end] = mas_max_gap(mas);
2717 	b_node->pivot[b_node->b_end++] = mas->max;
2718 }
2719 
2720 /*
2721  * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
2722  * of @mas->node to either @left or @right, depending on @slot and @split
2723  *
2724  * @mas: the maple state with the node that needs a parent
2725  * @left: possible parent 1
2726  * @right: possible parent 2
2727  * @slot: the slot in which mas->node was placed
2728  * @split: the split location between @left and @right
2729  */
2730 static inline void mas_set_split_parent(struct ma_state *mas,
2731 					struct maple_enode *left,
2732 					struct maple_enode *right,
2733 					unsigned char *slot, unsigned char split)
2734 {
2735 	if (mas_is_none(mas))
2736 		return;
2737 
2738 	if ((*slot) <= split)
2739 		mte_set_parent(mas->node, left, *slot);
2740 	else if (right)
2741 		mte_set_parent(mas->node, right, (*slot) - split - 1);
2742 
2743 	(*slot)++;
2744 }
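
/*
 * Worked example (editorial addition): with split = 7, a child at slot 5
 * gets @left as its parent at slot 5, while a child at slot 9 gets @right
 * as its parent at slot 9 - 7 - 1 = 1.  The slot counter always advances,
 * so the three calls in mast_set_split_parents() walk consecutive
 * children.
 */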
2745 
2746 /*
2747  * mte_mid_split_check() - Check if the next node passes the mid-split
2748  * @l: Pointer to the left encoded maple node.
2749  * @r: Pointer to the right encoded maple node.
2750  * @right: The encoded maple node to the right of the split.
2751  * @slot: The offset
2752  * @split: Pointer to the split location.
2753  * @mid_split: The middle split.
2754  */
2755 static inline void mte_mid_split_check(struct maple_enode **l,
2756 				       struct maple_enode **r,
2757 				       struct maple_enode *right,
2758 				       unsigned char slot,
2759 				       unsigned char *split,
2760 				       unsigned char mid_split)
2761 {
2762 	if (*r == right)
2763 		return;
2764 
2765 	if (slot < mid_split)
2766 		return;
2767 
2768 	*l = *r;
2769 	*r = right;
2770 	*split = mid_split;
2771 }
2772 
2773 /*
2774  * mast_set_split_parents() - Helper function to set three nodes parents.  Slot
2775  * is taken from @mast->l.
2776  * @mast: the maple subtree state
2777  * @left: the left node
2778  * @middle: the middle node
2779  * @right: the right node
 * @split: the split location
 * @mid_split: the split location of the middle node
2780  */
2781 static inline void mast_set_split_parents(struct maple_subtree_state *mast,
2782 					  struct maple_enode *left,
2783 					  struct maple_enode *middle,
2784 					  struct maple_enode *right,
2785 					  unsigned char split,
2786 					  unsigned char mid_split)
2787 {
2788 	unsigned char slot;
2789 	struct maple_enode *l = left;
2790 	struct maple_enode *r = right;
2791 
2792 	if (mas_is_none(mast->l))
2793 		return;
2794 
2795 	if (middle)
2796 		r = middle;
2797 
2798 	slot = mast->l->offset;
2799 
2800 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2801 	mas_set_split_parent(mast->l, l, r, &slot, split);
2802 
2803 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2804 	mas_set_split_parent(mast->m, l, r, &slot, split);
2805 
2806 	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
2807 	mas_set_split_parent(mast->r, l, r, &slot, split);
2808 }
2809 
2810 /*
2811  * mas_wmb_replace() - Write memory barrier and replace
2812  * @mas: The maple state
2813  * @free: the maple topiary list of nodes to free
2814  * @destroy: The maple topiary list of nodes to destroy (walk and free)
2815  *
2816  * Updates gap as necessary.
2817  */
2818 static inline void mas_wmb_replace(struct ma_state *mas,
2819 				   struct ma_topiary *free,
2820 				   struct ma_topiary *destroy)
2821 {
2822 	/* All nodes must see old data as dead prior to replacing that data */
2823 	smp_wmb(); /* Needed for RCU */
2824 
2825 	/* Insert the new data in the tree */
2826 	mas_replace(mas, true);
2827 
2828 	if (!mte_is_leaf(mas->node))
2829 		mas_descend_adopt(mas);
2830 
2831 	mas_mat_free(mas, free);
2832 
2833 	if (destroy)
2834 		mas_mat_destroy(mas, destroy);
2835 
2836 	if (mte_is_leaf(mas->node))
2837 		return;
2838 
2839 	mas_update_gap(mas);
2840 }
2841 
2842 /*
2843  * mast_new_root() - Set a new tree root during subtree creation
2844  * @mast: The maple subtree state
2845  * @mas: The maple state
2846  */
2847 static inline void mast_new_root(struct maple_subtree_state *mast,
2848 				 struct ma_state *mas)
2849 {
2850 	mas_mn(mast->l)->parent =
2851 		ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT));
2852 	if (!mte_dead_node(mast->orig_l->node) &&
2853 	    !mte_is_root(mast->orig_l->node)) {
2854 		do {
2855 			mast_ascend_free(mast);
2856 			mast_topiary(mast);
2857 		} while (!mte_is_root(mast->orig_l->node));
2858 	}
2859 	if ((mast->orig_l->node != mas->node) &&
2860 		   (mast->l->depth > mas_mt_height(mas))) {
2861 		mat_add(mast->free, mas->node);
2862 	}
2863 }
2864 
2865 /*
2866  * mast_cp_to_nodes() - Copy data out to nodes.
2867  * @mast: The maple subtree state
2868  * @left: The left encoded maple node
2869  * @middle: The middle encoded maple node
2870  * @right: The right encoded maple node
2871  * @split: The location to split between left and (middle ? middle : right)
2872  * @mid_split: The location to split between middle and right.
2873  */
2874 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
2875 	struct maple_enode *left, struct maple_enode *middle,
2876 	struct maple_enode *right, unsigned char split, unsigned char mid_split)
2877 {
2878 	bool new_lmax = true;
2879 
2880 	mast->l->node = mte_node_or_none(left);
2881 	mast->m->node = mte_node_or_none(middle);
2882 	mast->r->node = mte_node_or_none(right);
2883 
2884 	mast->l->min = mast->orig_l->min;
2885 	if (split == mast->bn->b_end) {
2886 		mast->l->max = mast->orig_r->max;
2887 		new_lmax = false;
2888 	}
2889 
2890 	mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax);
2891 
2892 	if (middle) {
2893 		mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true);
2894 		mast->m->min = mast->bn->pivot[split] + 1;
2895 		split = mid_split;
2896 	}
2897 
2898 	mast->r->max = mast->orig_r->max;
2899 	if (right) {
2900 		mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false);
2901 		mast->r->min = mast->bn->pivot[split] + 1;
2902 	}
2903 }
2904 
2905 /*
2906  * mast_combine_cp_left - Copy in the original left side of the tree into the
2907  * combined data set in the maple subtree state big node.
2908  * @mast: The maple subtree state
2909  */
2910 static inline void mast_combine_cp_left(struct maple_subtree_state *mast)
2911 {
2912 	unsigned char l_slot = mast->orig_l->offset;
2913 
2914 	if (!l_slot)
2915 		return;
2916 
2917 	mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0);
2918 }
2919 
2920 /*
2921  * mast_combine_cp_right() - Copy in the original right side of the tree into the
2922  * combined data set in the maple subtree state big node.
2923  * @mast: The maple subtree state
2924  */
2925 static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
2926 {
2927 	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
2928 		return;
2929 
2930 	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
2931 		   mt_slot_count(mast->orig_r->node), mast->bn,
2932 		   mast->bn->b_end);
2933 	mast->orig_r->last = mast->orig_r->max;
2934 }
2935 
2936 /*
2937  * mast_sufficient() - Check if the maple subtree state has enough data in the big
2938  * node to create at least one sufficient node
2939  * @mast: the maple subtree state
2940  */
2941 static inline bool mast_sufficient(struct maple_subtree_state *mast)
2942 {
2943 	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
2944 		return true;
2945 
2946 	return false;
2947 }
2948 
2949 /*
2950  * mast_overflow() - Check if there is too much data in the subtree state for a
2951  * single node.
2952  * @mast: The maple subtree state
2953  */
2954 static inline bool mast_overflow(struct maple_subtree_state *mast)
2955 {
2956 	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
2957 		return true;
2958 
2959 	return false;
2960 }
2961 
2962 static inline void *mtree_range_walk(struct ma_state *mas)
2963 {
2964 	unsigned long *pivots;
2965 	unsigned char offset;
2966 	struct maple_node *node;
2967 	struct maple_enode *next, *last;
2968 	enum maple_type type;
2969 	void __rcu **slots;
2970 	unsigned char end;
2971 	unsigned long max, min;
2972 	unsigned long prev_max, prev_min;
2973 
2974 	next = mas->node;
2975 	min = mas->min;
2976 	max = mas->max;
2977 	do {
2978 		offset = 0;
2979 		last = next;
2980 		node = mte_to_node(next);
2981 		type = mte_node_type(next);
2982 		pivots = ma_pivots(node, type);
2983 		end = ma_data_end(node, type, pivots, max);
2984 		if (unlikely(ma_dead_node(node)))
2985 			goto dead_node;
2986 
2987 		if (pivots[offset] >= mas->index) {
2988 			prev_max = max;
2989 			prev_min = min;
2990 			max = pivots[offset];
2991 			goto next;
2992 		}
2993 
2994 		do {
2995 			offset++;
2996 		} while ((offset < end) && (pivots[offset] < mas->index));
2997 
2998 		prev_min = min;
2999 		min = pivots[offset - 1] + 1;
3000 		prev_max = max;
3001 		if (likely(offset < end && pivots[offset]))
3002 			max = pivots[offset];
3003 
3004 next:
3005 		slots = ma_slots(node, type);
3006 		next = mt_slot(mas->tree, slots, offset);
3007 		if (unlikely(ma_dead_node(node)))
3008 			goto dead_node;
3009 	} while (!ma_is_leaf(type));
3010 
3011 	mas->offset = offset;
3012 	mas->index = min;
3013 	mas->last = max;
3014 	mas->min = prev_min;
3015 	mas->max = prev_max;
3016 	mas->node = last;
3017 	return (void *)next;
3018 
3019 dead_node:
3020 	mas_reset(mas);
3021 	return NULL;
3022 }
3023 
3024 /*
3025  * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
3026  * @mas: The starting maple state
3027  * @mast: The maple_subtree_state, keeps track of 4 maple states.
3028  * @count: The estimated count of iterations needed.
3029  *
3030  * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
3031  * is hit.  First @b_node is split into two entries which are inserted into the
3032  * next iteration of the loop.  @b_node is returned populated with the final
3033  * iteration. @mas is used to obtain allocations.  orig_l_mas keeps track of the
3034  * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
3035  * to account for what has been copied into the new sub-tree.  The update of
3036  * orig_l_mas->last is used in mas_consume to find the slots that will need to
3037  * be either freed or destroyed.  orig_l_mas->depth keeps track of the height of
3038  * the new sub-tree in case the sub-tree becomes the full tree.
3039  *
3040  * Return: the number of elements in b_node during the last loop.
3041  */
3042 static int mas_spanning_rebalance(struct ma_state *mas,
3043 		struct maple_subtree_state *mast, unsigned char count)
3044 {
3045 	unsigned char split, mid_split;
3046 	unsigned char slot = 0;
3047 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
3048 
3049 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
3050 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3051 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
3052 	MA_TOPIARY(free, mas->tree);
3053 	MA_TOPIARY(destroy, mas->tree);
3054 
3055 	/*
3056 	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
3057 	 * Rebalancing is done by use of the ``struct ma_topiary``.
3058 	 */
3059 	mast->l = &l_mas;
3060 	mast->m = &m_mas;
3061 	mast->r = &r_mas;
3062 	mast->free = &free;
3063 	mast->destroy = &destroy;
3064 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
3065 
3066 	/* If this is not the root and the data is insufficient, rebalance. */
3067 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3068 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3069 		mast_spanning_rebalance(mast);
3070 
3071 	mast->orig_l->depth = 0;
3072 
3073 	/*
3074 	 * Each level of the tree is examined and balanced, pushing data to the left or
3075 	 * right, or rebalancing against left or right nodes is employed to avoid
3076 	 * rippling up the tree to limit the amount of churn.  Once a new sub-section of
3077 	 * the tree is created, there may be a mix of new and old nodes.  The old nodes
3078 	 * will have the incorrect parent pointers and currently be in two trees: the
3079 	 * original tree and the partially new tree.  To remedy the parent pointers in
3080 	 * the old tree, the new data is swapped into the active tree and a walk down
3081 	 * the tree is performed and the parent pointers are updated.
3082 	 * See mas_descend_adopt() for more information..
3083 	 */
3084 	while (count--) {
3085 		mast->bn->b_end--;
3086 		mast->bn->type = mte_node_type(mast->orig_l->node);
3087 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3088 					&mid_split, mast->orig_l->min);
3089 		mast_set_split_parents(mast, left, middle, right, split,
3090 				       mid_split);
3091 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3092 
3093 		/*
3094 		 * Copy data from the next level in the tree to mast->bn for the
3095 		 * next iteration.
3096 		 */
3097 		memset(mast->bn, 0, sizeof(struct maple_big_node));
3098 		mast->bn->type = mte_node_type(left);
3099 		mast->orig_l->depth++;
3100 
3101 		/* Root already stored in l->node. */
3102 		if (mas_is_root_limits(mast->l))
3103 			goto new_root;
3104 
3105 		mast_ascend_free(mast);
3106 		mast_combine_cp_left(mast);
3107 		l_mas.offset = mast->bn->b_end;
3108 		mab_set_b_end(mast->bn, &l_mas, left);
3109 		mab_set_b_end(mast->bn, &m_mas, middle);
3110 		mab_set_b_end(mast->bn, &r_mas, right);
3111 
3112 		/* Copy anything necessary out of the right node. */
3113 		mast_combine_cp_right(mast);
3114 		mast_topiary(mast);
3115 		mast->orig_l->last = mast->orig_l->max;
3116 
3117 		if (mast_sufficient(mast))
3118 			continue;
3119 
3120 		if (mast_overflow(mast))
3121 			continue;
3122 
3123 		/* May be a new root stored in mast->bn */
3124 		if (mas_is_root_limits(mast->orig_l))
3125 			break;
3126 
3127 		mast_spanning_rebalance(mast);
3128 
3129 		/* rebalancing from other nodes may require another loop. */
3130 		if (!count)
3131 			count++;
3132 	}
3133 
3134 	l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)),
3135 				mte_node_type(mast->orig_l->node));
3136 	mast->orig_l->depth++;
3137 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true);
3138 	mte_set_parent(left, l_mas.node, slot);
3139 	if (middle)
3140 		mte_set_parent(middle, l_mas.node, ++slot);
3141 
3142 	if (right)
3143 		mte_set_parent(right, l_mas.node, ++slot);
3144 
3145 	if (mas_is_root_limits(mast->l)) {
3146 new_root:
3147 		mast_new_root(mast, mas);
3148 	} else {
3149 		mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent;
3150 	}
3151 
3152 	if (!mte_dead_node(mast->orig_l->node))
3153 		mat_add(&free, mast->orig_l->node);
3154 
3155 	mas->depth = mast->orig_l->depth;
3156 	*mast->orig_l = l_mas;
3157 	mte_set_node_dead(mas->node);
3158 
3159 	/* Set up mas for insertion. */
3160 	mast->orig_l->depth = mas->depth;
3161 	mast->orig_l->alloc = mas->alloc;
3162 	*mas = *mast->orig_l;
3163 	mas_wmb_replace(mas, &free, &destroy);
3164 	mtree_range_walk(mas);
3165 	return mast->bn->b_end;
3166 }
3167 
3168 /*
3169  * mas_rebalance() - Rebalance a given node.
3170  * @mas: The maple state
3171  * @b_node: The big maple node.
3172  *
3173  * Rebalance two nodes into a single node or two new nodes that are sufficient.
3174  * Continue upwards until tree is sufficient.
3175  *
3176  * Return: the number of elements in b_node during the last loop.
3177  */
3178 static inline int mas_rebalance(struct ma_state *mas,
3179 				struct maple_big_node *b_node)
3180 {
3181 	char empty_count = mas_mt_height(mas);
3182 	struct maple_subtree_state mast;
3183 	unsigned char shift, b_end = ++b_node->b_end;
3184 
3185 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3186 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3187 
3188 	trace_ma_op(__func__, mas);
3189 
3190 	/*
3191 	 * Rebalancing occurs if a node is insufficient.  Data is rebalanced
3192 	 * against the node to the right if it exists, otherwise the node to the
3193 	 * left of this node is rebalanced against this node.  If rebalancing
3194 	 * causes just one node to be produced instead of two, then the parent
3195 	 * is also examined and rebalanced if it is insufficient.  Every level
3196 	 * tries to combine the data in the same way.  If one node contains the
3197 	 * entire range of the tree, then that node is used as a new root node.
3198 	 */
3199 	mas_node_count(mas, 1 + empty_count * 3);
3200 	if (mas_is_err(mas))
3201 		return 0;
3202 
3203 	mast.orig_l = &l_mas;
3204 	mast.orig_r = &r_mas;
3205 	mast.bn = b_node;
3206 	mast.bn->type = mte_node_type(mas->node);
3207 
3208 	l_mas = r_mas = *mas;
3209 
3210 	if (mas_next_sibling(&r_mas)) {
3211 		mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end);
3212 		r_mas.last = r_mas.index = r_mas.max;
3213 	} else {
3214 		mas_prev_sibling(&l_mas);
3215 		shift = mas_data_end(&l_mas) + 1;
3216 		mab_shift_right(b_node, shift);
3217 		mas->offset += shift;
3218 		mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0);
3219 		b_node->b_end = shift + b_end;
3220 		l_mas.index = l_mas.last = l_mas.min;
3221 	}
3222 
3223 	return mas_spanning_rebalance(mas, &mast, empty_count);
3224 }
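
/*
 * Illustration (editorial addition): if a node falls below mt_min_slots
 * for its type, its data and that of the sibling to the right (or, for the
 * right-most node, the sibling to the left) are combined into the big
 * node, and mas_spanning_rebalance() redistributes the entries into one or
 * two sufficient nodes, repeating upwards if the parent becomes
 * insufficient.
 */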
3225 
3226 /*
3227  * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple
3228  * state.
3229  * @mas: The maple state
3230  * @end: The end of the left-most node.
3231  *
3232  * During a mass-insert event (such as forking), it may be necessary to
3233  * rebalance the left-most node when it is not sufficient.
3234  */
3235 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
3236 {
3237 	enum maple_type mt = mte_node_type(mas->node);
3238 	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
3239 	struct maple_enode *eparent;
3240 	unsigned char offset, tmp, split = mt_slots[mt] / 2;
3241 	void __rcu **l_slots, **slots;
3242 	unsigned long *l_pivs, *pivs, gap;
3243 	bool in_rcu = mt_in_rcu(mas->tree);
3244 
3245 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3246 
3247 	l_mas = *mas;
3248 	mas_prev_sibling(&l_mas);
3249 
3250 	/* set up node. */
3251 	if (in_rcu) {
3252 		/* Allocate for both left and right as well as parent. */
3253 		mas_node_count(mas, 3);
3254 		if (mas_is_err(mas))
3255 			return;
3256 
3257 		newnode = mas_pop_node(mas);
3258 	} else {
3259 		newnode = &reuse;
3260 	}
3261 
3262 	node = mas_mn(mas);
3263 	newnode->parent = node->parent;
3264 	slots = ma_slots(newnode, mt);
3265 	pivs = ma_pivots(newnode, mt);
3266 	left = mas_mn(&l_mas);
3267 	l_slots = ma_slots(left, mt);
3268 	l_pivs = ma_pivots(left, mt);
3269 	if (!l_slots[split])
3270 		split++;
3271 	tmp = mas_data_end(&l_mas) - split;
3272 
3273 	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
3274 	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
3275 	pivs[tmp] = l_mas.max;
3276 	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
3277 	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);
3278 
3279 	l_mas.max = l_pivs[split];
3280 	mas->min = l_mas.max + 1;
3281 	eparent = mt_mk_node(mte_parent(l_mas.node),
3282 			     mas_parent_enum(&l_mas, l_mas.node));
3283 	tmp += end;
3284 	if (!in_rcu) {
3285 		unsigned char max_p = mt_pivots[mt];
3286 		unsigned char max_s = mt_slots[mt];
3287 
3288 		if (tmp < max_p)
3289 			memset(pivs + tmp, 0,
3290 			       sizeof(unsigned long) * (max_p - tmp));
3291 
3292 		if (tmp < max_s)
3293 			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3294 
3295 		memcpy(node, newnode, sizeof(struct maple_node));
3296 		ma_set_meta(node, mt, 0, tmp - 1);
3297 		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
3298 			      l_pivs[split]);
3299 
3300 		/* Remove data from l_pivs. */
3301 		tmp = split + 1;
3302 		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
3303 		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
3304 		ma_set_meta(left, mt, 0, split);
3305 
3306 		goto done;
3307 	}
3308 
3309 	/* RCU requires replacing both l_mas, mas, and parent. */
3310 	mas->node = mt_mk_node(newnode, mt);
3311 	ma_set_meta(newnode, mt, 0, tmp);
3312 
3313 	new_left = mas_pop_node(mas);
3314 	new_left->parent = left->parent;
3315 	mt = mte_node_type(l_mas.node);
3316 	slots = ma_slots(new_left, mt);
3317 	pivs = ma_pivots(new_left, mt);
3318 	memcpy(slots, l_slots, sizeof(void *) * split);
3319 	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
3320 	ma_set_meta(new_left, mt, 0, split);
3321 	l_mas.node = mt_mk_node(new_left, mt);
3322 
3323 	/* replace parent. */
3324 	offset = mte_parent_slot(mas->node);
3325 	mt = mas_parent_enum(&l_mas, l_mas.node);
3326 	parent = mas_pop_node(mas);
3327 	slots = ma_slots(parent, mt);
3328 	pivs = ma_pivots(parent, mt);
3329 	memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node));
3330 	rcu_assign_pointer(slots[offset], mas->node);
3331 	rcu_assign_pointer(slots[offset - 1], l_mas.node);
3332 	pivs[offset - 1] = l_mas.max;
3333 	eparent = mt_mk_node(parent, mt);
3334 done:
3335 	gap = mas_leaf_max_gap(mas);
3336 	mte_set_gap(eparent, mte_parent_slot(mas->node), gap);
3337 	gap = mas_leaf_max_gap(&l_mas);
3338 	mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap);
3339 	mas_ascend(mas);
3340 
3341 	if (in_rcu)
3342 		mas_replace(mas, false);
3343 
3344 	mas_update_gap(mas);
3345 }
3346 
3347 /*
3348  * mas_split_final_node() - Split the final node in a subtree operation.
3349  * @mast: the maple subtree state
3350  * @mas: The maple state
3351  * @height: The height of the tree in case it's a new root.
3352  */
3353 static inline bool mas_split_final_node(struct maple_subtree_state *mast,
3354 					struct ma_state *mas, int height)
3355 {
3356 	struct maple_enode *ancestor;
3357 
3358 	if (mte_is_root(mas->node)) {
3359 		if (mt_is_alloc(mas->tree))
3360 			mast->bn->type = maple_arange_64;
3361 		else
3362 			mast->bn->type = maple_range_64;
3363 		mas->depth = height;
3364 	}
3365 	/*
3366 	 * Only a single node is used here, could be root.
3367 	 * The Big_node data should just fit in a single node.
3368 	 */
3369 	ancestor = mas_new_ma_node(mas, mast->bn);
3370 	mte_set_parent(mast->l->node, ancestor, mast->l->offset);
3371 	mte_set_parent(mast->r->node, ancestor, mast->r->offset);
3372 	mte_to_node(ancestor)->parent = mas_mn(mas)->parent;
3373 
3374 	mast->l->node = ancestor;
3375 	mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
3376 	mas->offset = mast->bn->b_end - 1;
3377 	return true;
3378 }
3379 
3380 /*
3381  * mast_fill_bnode() - Copy data into the big node in the subtree state
3382  * @mast: The maple subtree state
3383  * @mas: the maple state
3384  * @skip: The number of entries to skip for new nodes insertion.
3385  */
3386 static inline void mast_fill_bnode(struct maple_subtree_state *mast,
3387 					 struct ma_state *mas,
3388 					 unsigned char skip)
3389 {
3390 	bool cp = true;
3391 	struct maple_enode *old = mas->node;
3392 	unsigned char split;
3393 
3394 	memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap));
3395 	memset(mast->bn->slot, 0, sizeof(void *) * ARRAY_SIZE(mast->bn->slot));
3396 	memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot));
3397 	mast->bn->b_end = 0;
3398 
3399 	if (mte_is_root(mas->node)) {
3400 		cp = false;
3401 	} else {
3402 		mas_ascend(mas);
3403 		mat_add(mast->free, old);
3404 		mas->offset = mte_parent_slot(mas->node);
3405 	}
3406 
3407 	if (cp && mast->l->offset)
3408 		mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0);
3409 
3410 	split = mast->bn->b_end;
3411 	mab_set_b_end(mast->bn, mast->l, mast->l->node);
3412 	mast->r->offset = mast->bn->b_end;
3413 	mab_set_b_end(mast->bn, mast->r, mast->r->node);
3414 	if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max)
3415 		cp = false;
3416 
3417 	if (cp)
3418 		mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1,
3419 			   mast->bn, mast->bn->b_end);
3420 
3421 	mast->bn->b_end--;
3422 	mast->bn->type = mte_node_type(mas->node);
3423 }
3424 
3425 /*
3426  * mast_split_data() - Split the data in the subtree state big node into regular
3427  * nodes.
3428  * @mast: The maple subtree state
3429  * @mas: The maple state
3430  * @split: The location to split the big node
3431  */
3432 static inline void mast_split_data(struct maple_subtree_state *mast,
3433 	   struct ma_state *mas, unsigned char split)
3434 {
3435 	unsigned char p_slot;
3436 
3437 	mab_mas_cp(mast->bn, 0, split, mast->l, true);
3438 	mte_set_pivot(mast->r->node, 0, mast->r->max);
3439 	mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false);
3440 	mast->l->offset = mte_parent_slot(mas->node);
3441 	mast->l->max = mast->bn->pivot[split];
3442 	mast->r->min = mast->l->max + 1;
3443 	if (mte_is_leaf(mas->node))
3444 		return;
3445 
3446 	p_slot = mast->orig_l->offset;
3447 	mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node,
3448 			     &p_slot, split);
3449 	mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node,
3450 			     &p_slot, split);
3451 }
3452 
3453 /*
3454  * mas_push_data() - Instead of splitting a node, it is beneficial to push the
3455  * data to the right or left node if there is room.
3456  * @mas: The maple state
3457  * @height: The current height of the maple state
3458  * @mast: The maple subtree state
3459  * @left: Push left or not.
3460  *
3461  * Keeping the height of the tree low means faster lookups.
3462  *
3463  * Return: True if pushed, false otherwise.
3464  */
3465 static inline bool mas_push_data(struct ma_state *mas, int height,
3466 				 struct maple_subtree_state *mast, bool left)
3467 {
3468 	unsigned char slot_total = mast->bn->b_end;
3469 	unsigned char end, space, split;
3470 
3471 	MA_STATE(tmp_mas, mas->tree, mas->index, mas->last);
3472 	tmp_mas = *mas;
3473 	tmp_mas.depth = mast->l->depth;
3474 
3475 	if (left && !mas_prev_sibling(&tmp_mas))
3476 		return false;
3477 	else if (!left && !mas_next_sibling(&tmp_mas))
3478 		return false;
3479 
3480 	end = mas_data_end(&tmp_mas);
3481 	slot_total += end;
3482 	space = 2 * mt_slot_count(mas->node) - 2;
3483 	/* -2 instead of -1 to ensure there isn't a triple split */
3484 	if (ma_is_leaf(mast->bn->type))
3485 		space--;
3486 
3487 	if (mas->max == ULONG_MAX)
3488 		space--;
3489 
3490 	if (slot_total >= space)
3491 		return false;
3492 
3493 	/* Get the data; Fill mast->bn */
3494 	mast->bn->b_end++;
3495 	if (left) {
3496 		mab_shift_right(mast->bn, end + 1);
3497 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0);
3498 		mast->bn->b_end = slot_total + 1;
3499 	} else {
3500 		mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end);
3501 	}
3502 
3503 	/* Configure mast for splitting of mast->bn */
3504 	split = mt_slots[mast->bn->type] - 2;
3505 	if (left) {
3506 		/*  Switch mas to prev node  */
3507 		mat_add(mast->free, mas->node);
3508 		*mas = tmp_mas;
3509 		/* Start using mast->l for the left side. */
3510 		tmp_mas.node = mast->l->node;
3511 		*mast->l = tmp_mas;
3512 	} else {
3513 		mat_add(mast->free, tmp_mas.node);
3514 		tmp_mas.node = mast->r->node;
3515 		*mast->r = tmp_mas;
3516 		split = slot_total - split;
3517 	}
3518 	split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]);
3519 	/* Update parent slot for split calculation. */
3520 	if (left)
3521 		mast->orig_l->offset += end + 1;
3522 
3523 	mast_split_data(mast, mas, split);
3524 	mast_fill_bnode(mast, mas, 2);
3525 	mas_split_final_node(mast, mas, height + 1);
3526 	return true;
3527 }
3528 
3529 /*
3530  * mas_split() - Split data that is too big for one node into two.
3531  * @mas: The maple state
3532  * @b_node: The maple big node
3533  * Return: 1 on success, 0 on failure.
3534  */
3535 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
3536 {
3537 	struct maple_subtree_state mast;
3538 	int height = 0;
3539 	unsigned char mid_split, split = 0;
3540 
3541 	/*
3542 	 * Splitting is handled differently from any other B-tree; the Maple
3543 	 * Tree splits upwards.  Splitting up means that the split operation
3544 	 * occurs when the walk of the tree hits the leaves and not on the way
3545 	 * down.  The reason for splitting up is that it is impossible to know
3546 	 * how much space will be needed until the leaf is (or leaves are)
3547 	 * reached.  Since overwriting data is allowed and a range could
3548 	 * overwrite more than one range or result in changing one entry into 3
3549 	 * entries, it is impossible to know if a split is required until the
3550 	 * data is examined.
3551 	 *
3552 	 * Splitting is a balancing act between keeping allocations to a minimum
3553 	 * and avoiding a 'jitter' event where a tree is expanded to make room
3554 	 * for an entry followed by a contraction when the entry is removed.  To
3555 	 * accomplish the balance, there are empty slots remaining in both left
3556 	 * and right nodes after a split.
3557 	 */
3558 	MA_STATE(l_mas, mas->tree, mas->index, mas->last);
3559 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
3560 	MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last);
3561 	MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last);
3562 	MA_TOPIARY(mat, mas->tree);
3563 
3564 	trace_ma_op(__func__, mas);
3565 	mas->depth = mas_mt_height(mas);
3566 	/* Allocation failures will happen early. */
3567 	mas_node_count(mas, 1 + mas->depth * 2);
3568 	if (mas_is_err(mas))
3569 		return 0;
3570 
3571 	mast.l = &l_mas;
3572 	mast.r = &r_mas;
3573 	mast.orig_l = &prev_l_mas;
3574 	mast.orig_r = &prev_r_mas;
3575 	mast.free = &mat;
3576 	mast.bn = b_node;
3577 
3578 	while (height++ <= mas->depth) {
3579 		if (mt_slots[b_node->type] > b_node->b_end) {
3580 			mas_split_final_node(&mast, mas, height);
3581 			break;
3582 		}
3583 
3584 		l_mas = r_mas = *mas;
3585 		l_mas.node = mas_new_ma_node(mas, b_node);
3586 		r_mas.node = mas_new_ma_node(mas, b_node);
3587 		/*
3588 		 * Another way that 'jitter' is avoided is to terminate a split up early
3589 		 * if the left or right node has space to spare.  This is referred to as
3590 		 * "pushing left" or "pushing right" and is similar to the B* tree, except
3591 		 * the nodes left or right can rarely be reused due to RCU, but the ripple
3592 		 * upwards is halted, which is a significant saving.
3593 		 */
3594 		/* Try to push left. */
3595 		if (mas_push_data(mas, height, &mast, true))
3596 			break;
3597 
3598 		/* Try to push right. */
3599 		if (mas_push_data(mas, height, &mast, false))
3600 			break;
3601 
3602 		split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min);
3603 		mast_split_data(&mast, mas, split);
3604 		/*
3605 		 * mab_mas_cp() in the above call may overwrite r->max, so set
3606 		 * it back to the correct value here.
3607 		 */
3608 		mast.r->max = mas->max;
3609 		mast_fill_bnode(&mast, mas, 1);
3610 		prev_l_mas = *mast.l;
3611 		prev_r_mas = *mast.r;
3612 	}
3613 
3614 	/* Set the original node as dead */
3615 	mat_add(mast.free, mas->node);
3616 	mas->node = l_mas.node;
3617 	mas_wmb_replace(mas, mast.free, NULL);
3618 	mtree_range_walk(mas);
3619 	return 1;
3620 }
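
/*
 * Illustration of the push heuristic above (editorial addition, assuming a
 * 16-slot leaf): a full leaf that must grow would normally split into two
 * nodes plus a new parent entry.  If its left sibling holds, say, 10
 * entries (data end 9), the combined 16 + 9 = 25 slots are below the space
 * limit of 2 * 16 - 2 - 1 = 29, so the data is pushed left and the ripple
 * of parent splits stops one level up.
 */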
3621 
3622 /*
3623  * mas_reuse_node() - Reuse the node to store the data.
3624  * @wr_mas: The maple write state
3625  * @bn: The maple big node
3626  * @end: The end of the data.
3627  *
3628  * Will always return false in RCU mode.
3629  *
3630  * Return: True if node was reused, false otherwise.
3631  */
3632 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas,
3633 			  struct maple_big_node *bn, unsigned char end)
3634 {
3635 	/* Need to be rcu safe. */
3636 	if (mt_in_rcu(wr_mas->mas->tree))
3637 		return false;
3638 
3639 	if (end > bn->b_end) {
3640 		int clear = mt_slots[wr_mas->type] - bn->b_end;
3641 
3642 		memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--);
3643 		memset(wr_mas->pivots + bn->b_end, 0, sizeof(unsigned long) * clear);
3644 	}
3645 	mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false);
3646 	return true;
3647 }
3648 
3649 /*
3650  * mas_commit_b_node() - Commit the big node into the tree.
3651  * @wr_mas: The maple write state
3652  * @b_node: The maple big node
3653  * @end: The end of the data.
3654  */
3655 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
3656 			    struct maple_big_node *b_node, unsigned char end)
3657 {
3658 	struct maple_node *node;
3659 	unsigned char b_end = b_node->b_end;
3660 	enum maple_type b_type = b_node->type;
3661 
3662 	if ((b_end < mt_min_slots[b_type]) &&
3663 	    (!mte_is_root(wr_mas->mas->node)) &&
3664 	    (mas_mt_height(wr_mas->mas) > 1))
3665 		return mas_rebalance(wr_mas->mas, b_node);
3666 
3667 	if (b_end >= mt_slots[b_type])
3668 		return mas_split(wr_mas->mas, b_node);
3669 
3670 	if (mas_reuse_node(wr_mas, b_node, end))
3671 		goto reuse_node;
3672 
3673 	mas_node_count(wr_mas->mas, 1);
3674 	if (mas_is_err(wr_mas->mas))
3675 		return 0;
3676 
3677 	node = mas_pop_node(wr_mas->mas);
3678 	node->parent = mas_mn(wr_mas->mas)->parent;
3679 	wr_mas->mas->node = mt_mk_node(node, b_type);
3680 	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
3681 	mas_replace(wr_mas->mas, false);
3682 reuse_node:
3683 	mas_update_gap(wr_mas->mas);
3684 	return 1;
3685 }
3686 
3687 /*
3688  * mas_root_expand() - Expand a root to a node
3689  * @mas: The maple state
3690  * @entry: The entry to store into the tree
3691  */
3692 static inline int mas_root_expand(struct ma_state *mas, void *entry)
3693 {
3694 	void *contents = mas_root_locked(mas);
3695 	enum maple_type type = maple_leaf_64;
3696 	struct maple_node *node;
3697 	void __rcu **slots;
3698 	unsigned long *pivots;
3699 	int slot = 0;
3700 
3701 	mas_node_count(mas, 1);
3702 	if (unlikely(mas_is_err(mas)))
3703 		return 0;
3704 
3705 	node = mas_pop_node(mas);
3706 	pivots = ma_pivots(node, type);
3707 	slots = ma_slots(node, type);
3708 	node->parent = ma_parent_ptr(
3709 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3710 	mas->node = mt_mk_node(node, type);
3711 
3712 	if (mas->index) {
3713 		if (contents) {
3714 			rcu_assign_pointer(slots[slot], contents);
3715 			if (likely(mas->index > 1))
3716 				slot++;
3717 		}
3718 		pivots[slot++] = mas->index - 1;
3719 	}
3720 
3721 	rcu_assign_pointer(slots[slot], entry);
3722 	mas->offset = slot;
3723 	pivots[slot] = mas->last;
3724 	if (mas->last != ULONG_MAX)
3725 		slot++;
3726 	mas->depth = 1;
3727 	mas_set_height(mas);
3728 	ma_set_meta(node, maple_leaf_64, 0, slot);
3729 	/* swap the new root into the tree */
3730 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
3731 	return slot;
3732 }
3733 
3734 static inline void mas_store_root(struct ma_state *mas, void *entry)
3735 {
3736 	if (likely((mas->last != 0) || (mas->index != 0)))
3737 		mas_root_expand(mas, entry);
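	/*
	 * Entries with bit pattern 10 (advanced entries such as
	 * XA_ZERO_ENTRY) would be indistinguishable from internal values in
	 * ma_root, so they also force a root expansion.
	 */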
3738 	else if (((unsigned long) (entry) & 3) == 2)
3739 		mas_root_expand(mas, entry);
3740 	else {
3741 		rcu_assign_pointer(mas->tree->ma_root, entry);
3742 		mas->node = MAS_START;
3743 	}
3744 }
3745 
3746 /*
3747  * mas_is_span_wr() - Check if the write needs to be treated as a write that
3748  * spans the node.
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes where storing a %NULL would cause the node to end with a %NULL.
3756  *
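 * For example (illustrative): in a leaf covering [10, 20], writing %NULL to
 * [18, 20] makes the node end in %NULL and is treated as spanning, while
 * writing a non-%NULL entry to the same range is not.
 *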
3757  * Return: True if this is a spanning write, false otherwise.
3758  */
3759 static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
3760 {
3761 	unsigned long max;
3762 	unsigned long last = wr_mas->mas->last;
3763 	unsigned long piv = wr_mas->r_max;
3764 	enum maple_type type = wr_mas->type;
3765 	void *entry = wr_mas->entry;
3766 
3767 	/* Contained in this pivot */
3768 	if (piv > last)
3769 		return false;
3770 
3771 	max = wr_mas->mas->max;
3772 	if (unlikely(ma_is_leaf(type))) {
3773 		/* Fits in the node, but may span slots. */
3774 		if (last < max)
3775 			return false;
3776 
3777 		/* Writes to the end of the node but not null. */
3778 		if ((last == max) && entry)
3779 			return false;
3780 
3781 		/*
3782 		 * Writing ULONG_MAX is not a spanning write regardless of the
3783 		 * value being written as long as the range fits in the node.
3784 		 */
3785 		if ((last == ULONG_MAX) && (last == max))
3786 			return false;
3787 	} else if (piv == last) {
3788 		if (entry)
3789 			return false;
3790 
3791 		/* Detect spanning store wr walk */
3792 		if (last == ULONG_MAX)
3793 			return false;
3794 	}
3795 
3796 	trace_ma_write(__func__, wr_mas->mas, piv, entry);
3797 
3798 	return true;
3799 }
3800 
3801 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
3802 {
3803 	wr_mas->type = mte_node_type(wr_mas->mas->node);
3804 	mas_wr_node_walk(wr_mas);
3805 	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
3806 }
3807 
3808 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
3809 {
3810 	wr_mas->mas->max = wr_mas->r_max;
3811 	wr_mas->mas->min = wr_mas->r_min;
3812 	wr_mas->mas->node = wr_mas->content;
3813 	wr_mas->mas->offset = 0;
3814 	wr_mas->mas->depth++;
3815 }

/*
3817  * mas_wr_walk() - Walk the tree for a write.
3818  * @wr_mas: The maple write state
3819  *
3820  * Uses mas_slot_locked() and does not need to worry about dead nodes.
3821  *
3822  * Return: True if it's contained in a node, false on spanning write.
3823  */
3824 static bool mas_wr_walk(struct ma_wr_state *wr_mas)
3825 {
3826 	struct ma_state *mas = wr_mas->mas;
3827 
3828 	while (true) {
3829 		mas_wr_walk_descend(wr_mas);
3830 		if (unlikely(mas_is_span_wr(wr_mas)))
3831 			return false;
3832 
3833 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3834 						  mas->offset);
3835 		if (ma_is_leaf(wr_mas->type))
3836 			return true;
3837 
3838 		mas_wr_walk_traverse(wr_mas);
3839 	}
3840 
3841 	return true;
3842 }
3843 
3844 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
3845 {
3846 	struct ma_state *mas = wr_mas->mas;
3847 
3848 	while (true) {
3849 		mas_wr_walk_descend(wr_mas);
3850 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
3851 						  mas->offset);
3852 		if (ma_is_leaf(wr_mas->type))
3853 			return true;
3854 		mas_wr_walk_traverse(wr_mas);
	}
3857 	return true;
3858 }

/*
3860  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3861  * @l_wr_mas: The left maple write state
3862  * @r_wr_mas: The right maple write state
3863  */
3864 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3865 					    struct ma_wr_state *r_wr_mas)
3866 {
3867 	struct ma_state *r_mas = r_wr_mas->mas;
3868 	struct ma_state *l_mas = l_wr_mas->mas;
3869 	unsigned char l_slot;
3870 
3871 	l_slot = l_mas->offset;
3872 	if (!l_wr_mas->content)
3873 		l_mas->index = l_wr_mas->r_min;
3874 
3875 	if ((l_mas->index == l_wr_mas->r_min) &&
3876 		 (l_slot &&
3877 		  !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3878 		if (l_slot > 1)
3879 			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3880 		else
3881 			l_mas->index = l_mas->min;
3882 
3883 		l_mas->offset = l_slot - 1;
3884 	}
3885 
3886 	if (!r_wr_mas->content) {
3887 		if (r_mas->last < r_wr_mas->r_max)
3888 			r_mas->last = r_wr_mas->r_max;
3889 		r_mas->offset++;
3890 	} else if ((r_mas->last == r_wr_mas->r_max) &&
3891 	    (r_mas->last < r_mas->max) &&
3892 	    !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3893 		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3894 					     r_wr_mas->type, r_mas->offset + 1);
3895 		r_mas->offset++;
3896 	}
3897 }
3898 
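/*
 * mas_state_walk() - Walk from the start of the tree to the leaf containing
 * @mas->index, handling the empty-tree and single-entry (pointer) root cases.
 * @mas: The maple state
 *
 * Return: The entry, or %NULL if the tree is empty.
 */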
3899 static inline void *mas_state_walk(struct ma_state *mas)
3900 {
3901 	void *entry;
3902 
3903 	entry = mas_start(mas);
3904 	if (mas_is_none(mas))
3905 		return NULL;
3906 
3907 	if (mas_is_ptr(mas))
3908 		return entry;
3909 
3910 	return mtree_range_walk(mas);
3911 }
3912 
3913 /*
3914  * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3915  * to date.
3916  *
3917  * @mas: The maple state.
3918  *
 * Note: Leaves @mas in an undesirable state.
3920  * Return: The entry for @mas->index or %NULL on dead node.
3921  */
3922 static inline void *mtree_lookup_walk(struct ma_state *mas)
3923 {
3924 	unsigned long *pivots;
3925 	unsigned char offset;
3926 	struct maple_node *node;
3927 	struct maple_enode *next;
3928 	enum maple_type type;
3929 	void __rcu **slots;
3930 	unsigned char end;
3931 	unsigned long max;
3932 
3933 	next = mas->node;
3934 	max = ULONG_MAX;
3935 	do {
3936 		offset = 0;
3937 		node = mte_to_node(next);
3938 		type = mte_node_type(next);
3939 		pivots = ma_pivots(node, type);
3940 		end = ma_data_end(node, type, pivots, max);
3941 		if (unlikely(ma_dead_node(node)))
3942 			goto dead_node;
3943 		do {
3944 			if (pivots[offset] >= mas->index) {
3945 				max = pivots[offset];
3946 				break;
3947 			}
3948 		} while (++offset < end);
3949 
3950 		slots = ma_slots(node, type);
3951 		next = mt_slot(mas->tree, slots, offset);
3952 		if (unlikely(ma_dead_node(node)))
3953 			goto dead_node;
3954 	} while (!ma_is_leaf(type));
3955 
3956 	return (void *)next;
3957 
3958 dead_node:
3959 	mas_reset(mas);
3960 	return NULL;
3961 }
3962 
3963 /*
3964  * mas_new_root() - Create a new root node that only contains the entry passed
3965  * in.
3966  * @mas: The maple state
3967  * @entry: The entry to store.
3968  *
 * Only valid when mas->index == 0 and mas->last == ULONG_MAX.
 *
 * Return: 0 on error, 1 on success.
3972  */
3973 static inline int mas_new_root(struct ma_state *mas, void *entry)
3974 {
3975 	struct maple_enode *root = mas_root_locked(mas);
3976 	enum maple_type type = maple_leaf_64;
3977 	struct maple_node *node;
3978 	void __rcu **slots;
3979 	unsigned long *pivots;
3980 
3981 	if (!entry && !mas->index && mas->last == ULONG_MAX) {
3982 		mas->depth = 0;
3983 		mas_set_height(mas);
3984 		rcu_assign_pointer(mas->tree->ma_root, entry);
3985 		mas->node = MAS_START;
3986 		goto done;
3987 	}
3988 
3989 	mas_node_count(mas, 1);
3990 	if (mas_is_err(mas))
3991 		return 0;
3992 
3993 	node = mas_pop_node(mas);
3994 	pivots = ma_pivots(node, type);
3995 	slots = ma_slots(node, type);
3996 	node->parent = ma_parent_ptr(
3997 		      ((unsigned long)mas->tree | MA_ROOT_PARENT));
3998 	mas->node = mt_mk_node(node, type);
3999 	rcu_assign_pointer(slots[0], entry);
4000 	pivots[0] = mas->last;
4001 	mas->depth = 1;
4002 	mas_set_height(mas);
4003 	rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
4004 
4005 done:
4006 	if (xa_is_node(root))
4007 		mte_destroy_walk(root, mas->tree);
4008 
4009 	return 1;
4010 }

/*
4012  * mas_wr_spanning_store() - Create a subtree with the store operation completed
4013  * and new nodes where necessary, then place the sub-tree in the actual tree.
4014  * Note that mas is expected to point to the node which caused the store to
4015  * span.
4016  * @wr_mas: The maple write state
4017  *
4018  * Return: 0 on error, positive on success.
4019  */
4020 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
4021 {
4022 	struct maple_subtree_state mast;
4023 	struct maple_big_node b_node;
4024 	struct ma_state *mas;
4025 	unsigned char height;
4026 
4027 	/* Left and Right side of spanning store */
4028 	MA_STATE(l_mas, NULL, 0, 0);
4029 	MA_STATE(r_mas, NULL, 0, 0);
4030 
4031 	MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry);
4032 	MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry);
4033 
4034 	/*
4035 	 * A store operation that spans multiple nodes is called a spanning
4036 	 * store and is handled early in the store call stack by the function
4037 	 * mas_is_span_wr().  When a spanning store is identified, the maple
4038 	 * state is duplicated.  The first maple state walks the left tree path
4039 	 * to ``index``, the duplicate walks the right tree path to ``last``.
4040 	 * The data in the two nodes are combined into a single node, two nodes,
4041 	 * or possibly three nodes (see the 3-way split above).  A ``NULL``
4042 	 * written to the last entry of a node is considered a spanning store as
4043 	 * a rebalance is required for the operation to complete and an overflow
4044 	 * of data may happen.
4045 	 */
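	/*
	 * For example (illustrative): with [0, 5] and [6, 10] in one leaf and
	 * [11, 15] in the next, a store to [8, 12] starts in the first leaf
	 * and ends in the second, so the data of both leaves is combined.
	 */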
4046 	mas = wr_mas->mas;
4047 	trace_ma_op(__func__, mas);
4048 
4049 	if (unlikely(!mas->index && mas->last == ULONG_MAX))
4050 		return mas_new_root(mas, wr_mas->entry);
4051 	/*
4052 	 * Node rebalancing may occur due to this store, so there may be three new
4053 	 * entries per level plus a new root.
4054 	 */
4055 	height = mas_mt_height(mas);
4056 	mas_node_count(mas, 1 + height * 3);
4057 	if (mas_is_err(mas))
4058 		return 0;
4059 
4060 	/*
4061 	 * Set up right side.  Need to get to the next offset after the spanning
4062 	 * store to ensure it's not NULL and to combine both the next node and
4063 	 * the node with the start together.
4064 	 */
4065 	r_mas = *mas;
4066 	/* Avoid overflow, walk to next slot in the tree. */
4067 	if (r_mas.last + 1)
4068 		r_mas.last++;
4069 
4070 	r_mas.index = r_mas.last;
4071 	mas_wr_walk_index(&r_wr_mas);
4072 	r_mas.last = r_mas.index = mas->last;
4073 
4074 	/* Set up left side. */
4075 	l_mas = *mas;
4076 	mas_wr_walk_index(&l_wr_mas);
4077 
4078 	if (!wr_mas->entry) {
4079 		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
4080 		mas->offset = l_mas.offset;
4081 		mas->index = l_mas.index;
4082 		mas->last = l_mas.last = r_mas.last;
4083 	}
4084 
4085 	/* expanding NULLs may make this cover the entire range */
4086 	if (!l_mas.index && r_mas.last == ULONG_MAX) {
4087 		mas_set_range(mas, 0, ULONG_MAX);
4088 		return mas_new_root(mas, wr_mas->entry);
4089 	}
4090 
4091 	memset(&b_node, 0, sizeof(struct maple_big_node));
4092 	/* Copy l_mas and store the value in b_node. */
4093 	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
4094 	/* Copy r_mas into b_node. */
4095 	if (r_mas.offset <= r_wr_mas.node_end)
4096 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
4097 			   &b_node, b_node.b_end + 1);
4098 	else
4099 		b_node.b_end++;
4100 
4101 	/* Stop spanning searches by searching for just index. */
4102 	l_mas.index = l_mas.last = mas->index;
4103 
4104 	mast.bn = &b_node;
4105 	mast.orig_l = &l_mas;
4106 	mast.orig_r = &r_mas;
4107 	/* Combine l_mas and r_mas and split them up evenly again. */
4108 	return mas_spanning_rebalance(mas, &mast, height + 1);
4109 }
4110 
4111 /*
4112  * mas_wr_node_store() - Attempt to store the value in a node
4113  * @wr_mas: The maple write state
4114  *
4115  * Attempts to reuse the node, but may allocate.
4116  *
4117  * Return: True if stored, false otherwise
4118  */
4119 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas)
4120 {
4121 	struct ma_state *mas = wr_mas->mas;
4122 	void __rcu **dst_slots;
4123 	unsigned long *dst_pivots;
4124 	unsigned char dst_offset;
4125 	unsigned char new_end = wr_mas->node_end;
4126 	unsigned char offset;
4127 	unsigned char node_slots = mt_slots[wr_mas->type];
4128 	struct maple_node reuse, *newnode;
4129 	unsigned char copy_size, max_piv = mt_pivots[wr_mas->type];
4130 	bool in_rcu = mt_in_rcu(mas->tree);
4131 
4132 	offset = mas->offset;
4133 	if (mas->last == wr_mas->r_max) {
4134 		/* runs right to the end of the node */
4135 		if (mas->last == mas->max)
4136 			new_end = offset;
4137 		/* don't copy this offset */
4138 		wr_mas->offset_end++;
4139 	} else if (mas->last < wr_mas->r_max) {
4140 		/* new range ends in this range */
4141 		if (unlikely(wr_mas->r_max == ULONG_MAX))
4142 			mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
4143 
4144 		new_end++;
4145 	} else {
4146 		if (wr_mas->end_piv == mas->last)
4147 			wr_mas->offset_end++;
4148 
4149 		new_end -= wr_mas->offset_end - offset - 1;
4150 	}
4151 
4152 	/* new range starts within a range */
4153 	if (wr_mas->r_min < mas->index)
4154 		new_end++;
4155 
4156 	/* Not enough room */
4157 	if (new_end >= node_slots)
4158 		return false;
4159 
4160 	/* Not enough data. */
4161 	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
4162 	    !(mas->mas_flags & MA_STATE_BULK))
4163 		return false;
4164 
4165 	/* set up node. */
4166 	if (in_rcu) {
4167 		mas_node_count(mas, 1);
4168 		if (mas_is_err(mas))
4169 			return false;
4170 
4171 		newnode = mas_pop_node(mas);
4172 	} else {
4173 		memset(&reuse, 0, sizeof(struct maple_node));
4174 		newnode = &reuse;
4175 	}
4176 
4177 	newnode->parent = mas_mn(mas)->parent;
4178 	dst_pivots = ma_pivots(newnode, wr_mas->type);
4179 	dst_slots = ma_slots(newnode, wr_mas->type);
4180 	/* Copy from start to insert point */
4181 	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1));
4182 	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1));
4183 	dst_offset = offset;
4184 
4185 	/* Handle insert of new range starting after old range */
4186 	if (wr_mas->r_min < mas->index) {
4187 		mas->offset++;
4188 		rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content);
4189 		dst_pivots[dst_offset++] = mas->index - 1;
4190 	}
4191 
4192 	/* Store the new entry and range end. */
4193 	if (dst_offset < max_piv)
4194 		dst_pivots[dst_offset] = mas->last;
4195 	mas->offset = dst_offset;
4196 	rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry);
4197 
4198 	/*
4199 	 * this range wrote to the end of the node or it overwrote the rest of
4200 	 * the data
4201 	 */
4202 	if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) {
4203 		new_end = dst_offset;
4204 		goto done;
4205 	}
4206 
4207 	dst_offset++;
4208 	/* Copy to the end of node if necessary. */
4209 	copy_size = wr_mas->node_end - wr_mas->offset_end + 1;
4210 	memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end,
4211 	       sizeof(void *) * copy_size);
4212 	if (dst_offset < max_piv) {
4213 		if (copy_size > max_piv - dst_offset)
4214 			copy_size = max_piv - dst_offset;
4215 
4216 		memcpy(dst_pivots + dst_offset,
4217 		       wr_mas->pivots + wr_mas->offset_end,
4218 		       sizeof(unsigned long) * copy_size);
4219 	}
4220 
4221 	if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1))
4222 		dst_pivots[new_end] = mas->max;
4223 
4224 done:
4225 	mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
4226 	if (in_rcu) {
4227 		mte_set_node_dead(mas->node);
4228 		mas->node = mt_mk_node(newnode, wr_mas->type);
4229 		mas_replace(mas, false);
4230 	} else {
4231 		memcpy(wr_mas->node, newnode, sizeof(struct maple_node));
4232 	}
4233 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4234 	mas_update_gap(mas);
4235 	return true;
4236 }
4237 
4238 /*
 * mas_wr_slot_store() - Attempt to store a value in a slot.
4240  * @wr_mas: the maple write state
4241  *
4242  * Return: True if stored, false otherwise
4243  */
4244 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas)
4245 {
4246 	struct ma_state *mas = wr_mas->mas;
4247 	unsigned long lmax; /* Logical max. */
4248 	unsigned char offset = mas->offset;
4249 
4250 	if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) ||
4251 				  (offset != wr_mas->node_end)))
4252 		return false;
4253 
4254 	if (offset == wr_mas->node_end - 1)
4255 		lmax = mas->max;
4256 	else
4257 		lmax = wr_mas->pivots[offset + 1];
4258 
4259 	/* going to overwrite too many slots. */
4260 	if (lmax < mas->last)
4261 		return false;
4262 
4263 	if (wr_mas->r_min == mas->index) {
4264 		/* overwriting two or more ranges with one. */
4265 		if (lmax == mas->last)
4266 			return false;
4267 
4268 		/* Overwriting all of offset and a portion of offset + 1. */
4269 		rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry);
4270 		wr_mas->pivots[offset] = mas->last;
4271 		goto done;
4272 	}
4273 
4274 	/* Doesn't end on the next range end. */
4275 	if (lmax != mas->last)
4276 		return false;
4277 
4278 	/* Overwriting a portion of offset and all of offset + 1 */
4279 	if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4280 	    (wr_mas->entry || wr_mas->pivots[offset + 1]))
4281 		wr_mas->pivots[offset + 1] = mas->last;
4282 
4283 	rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4284 	wr_mas->pivots[offset] = mas->index - 1;
4285 	mas->offset++; /* Keep mas accurate. */
4286 
4287 done:
4288 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4289 	mas_update_gap(mas);
4290 	return true;
4291 }
4292 
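/*
 * mas_wr_end_piv() - Advance @wr_mas->offset_end to the slot containing
 * @wr_mas->mas->last and record the pivot ending the write in
 * @wr_mas->end_piv.
 * @wr_mas: The maple write state
 */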
4293 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4294 {
4295 	while ((wr_mas->mas->last > wr_mas->end_piv) &&
4296 	       (wr_mas->offset_end < wr_mas->node_end))
4297 		wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4298 
4299 	if (wr_mas->mas->last > wr_mas->end_piv)
4300 		wr_mas->end_piv = wr_mas->mas->max;
4301 }
4302 
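/*
 * mas_wr_extend_null() - Widen a %NULL store over any neighbouring %NULL
 * slots so that adjacent %NULL ranges are coalesced into a single range.
 * @wr_mas: The maple write state
 */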
4303 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4304 {
4305 	struct ma_state *mas = wr_mas->mas;
4306 
4307 	if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4308 		mas->last = wr_mas->end_piv;
4309 
4310 	/* Check next slot(s) if we are overwriting the end */
4311 	if ((mas->last == wr_mas->end_piv) &&
4312 	    (wr_mas->node_end != wr_mas->offset_end) &&
4313 	    !wr_mas->slots[wr_mas->offset_end + 1]) {
4314 		wr_mas->offset_end++;
4315 		if (wr_mas->offset_end == wr_mas->node_end)
4316 			mas->last = mas->max;
4317 		else
4318 			mas->last = wr_mas->pivots[wr_mas->offset_end];
4319 		wr_mas->end_piv = mas->last;
4320 	}
4321 
4322 	if (!wr_mas->content) {
4323 		/* If this one is null, the next and prev are not */
4324 		mas->index = wr_mas->r_min;
4325 	} else {
4326 		/* Check prev slot if we are overwriting the start */
4327 		if (mas->index == wr_mas->r_min && mas->offset &&
4328 		    !wr_mas->slots[mas->offset - 1]) {
4329 			mas->offset--;
4330 			wr_mas->r_min = mas->index =
4331 				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4332 			wr_mas->r_max = wr_mas->pivots[mas->offset];
4333 		}
4334 	}
4335 }
4336 
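/*
 * mas_wr_append() - Attempt to append the entry in the last slot of the node.
 * @wr_mas: The maple write state
 *
 * Return: True if appended, false otherwise.
 */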
4337 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4338 {
4339 	unsigned char end = wr_mas->node_end;
4340 	unsigned char new_end = end + 1;
4341 	struct ma_state *mas = wr_mas->mas;
4342 	unsigned char node_pivots = mt_pivots[wr_mas->type];
4343 
4344 	if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4345 		if (new_end < node_pivots)
4346 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4347 
4348 		if (new_end < node_pivots)
4349 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4350 
4351 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4352 		mas->offset = new_end;
4353 		wr_mas->pivots[end] = mas->index - 1;
4354 
4355 		return true;
4356 	}
4357 
4358 	if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4359 		if (new_end < node_pivots)
4360 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4361 
4362 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4363 		if (new_end < node_pivots)
4364 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4365 
4366 		wr_mas->pivots[end] = mas->last;
4367 		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4368 		return true;
4369 	}
4370 
4371 	return false;
4372 }
4373 
4374 /*
4375  * mas_wr_bnode() - Slow path for a modification.
4376  * @wr_mas: The write maple state
4377  *
4378  * This is where split, rebalance end up.
4379  */
4380 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4381 {
4382 	struct maple_big_node b_node;
4383 
4384 	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4385 	memset(&b_node, 0, sizeof(struct maple_big_node));
4386 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4387 	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4388 }
4389 
4390 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4391 {
4392 	unsigned char node_slots;
4393 	unsigned char node_size;
4394 	struct ma_state *mas = wr_mas->mas;
4395 
4396 	/* Direct replacement */
4397 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4398 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4399 		if (!!wr_mas->entry ^ !!wr_mas->content)
4400 			mas_update_gap(mas);
4401 		return;
4402 	}
4403 
4404 	/* Attempt to append */
4405 	node_slots = mt_slots[wr_mas->type];
4406 	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4407 	if (mas->max == ULONG_MAX)
4408 		node_size++;
4409 
4410 	/* slot and node store will not fit, go to the slow path */
4411 	if (unlikely(node_size >= node_slots))
4412 		goto slow_path;
4413 
4414 	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4415 	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4416 		if (!wr_mas->content || !wr_mas->entry)
4417 			mas_update_gap(mas);
4418 		return;
4419 	}
4420 
4421 	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4422 		return;
4423 	else if (mas_wr_node_store(wr_mas))
4424 		return;
4425 
4426 	if (mas_is_err(mas))
4427 		return;
4428 
4429 slow_path:
4430 	mas_wr_bnode(wr_mas);
4431 }
4432 
4433 /*
4434  * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that were previously stored at the index.
4439  */
4440 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4441 {
4442 	struct ma_state *mas = wr_mas->mas;
4443 
4444 	wr_mas->content = mas_start(mas);
4445 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4446 		mas_store_root(mas, wr_mas->entry);
4447 		return wr_mas->content;
4448 	}
4449 
4450 	if (unlikely(!mas_wr_walk(wr_mas))) {
4451 		mas_wr_spanning_store(wr_mas);
4452 		return wr_mas->content;
4453 	}
4454 
4455 	/* At this point, we are at the leaf node that needs to be altered. */
4456 	wr_mas->end_piv = wr_mas->r_max;
4457 	mas_wr_end_piv(wr_mas);
4458 
4459 	if (!wr_mas->entry)
4460 		mas_wr_extend_null(wr_mas);
4461 
4462 	/* New root for a single pointer */
4463 	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4464 		mas_new_root(mas, wr_mas->entry);
4465 		return wr_mas->content;
4466 	}
4467 
4468 	mas_wr_modify(wr_mas);
4469 	return wr_mas->content;
4470 }
4471 
4472 /**
4473  * mas_insert() - Internal call to insert a value
4474  * @mas: The maple state
4475  * @entry: The entry to store
4476  *
 * Return: %NULL if the entry was inserted, otherwise the contents that
 * already exist at the requested index.  The maple state needs to be checked
 * for error conditions.
4479  */
4480 static inline void *mas_insert(struct ma_state *mas, void *entry)
4481 {
4482 	MA_WR_STATE(wr_mas, mas, entry);
4483 
4484 	/*
4485 	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4486 	 * tree.  If the insert fits exactly into an existing gap with a value
4487 	 * of NULL, then the slot only needs to be written with the new value.
4488 	 * If the range being inserted is adjacent to another range, then only a
4489 	 * single pivot needs to be inserted (as well as writing the entry).  If
4490 	 * the new range is within a gap but does not touch any other ranges,
4491 	 * then two pivots need to be inserted: the start - 1, and the end.  As
4492 	 * usual, the entry must be written.  Most operations require a new node
4493 	 * to be allocated and replace an existing node to ensure RCU safety,
4494 	 * when in RCU mode.  The exception to requiring a newly allocated node
4495 	 * is when inserting at the end of a node (appending).  When done
4496 	 * carefully, appending can reuse the node in place.
4497 	 */
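	/*
	 * For example (illustrative): inserting [5, 9] into a gap covering
	 * [0, 20] writes pivots 4 and 9, while inserting [0, 9] into the same
	 * gap touches the gap's start and only needs pivot 9.
	 */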
4498 	wr_mas.content = mas_start(mas);
4499 	if (wr_mas.content)
4500 		goto exists;
4501 
4502 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4503 		mas_store_root(mas, entry);
4504 		return NULL;
4505 	}
4506 
4507 	/* spanning writes always overwrite something */
4508 	if (!mas_wr_walk(&wr_mas))
4509 		goto exists;
4510 
4511 	/* At this point, we are at the leaf node that needs to be altered. */
4512 	wr_mas.offset_end = mas->offset;
4513 	wr_mas.end_piv = wr_mas.r_max;
4514 
4515 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4516 		goto exists;
4517 
4518 	if (!entry)
4519 		return NULL;
4520 
4521 	mas_wr_modify(&wr_mas);
4522 	return wr_mas.content;
4523 
4524 exists:
4525 	mas_set_err(mas, -EEXIST);
4526 	return wr_mas.content;
}
4529 
4530 /*
 * mas_prev_node() - Find the previous non-null entry at the same level in the
 * tree.
4533  * @mas: The maple state
4534  * @min: The lower limit to search
4535  *
4536  * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4537  * Return: 1 if the node is dead, 0 otherwise.
4538  */
4539 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4540 {
4541 	enum maple_type mt;
4542 	int offset, level;
4543 	void __rcu **slots;
4544 	struct maple_node *node;
4545 	struct maple_enode *enode;
4546 	unsigned long *pivots;
4547 
4548 	if (mas_is_none(mas))
4549 		return 0;
4550 
4551 	level = 0;
4552 	do {
4553 		node = mas_mn(mas);
4554 		if (ma_is_root(node))
4555 			goto no_entry;
4556 
4557 		/* Walk up. */
4558 		if (unlikely(mas_ascend(mas)))
4559 			return 1;
4560 		offset = mas->offset;
4561 		level++;
4562 	} while (!offset);
4563 
4564 	offset--;
4565 	mt = mte_node_type(mas->node);
4566 	node = mas_mn(mas);
4567 	slots = ma_slots(node, mt);
4568 	pivots = ma_pivots(node, mt);
4569 	if (unlikely(ma_dead_node(node)))
4570 		return 1;
4571 
4572 	mas->max = pivots[offset];
4573 	if (offset)
4574 		mas->min = pivots[offset - 1] + 1;
4575 	if (unlikely(ma_dead_node(node)))
4576 		return 1;
4577 
4578 	if (mas->max < min)
4579 		goto no_entry_min;
4580 
4581 	while (level > 1) {
4582 		level--;
4583 		enode = mas_slot(mas, slots, offset);
4584 		if (unlikely(ma_dead_node(node)))
4585 			return 1;
4586 
4587 		mas->node = enode;
4588 		mt = mte_node_type(mas->node);
4589 		node = mas_mn(mas);
4590 		slots = ma_slots(node, mt);
4591 		pivots = ma_pivots(node, mt);
4592 		offset = ma_data_end(node, mt, pivots, mas->max);
4593 		if (unlikely(ma_dead_node(node)))
4594 			return 1;
4595 
4596 		if (offset)
4597 			mas->min = pivots[offset - 1] + 1;
4598 
4599 		if (offset < mt_pivots[mt])
4600 			mas->max = pivots[offset];
4601 
4602 		if (mas->max < min)
4603 			goto no_entry;
4604 	}
4605 
4606 	mas->node = mas_slot(mas, slots, offset);
4607 	if (unlikely(ma_dead_node(node)))
4608 		return 1;
4609 
4610 	mas->offset = mas_data_end(mas);
4611 	if (unlikely(mte_dead_node(mas->node)))
4612 		return 1;
4613 
4614 	return 0;
4615 
4616 no_entry_min:
4617 	mas->offset = offset;
4618 	if (offset)
4619 		mas->min = pivots[offset - 1] + 1;
4620 no_entry:
4621 	if (unlikely(ma_dead_node(node)))
4622 		return 1;
4623 
4624 	mas->node = MAS_NONE;
4625 	return 0;
4626 }
4627 
4628 /*
4629  * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node currently pointed to by @mas
 * @max: The maximum pivot value to check.
4632  *
4633  * The next value will be mas->node[mas->offset] or MAS_NONE.
4634  * Return: 1 on dead node, 0 otherwise.
4635  */
4636 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4637 				unsigned long max)
4638 {
4639 	unsigned long min, pivot;
4640 	unsigned long *pivots;
4641 	struct maple_enode *enode;
4642 	int level = 0;
4643 	unsigned char offset;
4644 	unsigned char node_end;
4645 	enum maple_type mt;
4646 	void __rcu **slots;
4647 
4648 	if (mas->max >= max)
4649 		goto no_entry;
4650 
4651 	level = 0;
4652 	do {
4653 		if (ma_is_root(node))
4654 			goto no_entry;
4655 
4656 		min = mas->max + 1;
4657 		if (min > max)
4658 			goto no_entry;
4659 
4660 		if (unlikely(mas_ascend(mas)))
4661 			return 1;
4662 
4663 		offset = mas->offset;
4664 		level++;
4665 		node = mas_mn(mas);
4666 		mt = mte_node_type(mas->node);
4667 		pivots = ma_pivots(node, mt);
4668 		node_end = ma_data_end(node, mt, pivots, mas->max);
4669 		if (unlikely(ma_dead_node(node)))
4670 			return 1;
4671 
4672 	} while (unlikely(offset == node_end));
4673 
4674 	slots = ma_slots(node, mt);
4675 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4676 	while (unlikely(level > 1)) {
4677 		/* Descend, if necessary */
4678 		enode = mas_slot(mas, slots, offset);
4679 		if (unlikely(ma_dead_node(node)))
4680 			return 1;
4681 
4682 		mas->node = enode;
4683 		level--;
4684 		node = mas_mn(mas);
4685 		mt = mte_node_type(mas->node);
4686 		slots = ma_slots(node, mt);
4687 		pivots = ma_pivots(node, mt);
4688 		if (unlikely(ma_dead_node(node)))
4689 			return 1;
4690 
4691 		offset = 0;
4692 		pivot = pivots[0];
4693 	}
4694 
4695 	enode = mas_slot(mas, slots, offset);
4696 	if (unlikely(ma_dead_node(node)))
4697 		return 1;
4698 
4699 	mas->node = enode;
4700 	mas->min = min;
4701 	mas->max = pivot;
4702 	return 0;
4703 
4704 no_entry:
4705 	if (unlikely(ma_dead_node(node)))
4706 		return 1;
4707 
4708 	mas->node = MAS_NONE;
4709 	return 0;
4710 }
4711 
4712 /*
4713  * mas_next_nentry() - Get the next node entry
 * @mas: The maple state
 * @node: The maple node being examined
 * @max: The maximum value to check
 * @type: The maple node type of @node
4717  *
4718  * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4719  * pivot of the entry.
4720  *
4721  * Return: The next entry, %NULL otherwise
4722  */
4723 static inline void *mas_next_nentry(struct ma_state *mas,
4724 	    struct maple_node *node, unsigned long max, enum maple_type type)
4725 {
4726 	unsigned char count;
4727 	unsigned long pivot;
4728 	unsigned long *pivots;
4729 	void __rcu **slots;
4730 	void *entry;
4731 
4732 	if (mas->last == mas->max) {
4733 		mas->index = mas->max;
4734 		return NULL;
4735 	}
4736 
4737 	slots = ma_slots(node, type);
4738 	pivots = ma_pivots(node, type);
4739 	count = ma_data_end(node, type, pivots, mas->max);
4740 	if (unlikely(ma_dead_node(node)))
4741 		return NULL;
4742 
4743 	mas->index = mas_safe_min(mas, pivots, mas->offset);
4744 	if (unlikely(ma_dead_node(node)))
4745 		return NULL;
4746 
4747 	if (mas->index > max)
4748 		return NULL;
4749 
4750 	if (mas->offset > count)
4751 		return NULL;
4752 
4753 	while (mas->offset < count) {
4754 		pivot = pivots[mas->offset];
4755 		entry = mas_slot(mas, slots, mas->offset);
4756 		if (ma_dead_node(node))
4757 			return NULL;
4758 
4759 		if (entry)
4760 			goto found;
4761 
4762 		if (pivot >= max)
4763 			return NULL;
4764 
4765 		mas->index = pivot + 1;
4766 		mas->offset++;
4767 	}
4768 
4769 	if (mas->index > mas->max) {
4770 		mas->index = mas->last;
4771 		return NULL;
4772 	}
4773 
4774 	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4775 	entry = mas_slot(mas, slots, mas->offset);
4776 	if (ma_dead_node(node))
4777 		return NULL;
4778 
4779 	if (!pivot)
4780 		return NULL;
4781 
4782 	if (!entry)
4783 		return NULL;
4784 
4785 found:
4786 	mas->last = pivot;
4787 	return entry;
4788 }
4789 
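/*
 * mas_rewalk() - Re-walk the tree to @index, retrying until the walk
 * succeeds.  Used to recover after encountering a dead node.
 * @mas: The maple state
 * @index: The index to walk to
 */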
4790 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4791 {
4792 retry:
4793 	mas_set(mas, index);
4794 	mas_state_walk(mas);
4795 	if (mas_is_start(mas))
4796 		goto retry;
4797 }
4798 
4799 /*
4800  * mas_next_entry() - Internal function to get the next entry.
4801  * @mas: The maple state
4802  * @limit: The maximum range start.
4803  *
4804  * Set the @mas->node to the next entry and the range_start to
4805  * the beginning value for the entry.  Does not check beyond @limit.
4806  * Sets @mas->index and @mas->last to the limit if it is hit.
4807  * Restarts on dead nodes.
4808  *
4809  * Return: the next entry or %NULL.
4810  */
4811 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
4812 {
4813 	void *entry = NULL;
4814 	struct maple_enode *prev_node;
4815 	struct maple_node *node;
4816 	unsigned char offset;
4817 	unsigned long last;
4818 	enum maple_type mt;
4819 
4820 	if (mas->index > limit) {
4821 		mas->index = mas->last = limit;
4822 		mas_pause(mas);
4823 		return NULL;
4824 	}
4825 	last = mas->last;
4826 retry:
4827 	offset = mas->offset;
4828 	prev_node = mas->node;
4829 	node = mas_mn(mas);
4830 	mt = mte_node_type(mas->node);
4831 	mas->offset++;
4832 	if (unlikely(mas->offset >= mt_slots[mt])) {
4833 		mas->offset = mt_slots[mt] - 1;
4834 		goto next_node;
4835 	}
4836 
4837 	while (!mas_is_none(mas)) {
4838 		entry = mas_next_nentry(mas, node, limit, mt);
4839 		if (unlikely(ma_dead_node(node))) {
4840 			mas_rewalk(mas, last);
4841 			goto retry;
4842 		}
4843 
4844 		if (likely(entry))
4845 			return entry;
4846 
4847 		if (unlikely((mas->index > limit)))
4848 			break;
4849 
4850 next_node:
4851 		prev_node = mas->node;
4852 		offset = mas->offset;
4853 		if (unlikely(mas_next_node(mas, node, limit))) {
4854 			mas_rewalk(mas, last);
4855 			goto retry;
4856 		}
4857 		mas->offset = 0;
4858 		node = mas_mn(mas);
4859 		mt = mte_node_type(mas->node);
4860 	}
4861 
4862 	mas->index = mas->last = limit;
4863 	mas->offset = offset;
4864 	mas->node = prev_node;
4865 	return NULL;
4866 }
4867 
4868 /*
4869  * mas_prev_nentry() - Get the previous node entry.
4870  * @mas: The maple state.
4871  * @limit: The lower limit to check for a value.
4872  *
4873  * Return: the entry, %NULL otherwise.
4874  */
4875 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit,
4876 				    unsigned long index)
4877 {
4878 	unsigned long pivot, min;
4879 	unsigned char offset;
4880 	struct maple_node *mn;
4881 	enum maple_type mt;
4882 	unsigned long *pivots;
4883 	void __rcu **slots;
4884 	void *entry;
4885 
4886 retry:
4887 	if (!mas->offset)
4888 		return NULL;
4889 
4890 	mn = mas_mn(mas);
4891 	mt = mte_node_type(mas->node);
4892 	offset = mas->offset - 1;
4893 	if (offset >= mt_slots[mt])
4894 		offset = mt_slots[mt] - 1;
4895 
4896 	slots = ma_slots(mn, mt);
4897 	pivots = ma_pivots(mn, mt);
4898 	if (unlikely(ma_dead_node(mn))) {
4899 		mas_rewalk(mas, index);
4900 		goto retry;
4901 	}
4902 
4903 	if (offset == mt_pivots[mt])
4904 		pivot = mas->max;
4905 	else
4906 		pivot = pivots[offset];
4907 
4908 	if (unlikely(ma_dead_node(mn))) {
4909 		mas_rewalk(mas, index);
4910 		goto retry;
4911 	}
4912 
4913 	while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) ||
4914 	       !pivot))
4915 		pivot = pivots[--offset];
4916 
4917 	min = mas_safe_min(mas, pivots, offset);
4918 	entry = mas_slot(mas, slots, offset);
4919 	if (unlikely(ma_dead_node(mn))) {
4920 		mas_rewalk(mas, index);
4921 		goto retry;
4922 	}
4923 
4924 	if (likely(entry)) {
4925 		mas->offset = offset;
4926 		mas->last = pivot;
4927 		mas->index = min;
4928 	}
4929 	return entry;
4930 }
4931 
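/*
 * mas_prev_entry() - Internal function to get the previous entry.
 * @mas: The maple state
 * @min: The lower limit to check
 *
 * Restarts on dead nodes.  Sets @mas->index and @mas->last to @min when the
 * limit is passed without finding an entry.
 *
 * Return: the previous entry or %NULL.
 */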
4932 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min)
4933 {
4934 	void *entry;
4935 
4936 	if (mas->index < min) {
4937 		mas->index = mas->last = min;
4938 		mas->node = MAS_NONE;
4939 		return NULL;
4940 	}
4941 retry:
4942 	while (likely(!mas_is_none(mas))) {
4943 		entry = mas_prev_nentry(mas, min, mas->index);
4944 		if (unlikely(mas->last < min))
4945 			goto not_found;
4946 
4947 		if (likely(entry))
4948 			return entry;
4949 
4950 		if (unlikely(mas_prev_node(mas, min))) {
4951 			mas_rewalk(mas, mas->index);
4952 			goto retry;
4953 		}
4954 
4955 		mas->offset++;
4956 	}
4957 
4958 	mas->offset--;
4959 not_found:
4960 	mas->index = mas->last = min;
4961 	return NULL;
4962 }
4963 
4964 /*
4965  * mas_rev_awalk() - Internal function.  Reverse allocation walk.  Find the
4966  * highest gap address of a given size in a given node and descend.
4967  * @mas: The maple state
4968  * @size: The needed size.
4969  *
4970  * Return: True if found in a leaf, false otherwise.
 */
4973 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size)
4974 {
4975 	enum maple_type type = mte_node_type(mas->node);
4976 	struct maple_node *node = mas_mn(mas);
4977 	unsigned long *pivots, *gaps;
4978 	void __rcu **slots;
4979 	unsigned long gap = 0;
4980 	unsigned long max, min;
4981 	unsigned char offset;
4982 
4983 	if (unlikely(mas_is_err(mas)))
4984 		return true;
4985 
4986 	if (ma_is_dense(type)) {
4987 		/* dense nodes. */
4988 		mas->offset = (unsigned char)(mas->index - mas->min);
4989 		return true;
4990 	}
4991 
4992 	pivots = ma_pivots(node, type);
4993 	slots = ma_slots(node, type);
4994 	gaps = ma_gaps(node, type);
4995 	offset = mas->offset;
4996 	min = mas_safe_min(mas, pivots, offset);
4997 	/* Skip out of bounds. */
4998 	while (mas->last < min)
4999 		min = mas_safe_min(mas, pivots, --offset);
5000 
5001 	max = mas_safe_pivot(mas, pivots, offset, type);
5002 	while (mas->index <= max) {
5003 		gap = 0;
5004 		if (gaps)
5005 			gap = gaps[offset];
5006 		else if (!mas_slot(mas, slots, offset))
5007 			gap = max - min + 1;
5008 
5009 		if (gap) {
5010 			if ((size <= gap) && (size <= mas->last - min + 1))
5011 				break;
5012 
5013 			if (!gaps) {
5014 				/* Skip the next slot, it cannot be a gap. */
5015 				if (offset < 2)
5016 					goto ascend;
5017 
5018 				offset -= 2;
5019 				max = pivots[offset];
5020 				min = mas_safe_min(mas, pivots, offset);
5021 				continue;
5022 			}
5023 		}
5024 
5025 		if (!offset)
5026 			goto ascend;
5027 
5028 		offset--;
5029 		max = min - 1;
5030 		min = mas_safe_min(mas, pivots, offset);
5031 	}
5032 
5033 	if (unlikely((mas->index > max) || (size - 1 > max - mas->index)))
5034 		goto no_space;
5035 
5036 	if (unlikely(ma_is_leaf(type))) {
5037 		mas->offset = offset;
5038 		mas->min = min;
5039 		mas->max = min + gap - 1;
5040 		return true;
5041 	}
5042 
5043 	/* descend, only happens under lock. */
5044 	mas->node = mas_slot(mas, slots, offset);
5045 	mas->min = min;
5046 	mas->max = max;
5047 	mas->offset = mas_data_end(mas);
5048 	return false;
5049 
5050 ascend:
5051 	if (!mte_is_root(mas->node))
5052 		return false;
5053 
5054 no_space:
5055 	mas_set_err(mas, -EBUSY);
5056 	return false;
5057 }
5058 
5059 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size)
5060 {
5061 	enum maple_type type = mte_node_type(mas->node);
5062 	unsigned long pivot, min, gap = 0;
5063 	unsigned char offset;
5064 	unsigned long *gaps;
5065 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
5066 	void __rcu **slots = ma_slots(mas_mn(mas), type);
5067 	bool found = false;
5068 
5069 	if (ma_is_dense(type)) {
5070 		mas->offset = (unsigned char)(mas->index - mas->min);
5071 		return true;
5072 	}
5073 
5074 	gaps = ma_gaps(mte_to_node(mas->node), type);
5075 	offset = mas->offset;
5076 	min = mas_safe_min(mas, pivots, offset);
5077 	for (; offset < mt_slots[type]; offset++) {
5078 		pivot = mas_safe_pivot(mas, pivots, offset, type);
5079 		if (offset && !pivot)
5080 			break;
5081 
5082 		/* Not within lower bounds */
5083 		if (mas->index > pivot)
5084 			goto next_slot;
5085 
5086 		if (gaps)
5087 			gap = gaps[offset];
5088 		else if (!mas_slot(mas, slots, offset))
5089 			gap = min(pivot, mas->last) - max(mas->index, min) + 1;
5090 		else
5091 			goto next_slot;
5092 
5093 		if (gap >= size) {
5094 			if (ma_is_leaf(type)) {
5095 				found = true;
5096 				goto done;
5097 			}
5098 			if (mas->index <= pivot) {
5099 				mas->node = mas_slot(mas, slots, offset);
5100 				mas->min = min;
5101 				mas->max = pivot;
5102 				offset = 0;
5103 				break;
5104 			}
5105 		}
5106 next_slot:
5107 		min = pivot + 1;
5108 		if (mas->last <= pivot) {
5109 			mas_set_err(mas, -EBUSY);
5110 			return true;
5111 		}
5112 	}
5113 
5114 	if (mte_is_root(mas->node))
5115 		found = true;
5116 done:
5117 	mas->offset = offset;
5118 	return found;
5119 }
5120 
5121 /**
5122  * mas_walk() - Search for @mas->index in the tree.
5123  * @mas: The maple state.
5124  *
5125  * mas->index and mas->last will be set to the range if there is a value.  If
5126  * mas->node is MAS_NONE, reset to MAS_START.
5127  *
5128  * Return: the entry at the location or %NULL.
5129  */
5130 void *mas_walk(struct ma_state *mas)
5131 {
5132 	void *entry;
5133 
5134 retry:
5135 	entry = mas_state_walk(mas);
5136 	if (mas_is_start(mas))
5137 		goto retry;
5138 
5139 	if (mas_is_ptr(mas)) {
5140 		if (!mas->index) {
5141 			mas->last = 0;
5142 		} else {
5143 			mas->index = 1;
5144 			mas->last = ULONG_MAX;
5145 		}
5146 		return entry;
5147 	}
5148 
5149 	if (mas_is_none(mas)) {
5150 		mas->index = 0;
5151 		mas->last = ULONG_MAX;
5152 	}
5153 
5154 	return entry;
5155 }
5156 EXPORT_SYMBOL_GPL(mas_walk);
5157 
5158 static inline bool mas_rewind_node(struct ma_state *mas)
5159 {
5160 	unsigned char slot;
5161 
5162 	do {
5163 		if (mte_is_root(mas->node)) {
5164 			slot = mas->offset;
5165 			if (!slot)
5166 				return false;
5167 		} else {
5168 			mas_ascend(mas);
5169 			slot = mas->offset;
5170 		}
5171 	} while (!slot);
5172 
5173 	mas->offset = --slot;
5174 	return true;
5175 }
5176 
5177 /*
5178  * mas_skip_node() - Internal function.  Skip over a node.
5179  * @mas: The maple state.
5180  *
5181  * Return: true if there is another node, false otherwise.
5182  */
5183 static inline bool mas_skip_node(struct ma_state *mas)
5184 {
5185 	if (mas_is_err(mas))
5186 		return false;
5187 
5188 	do {
5189 		if (mte_is_root(mas->node)) {
5190 			if (mas->offset >= mas_data_end(mas)) {
5191 				mas_set_err(mas, -EBUSY);
5192 				return false;
5193 			}
5194 		} else {
5195 			mas_ascend(mas);
5196 		}
5197 	} while (mas->offset >= mas_data_end(mas));
5198 
5199 	mas->offset++;
5200 	return true;
5201 }
5202 
5203 /*
 * mas_awalk() - Allocation walk.  Search from low address to high for a gap
 * of @size.
5206  * @mas: The maple state
5207  * @size: The size of the gap required
5208  *
5209  * Search between @mas->index and @mas->last for a gap of @size.
5210  */
5211 static inline void mas_awalk(struct ma_state *mas, unsigned long size)
5212 {
5213 	struct maple_enode *last = NULL;
5214 
5215 	/*
5216 	 * There are 4 options:
5217 	 * go to child (descend)
5218 	 * go back to parent (ascend)
5219 	 * no gap found. (return, slot == MAPLE_NODE_SLOTS)
5220 	 * found the gap. (return, slot != MAPLE_NODE_SLOTS)
5221 	 */
5222 	while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) {
5223 		if (last == mas->node)
5224 			mas_skip_node(mas);
5225 		else
5226 			last = mas->node;
5227 	}
5228 }
5229 
5230 /*
5231  * mas_fill_gap() - Fill a located gap with @entry.
5232  * @mas: The maple state
5233  * @entry: The value to store
5234  * @slot: The offset into the node to store the @entry
5235  * @size: The size of the entry
5236  * @index: The start location
5237  */
5238 static inline void mas_fill_gap(struct ma_state *mas, void *entry,
5239 		unsigned char slot, unsigned long size, unsigned long *index)
5240 {
5241 	MA_WR_STATE(wr_mas, mas, entry);
5242 	unsigned char pslot = mte_parent_slot(mas->node);
5243 	struct maple_enode *mn = mas->node;
5244 	unsigned long *pivots;
5245 	enum maple_type ptype;
5246 	/*
5247 	 * mas->index is the start address for the search
5248 	 *  which may no longer be needed.
5249 	 * mas->last is the end address for the search
5250 	 */
5251 
5252 	*index = mas->index;
5253 	mas->last = mas->index + size - 1;
5254 
5255 	/*
5256 	 * It is possible that using mas->max and mas->min to correctly
5257 	 * calculate the index and last will cause an issue in the gap
5258 	 * calculation, so fix the ma_state here
5259 	 */
5260 	mas_ascend(mas);
5261 	ptype = mte_node_type(mas->node);
5262 	pivots = ma_pivots(mas_mn(mas), ptype);
5263 	mas->max = mas_safe_pivot(mas, pivots, pslot, ptype);
5264 	mas->min = mas_safe_min(mas, pivots, pslot);
5265 	mas->node = mn;
5266 	mas->offset = slot;
5267 	mas_wr_store_entry(&wr_mas);
5268 }
5269 
5270 /*
5271  * mas_sparse_area() - Internal function.  Return upper or lower limit when
5272  * searching for a gap in an empty tree.
5273  * @mas: The maple state
 * @min: The minimum of the range
 * @max: The maximum of the range
5276  * @size: The size of the gap
5277  * @fwd: Searching forward or back
5278  */
5279 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min,
5280 				unsigned long max, unsigned long size, bool fwd)
5281 {
5282 	unsigned long start = 0;
5283 
	/* mas_is_ptr: index 0 holds the root entry, so start at 1. */
	if (!unlikely(mas_is_none(mas)))
		start++;
5287 
5288 	if (start < min)
5289 		start = min;
5290 
5291 	if (fwd) {
5292 		mas->index = start;
5293 		mas->last = start + size - 1;
5294 		return;
5295 	}
5296 
5297 	mas->index = max;
5298 }
5299 
5300 /*
5301  * mas_empty_area() - Get the lowest address within the range that is
5302  * sufficient for the size requested.
5303  * @mas: The maple state
5304  * @min: The lowest value of the range
5305  * @max: The highest value of the range
5306  * @size: The size needed
5307  */
5308 int mas_empty_area(struct ma_state *mas, unsigned long min,
5309 		unsigned long max, unsigned long size)
5310 {
5311 	unsigned char offset;
5312 	unsigned long *pivots;
5313 	enum maple_type mt;
5314 
5315 	if (mas_is_start(mas))
5316 		mas_start(mas);
5317 	else if (mas->offset >= 2)
5318 		mas->offset -= 2;
5319 	else if (!mas_skip_node(mas))
5320 		return -EBUSY;
5321 
5322 	/* Empty set */
5323 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5324 		mas_sparse_area(mas, min, max, size, true);
5325 		return 0;
5326 	}
5327 
5328 	/* The start of the window can only be within these values */
5329 	mas->index = min;
5330 	mas->last = max;
5331 	mas_awalk(mas, size);
5332 
5333 	if (unlikely(mas_is_err(mas)))
5334 		return xa_err(mas->node);
5335 
5336 	offset = mas->offset;
5337 	if (unlikely(offset == MAPLE_NODE_SLOTS))
5338 		return -EBUSY;
5339 
5340 	mt = mte_node_type(mas->node);
5341 	pivots = ma_pivots(mas_mn(mas), mt);
5342 	if (offset)
5343 		mas->min = pivots[offset - 1] + 1;
5344 
5345 	if (offset < mt_pivots[mt])
5346 		mas->max = pivots[offset];
5347 
5348 	if (mas->index < mas->min)
5349 		mas->index = mas->min;
5350 
5351 	mas->last = mas->index + size - 1;
5352 	return 0;
5353 }
5354 EXPORT_SYMBOL_GPL(mas_empty_area);
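
/*
 * Example usage (illustrative only; the caller must hold the appropriate
 * lock):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	if (!mas_empty_area(&mas, 0, ULONG_MAX, 16))
 *		pr_debug("gap found at [%lu, %lu]\n", mas.index, mas.last);
 */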
5355 
5356 /*
5357  * mas_empty_area_rev() - Get the highest address within the range that is
5358  * sufficient for the size requested.
5359  * @mas: The maple state
5360  * @min: The lowest value of the range
5361  * @max: The highest value of the range
5362  * @size: The size needed
5363  */
5364 int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
5365 		unsigned long max, unsigned long size)
5366 {
5367 	struct maple_enode *last = mas->node;
5368 
5369 	if (mas_is_start(mas)) {
5370 		mas_start(mas);
5371 		mas->offset = mas_data_end(mas);
5372 	} else if (mas->offset >= 2) {
5373 		mas->offset -= 2;
5374 	} else if (!mas_rewind_node(mas)) {
5375 		return -EBUSY;
5376 	}
5377 
5378 	/* Empty set. */
5379 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5380 		mas_sparse_area(mas, min, max, size, false);
5381 		return 0;
5382 	}
5383 
5384 	/* The start of the window can only be within these values. */
5385 	mas->index = min;
5386 	mas->last = max;
5387 
5388 	while (!mas_rev_awalk(mas, size)) {
5389 		if (last == mas->node) {
5390 			if (!mas_rewind_node(mas))
5391 				return -EBUSY;
5392 		} else {
5393 			last = mas->node;
5394 		}
5395 	}
5396 
5397 	if (mas_is_err(mas))
5398 		return xa_err(mas->node);
5399 
5400 	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
5401 		return -EBUSY;
5402 
5403 	/*
5404 	 * mas_rev_awalk() has set mas->min and mas->max to the gap values.  If
5405 	 * the maximum is outside the window we are searching, then use the last
5406 	 * location in the search.
5407 	 * mas->max and mas->min is the range of the gap.
5408 	 * mas->index and mas->last are currently set to the search range.
5409 	 */
5410 
5411 	/* Trim the upper limit to the max. */
5412 	if (mas->max <= mas->last)
5413 		mas->last = mas->max;
5414 
5415 	mas->index = mas->last - size + 1;
5416 	return 0;
5417 }
5418 EXPORT_SYMBOL_GPL(mas_empty_area_rev);
5419 
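/*
 * mas_alloc() - Find the lowest gap of @size, store @entry there, and return
 * the start of the allocated range in @index.
 * @mas: The maple state
 * @entry: The entry to store
 * @size: The size of the gap required
 * @index: Where to store the start of the allocated range
 *
 * Return: 0 on success, the pivot of the new entry when expanding an empty
 * tree, -EBUSY when no sufficient gap exists, or an xa_err() code on other
 * failures.
 */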
5420 static inline int mas_alloc(struct ma_state *mas, void *entry,
5421 		unsigned long size, unsigned long *index)
5422 {
5423 	unsigned long min;
5424 
5425 	mas_start(mas);
5426 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
5427 		mas_root_expand(mas, entry);
5428 		if (mas_is_err(mas))
5429 			return xa_err(mas->node);
5430 
5431 		if (!mas->index)
5432 			return mte_pivot(mas->node, 0);
5433 		return mte_pivot(mas->node, 1);
5434 	}
5435 
5436 	/* Must be walking a tree. */
5437 	mas_awalk(mas, size);
5438 	if (mas_is_err(mas))
5439 		return xa_err(mas->node);
5440 
5441 	if (mas->offset == MAPLE_NODE_SLOTS)
5442 		goto no_gap;
5443 
5444 	/*
5445 	 * At this point, mas->node points to the right node and we have an
5446 	 * offset that has a sufficient gap.
5447 	 */
5448 	min = mas->min;
5449 	if (mas->offset)
5450 		min = mte_pivot(mas->node, mas->offset - 1) + 1;
5451 
5452 	if (mas->index < min)
5453 		mas->index = min;
5454 
5455 	mas_fill_gap(mas, entry, mas->offset, size, index);
5456 	return 0;
5457 
5458 no_gap:
5459 	return -EBUSY;
5460 }
5461 
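/*
 * mas_rev_alloc() - Find the highest gap of @size within [@min, @max], store
 * @entry there, and return the start of the allocated range in @index.
 * @mas: The maple state
 * @min: The lowest address of the window
 * @max: The highest address of the window
 * @entry: The entry to store
 * @size: The size of the gap required
 * @index: Where to store the start of the allocated range
 *
 * Return: 0 on success, -EBUSY if no gap was found, or an xa_err() code.
 */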
5462 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
5463 				unsigned long max, void *entry,
5464 				unsigned long size, unsigned long *index)
5465 {
5466 	int ret = 0;
5467 
5468 	ret = mas_empty_area_rev(mas, min, max, size);
5469 	if (ret)
5470 		return ret;
5471 
5472 	if (mas_is_err(mas))
5473 		return xa_err(mas->node);
5474 
5475 	if (mas->offset == MAPLE_NODE_SLOTS)
5476 		goto no_gap;
5477 
5478 	mas_fill_gap(mas, entry, mas->offset, size, index);
5479 	return 0;
5480 
5481 no_gap:
5482 	return -EBUSY;
5483 }
5484 
5485 /*
5486  * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The maple encoded node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
5490  *
5491  * Must hold the write lock.
5492  *
5493  * Return: The number of leaves marked as dead.
5494  */
5495 static inline
5496 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt,
5497 			      void __rcu **slots)
5498 {
5499 	struct maple_node *node;
5500 	enum maple_type type;
5501 	void *entry;
5502 	int offset;
5503 
5504 	for (offset = 0; offset < mt_slot_count(enode); offset++) {
5505 		entry = mt_slot(mt, slots, offset);
5506 		type = mte_node_type(entry);
5507 		node = mte_to_node(entry);
5508 		/* Use both node and type to catch LE & BE metadata */
5509 		if (!node || !type)
5510 			break;
5511 
5512 		mte_set_node_dead(entry);
5513 		node->type = type;
5514 		rcu_assign_pointer(slots[offset], node);
5515 	}
5516 
5517 	return offset;
5518 }
5519 
5520 /**
5521  * mte_dead_walk() - Walk down a dead tree to just before the leaves
5522  * @enode: The maple encoded node
5523  * @offset: The starting offset
5524  *
5525  * Note: This can only be used from the RCU callback context.
5526  */
5527 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset)
5528 {
5529 	struct maple_node *node, *next;
5530 	void __rcu **slots = NULL;
5531 
5532 	next = mte_to_node(*enode);
5533 	do {
5534 		*enode = ma_enode_ptr(next);
5535 		node = mte_to_node(*enode);
5536 		slots = ma_slots(node, node->type);
5537 		next = rcu_dereference_protected(slots[offset],
5538 					lock_is_held(&rcu_callback_map));
5539 		offset = 0;
5540 	} while (!ma_is_leaf(next->type));
5541 
5542 	return slots;
5543 }
5544 
5545 /**
5546  * mt_free_walk() - Walk & free a tree in the RCU callback context
5547  * @head: The RCU head that's within the node.
5548  *
5549  * Note: This can only be used from the RCU callback context.
5550  */
5551 static void mt_free_walk(struct rcu_head *head)
5552 {
5553 	void __rcu **slots;
5554 	struct maple_node *node, *start;
5555 	struct maple_enode *enode;
5556 	unsigned char offset;
5557 	enum maple_type type;
5558 
5559 	node = container_of(head, struct maple_node, rcu);
5560 
5561 	if (ma_is_leaf(node->type))
5562 		goto free_leaf;
5563 
5564 	start = node;
5565 	enode = mt_mk_node(node, node->type);
5566 	slots = mte_dead_walk(&enode, 0);
5567 	node = mte_to_node(enode);
5568 	do {
5569 		mt_free_bulk(node->slot_len, slots);
5570 		offset = node->parent_slot + 1;
5571 		enode = node->piv_parent;
5572 		if (mte_to_node(enode) == node)
5573 			goto free_leaf;
5574 
5575 		type = mte_node_type(enode);
5576 		slots = ma_slots(mte_to_node(enode), type);
5577 		if ((offset < mt_slots[type]) &&
5578 		    rcu_dereference_protected(slots[offset],
5579 					      lock_is_held(&rcu_callback_map)))
5580 			slots = mte_dead_walk(&enode, offset);
5581 		node = mte_to_node(enode);
5582 	} while ((node != start) || (node->slot_len < offset));
5583 
5584 	slots = ma_slots(node, node->type);
5585 	mt_free_bulk(node->slot_len, slots);
5586 
5587 free_leaf:
5588 	mt_free_rcu(&node->rcu);
5589 }
5590 
5591 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode,
5592 	struct maple_tree *mt, struct maple_enode *prev, unsigned char offset)
5593 {
5594 	struct maple_node *node;
5595 	struct maple_enode *next = *enode;
5596 	void __rcu **slots = NULL;
5597 	enum maple_type type;
5598 	unsigned char next_offset = 0;
5599 
5600 	do {
5601 		*enode = next;
5602 		node = mte_to_node(*enode);
5603 		type = mte_node_type(*enode);
5604 		slots = ma_slots(node, type);
5605 		next = mt_slot_locked(mt, slots, next_offset);
5606 		if (mte_dead_node(next))
5607 			next = mt_slot_locked(mt, slots, ++next_offset);
5608 
5609 		mte_set_node_dead(*enode);
5610 		node->type = type;
5611 		node->piv_parent = prev;
5612 		node->parent_slot = offset;
5613 		offset = next_offset;
5614 		next_offset = 0;
5615 		prev = *enode;
5616 	} while (!mte_is_leaf(next));
5617 
5618 	return slots;
5619 }
5620 
5621 static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
5622 			    bool free)
5623 {
5624 	void __rcu **slots;
5625 	struct maple_node *node = mte_to_node(enode);
5626 	struct maple_enode *start;
5627 
5628 	if (mte_is_leaf(enode)) {
5629 		node->type = mte_node_type(enode);
5630 		goto free_leaf;
5631 	}
5632 
5633 	start = enode;
5634 	slots = mte_destroy_descend(&enode, mt, start, 0);
5635 	node = mte_to_node(enode); // Updated in the above call.
5636 	do {
5637 		enum maple_type type;
5638 		unsigned char offset;
5639 		struct maple_enode *parent, *tmp;
5640 
5641 		node->slot_len = mte_dead_leaves(enode, mt, slots);
5642 		if (free)
5643 			mt_free_bulk(node->slot_len, slots);
5644 		offset = node->parent_slot + 1;
5645 		enode = node->piv_parent;
5646 		if (mte_to_node(enode) == node)
5647 			goto free_leaf;
5648 
5649 		type = mte_node_type(enode);
5650 		slots = ma_slots(mte_to_node(enode), type);
5651 		if (offset >= mt_slots[type])
5652 			goto next;
5653 
5654 		tmp = mt_slot_locked(mt, slots, offset);
5655 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5656 			parent = enode;
5657 			enode = tmp;
5658 			slots = mte_destroy_descend(&enode, mt, parent, offset);
5659 		}
5660 next:
5661 		node = mte_to_node(enode);
5662 	} while (start != enode);
5663 
5664 	node = mte_to_node(enode);
5665 	node->slot_len = mte_dead_leaves(enode, mt, slots);
5666 	if (free)
5667 		mt_free_bulk(node->slot_len, slots);
5668 
5669 free_leaf:
5670 	if (free)
5671 		mt_free_rcu(&node->rcu);
5672 	else
5673 		mt_clear_meta(mt, node, node->type);
5674 }
5675 
5676 /*
5677  * mte_destroy_walk() - Free a tree or sub-tree.
5678  * @enode: the encoded maple node (maple_enode) to start
5679  * @mt: the tree to free - needed for node types.
5680  *
5681  * Must hold the write lock.
5682  */
5683 static inline void mte_destroy_walk(struct maple_enode *enode,
5684 				    struct maple_tree *mt)
5685 {
5686 	struct maple_node *node = mte_to_node(enode);
5687 
5688 	if (mt_in_rcu(mt)) {
5689 		mt_destroy_walk(enode, mt, false);
5690 		call_rcu(&node->rcu, mt_free_walk);
5691 	} else {
5692 		mt_destroy_walk(enode, mt, true);
5693 	}
5694 }
5695 
5696 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5697 {
5698 	if (unlikely(mas_is_paused(wr_mas->mas)))
5699 		mas_reset(wr_mas->mas);
5700 
5701 	if (!mas_is_start(wr_mas->mas)) {
5702 		if (mas_is_none(wr_mas->mas)) {
5703 			mas_reset(wr_mas->mas);
5704 		} else {
5705 			wr_mas->r_max = wr_mas->mas->max;
5706 			wr_mas->type = mte_node_type(wr_mas->mas->node);
5707 			if (mas_is_span_wr(wr_mas))
5708 				mas_reset(wr_mas->mas);
5709 		}
5710 	}
5711 }
5712 
5713 /* Interface */
5714 
5715 /**
5716  * mas_store() - Store an @entry.
5717  * @mas: The maple state.
5718  * @entry: The entry to store.
5719  *
5720  * The @mas->index and @mas->last are used to set the range for the @entry.
5721  * Note: The @mas should have pre-allocated entries to ensure there is memory to
5722  * store the entry.  Please see mas_expected_entries()/mas_destroy() for more details.
5723  *
5724  * Return: the first entry between mas->index and mas->last or %NULL.
5725  */
5726 void *mas_store(struct ma_state *mas, void *entry)
5727 {
5728 	MA_WR_STATE(wr_mas, mas, entry);
5729 
5730 	trace_ma_write(__func__, mas, 0, entry);
5731 #ifdef CONFIG_DEBUG_MAPLE_TREE
5732 	if (mas->index > mas->last)
5733 		pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5734 	MT_BUG_ON(mas->tree, mas->index > mas->last);
5735 	if (mas->index > mas->last) {
5736 		mas_set_err(mas, -EINVAL);
5737 		return NULL;
5738 	}
5739 
5740 #endif
5741 
5742 	/*
5743 	 * Storing is the same operation as insert with the added caveat that it
5744 	 * can overwrite entries.  Although this seems simple enough, one may
5745 	 * want to examine what happens if a single store operation were to
5746 	 * overwrite multiple entries within a self-balancing B-Tree.
5747 	 */
5748 	mas_wr_store_setup(&wr_mas);
5749 	mas_wr_store_entry(&wr_mas);
5750 	return wr_mas.content;
5751 }
5752 EXPORT_SYMBOL_GPL(mas_store);
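
/*
 * Editorial example (a minimal sketch, not upstream code): because stores
 * operate on ranges, a single mas_store() spanning 0-99 replaces every
 * entry previously stored inside that range.  The tree and value below are
 * hypothetical, and allocation error handling is omitted.
 *
 *	MA_STATE(mas, &tree, 0, 99);
 *
 *	mas_lock(&mas);
 *	mas_store(&mas, xa_mk_value(7));
 *	mas_unlock(&mas);
 */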
5753 
5754 /**
5755  * mas_store_gfp() - Store a value into the tree.
5756  * @mas: The maple state
5757  * @entry: The entry to store
5758  * @gfp: The GFP_FLAGS to use for allocations if necessary.
5759  *
5760  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
5761  * be allocated.
5762  */
5763 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp)
5764 {
5765 	MA_WR_STATE(wr_mas, mas, entry);
5766 
5767 	mas_wr_store_setup(&wr_mas);
5768 	trace_ma_write(__func__, mas, 0, entry);
5769 retry:
5770 	mas_wr_store_entry(&wr_mas);
5771 	if (unlikely(mas_nomem(mas, gfp)))
5772 		goto retry;
5773 
5774 	if (unlikely(mas_is_err(mas)))
5775 		return xa_err(mas->node);
5776 
5777 	return 0;
5778 }
5779 EXPORT_SYMBOL_GPL(mas_store_gfp);
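
/*
 * Editorial example (a minimal sketch, not upstream code): mas_store_gfp()
 * retries the store after allocating, dropping and retaking the internal
 * lock when the gfp flags allow blocking.  The tree, range and value are
 * hypothetical.
 *
 *	MA_STATE(mas, &tree, 100, 199);
 *	int err;
 *
 *	mas_lock(&mas);
 *	err = mas_store_gfp(&mas, xa_mk_value(2), GFP_KERNEL);
 *	mas_unlock(&mas);
 *	if (err)
 *		return err;
 */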
5780 
5781 /**
5782  * mas_store_prealloc() - Store a value into the tree using memory
5783  * preallocated in the maple state.
5784  * @mas: The maple state
5785  * @entry: The entry to store.
5786  */
5787 void mas_store_prealloc(struct ma_state *mas, void *entry)
5788 {
5789 	MA_WR_STATE(wr_mas, mas, entry);
5790 
5791 	mas_wr_store_setup(&wr_mas);
5792 	trace_ma_write(__func__, mas, 0, entry);
5793 	mas_wr_store_entry(&wr_mas);
5794 	BUG_ON(mas_is_err(mas));
5795 	mas_destroy(mas);
5796 }
5797 EXPORT_SYMBOL_GPL(mas_store_prealloc);
5798 
5799 /**
5800  * mas_preallocate() - Preallocate enough nodes for a store operation
5801  * @mas: The maple state
5802  * @gfp: The GFP_FLAGS to use for allocations.
5803  *
5804  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5805  */
5806 int mas_preallocate(struct ma_state *mas, gfp_t gfp)
5807 {
5808 	int ret;
5809 
5810 	mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp);
5811 	mas->mas_flags |= MA_STATE_PREALLOC;
5812 	if (likely(!mas_is_err(mas)))
5813 		return 0;
5814 
5815 	mas_set_alloc_req(mas, 0);
5816 	ret = xa_err(mas->node);
5817 	mas_reset(mas);
5818 	mas_destroy(mas);
5819 	mas_reset(mas);
5820 	return ret;
5821 }
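
/*
 * Editorial example (a minimal sketch, not upstream code): the
 * preallocation pattern for a store that must not fail once locks are
 * held.  All names are hypothetical.  mas_store_prealloc() frees any
 * unused nodes via mas_destroy() before returning.
 *
 *	MA_STATE(mas, &tree, start, end);
 *
 *	if (mas_preallocate(&mas, GFP_KERNEL))
 *		return -ENOMEM;
 *	mas_lock(&mas);
 *	mas_store_prealloc(&mas, entry);
 *	mas_unlock(&mas);
 */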
5822 
5823 /*
5824  * mas_destroy() - destroy a maple state.
5825  * @mas: The maple state
5826  *
5827  * Upon completion, check the left-most node and rebalance against the node to
5828  * the right if necessary.  Frees any allocated nodes associated with this maple
5829  * state.
5830  */
5831 void mas_destroy(struct ma_state *mas)
5832 {
5833 	struct maple_alloc *node;
5834 	unsigned long total;
5835 
5836 	/*
5837 	 * When using mas_for_each() to insert an expected number of elements,
5838 	 * it is possible that the number inserted is less than the expected
5839 	 * number.  To fix an invalid final node, a check is performed here to
5840 	 * rebalance the previous node with the final node.
5841 	 */
5842 	if (mas->mas_flags & MA_STATE_REBALANCE) {
5843 		unsigned char end;
5844 
5845 		if (mas_is_start(mas))
5846 			mas_start(mas);
5847 
5848 		mtree_range_walk(mas);
5849 		end = mas_data_end(mas) + 1;
5850 		if (end < mt_min_slot_count(mas->node) - 1)
5851 			mas_destroy_rebalance(mas, end);
5852 
5853 		mas->mas_flags &= ~MA_STATE_REBALANCE;
5854 	}
5855 	mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC);
5856 
5857 	total = mas_allocated(mas);
5858 	while (total) {
5859 		node = mas->alloc;
5860 		mas->alloc = node->slot[0];
5861 		if (node->node_count > 1) {
5862 			size_t count = node->node_count - 1;
5863 
5864 			mt_free_bulk(count, (void __rcu **)&node->slot[1]);
5865 			total -= count;
5866 		}
5867 		kmem_cache_free(maple_node_cache, node);
5868 		total--;
5869 	}
5870 
5871 	mas->alloc = NULL;
5872 }
5873 EXPORT_SYMBOL_GPL(mas_destroy);
5874 
5875 /*
5876  * mas_expected_entries() - Set the expected number of entries that will be inserted.
5877  * @mas: The maple state
5878  * @nr_entries: The number of expected entries.
5879  *
5880  * This will attempt to pre-allocate enough nodes to store the expected number
5881  * of entries.  The allocations will occur using the bulk allocator interface
5882  * for speed.  Please call mas_destroy() on the @mas after inserting the entries
5883  * to ensure any unused nodes are freed.
5884  *
5885  * Return: 0 on success, -ENOMEM if memory could not be allocated.
5886  */
5887 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
5888 {
5889 	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
5890 	struct maple_enode *enode = mas->node;
5891 	int nr_nodes;
5892 	int ret;
5893 
5894 	/*
5895 	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
5896 	 * forking a process and duplicating the VMAs from one tree to a new
5897 	 * tree.  When such a situation arises, it is known that the new tree is
5898 	 * not going to be used until the entire tree is populated.  For
5899 	 * performance reasons, it is best to use a bulk load with RCU disabled.
5900 	 * This allows for optimistic splitting that favours the left and reuse
5901 	 * of nodes during the operation.
5902 	 */
5903 
5904 	/* Optimize splitting for bulk insert in-order */
5905 	mas->mas_flags |= MA_STATE_BULK;
5906 
5907 	/*
5908 	 * Avoid overflow, assume a gap between each entry and a trailing null.
5909 	 * If this is wrong, it just means allocation can happen during
5910 	 * insertion of entries.
5911 	 */
5912 	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
5913 	if (!mt_is_alloc(mas->tree))
5914 		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;
5915 
5916 	/* Leaves; reduce slots to keep space for expansion */
5917 	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
5918 	/* Internal nodes */
5919 	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
5920 	/* Add working room for split (2 nodes) + new parents */
5921 	mas_node_count(mas, nr_nodes + 3);
5922 
5923 	/* Detect if allocations run out */
5924 	mas->mas_flags |= MA_STATE_PREALLOC;
5925 
5926 	if (!mas_is_err(mas))
5927 		return 0;
5928 
5929 	ret = xa_err(mas->node);
5930 	mas->node = enode;
5931 	mas_destroy(mas);
5932 	return ret;
5933 
5934 }
5935 EXPORT_SYMBOL_GPL(mas_expected_entries);
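
/*
 * Editorial example (a minimal sketch, not upstream code): bulk-loading a
 * new tree in ascending index order, as when duplicating a tree.  The
 * first[], last[] and entries[] arrays and nr are hypothetical.
 *
 *	MA_STATE(mas, &new_tree, 0, 0);
 *	unsigned long i;
 *
 *	if (mas_expected_entries(&mas, nr))
 *		return -ENOMEM;
 *	mas_lock(&mas);
 *	for (i = 0; i < nr; i++) {
 *		mas_set_range(&mas, first[i], last[i]);
 *		mas_store(&mas, entries[i]);
 *	}
 *	mas_destroy(&mas);
 *	mas_unlock(&mas);
 */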
5936 
5937 /**
5938  * mas_next() - Get the next entry.
5939  * @mas: The maple state
5940  * @max: The maximum index to check.
5941  *
5942  * Returns the next entry after @mas->index.
5943  * Must hold rcu_read_lock or the write lock.
5944  * Can return the zero entry.
5945  *
5946  * Return: The next entry or %NULL
5947  */
5948 void *mas_next(struct ma_state *mas, unsigned long max)
5949 {
5950 	if (mas_is_none(mas) || mas_is_paused(mas))
5951 		mas->node = MAS_START;
5952 
5953 	if (mas_is_start(mas))
5954 		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
5955 
5956 	if (mas_is_ptr(mas)) {
5957 		if (!mas->index) {
5958 			mas->index = 1;
5959 			mas->last = ULONG_MAX;
5960 		}
5961 		return NULL;
5962 	}
5963 
5964 	if (mas->last == ULONG_MAX)
5965 		return NULL;
5966 
5967 	/* Retries on dead nodes handled by mas_next_entry */
5968 	return mas_next_entry(mas, max);
5969 }
5970 EXPORT_SYMBOL_GPL(mas_next);
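
/*
 * Editorial example (a minimal sketch, not upstream code): stepping through
 * the entries located after index 0 under the RCU read lock.  The tree is
 * hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_next(&mas, ULONG_MAX)) != NULL)
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */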
5971 
5972 /**
5973  * mt_next() - get the next value in the maple tree
5974  * @mt: The maple tree
5975  * @index: The start index
5976  * @max: The maximum index to check
5977  *
5978  * Return: The entry higher than @index, or %NULL if nothing is found.
5979  */
5980 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
5981 {
5982 	void *entry = NULL;
5983 	MA_STATE(mas, mt, index, index);
5984 
5985 	rcu_read_lock();
5986 	entry = mas_next(&mas, max);
5987 	rcu_read_unlock();
5988 	return entry;
5989 }
5990 EXPORT_SYMBOL_GPL(mt_next);
5991 
5992 /**
5993  * mas_prev() - Get the previous entry
5994  * @mas: The maple state
5995  * @min: The minimum value to check.
5996  *
5997  * Must hold rcu_read_lock or the write lock.
5998  * Will reset mas to MAS_START if the node is MAS_NONE.  Will stop on
5999  * non-searchable nodes.
6000  *
6001  * Return: the previous value or %NULL.
6002  */
6003 void *mas_prev(struct ma_state *mas, unsigned long min)
6004 {
6005 	if (!mas->index) {
6006 		/* Nothing comes before 0 */
6007 		mas->last = 0;
6008 		mas->node = MAS_NONE;
6009 		return NULL;
6010 	}
6011 
6012 	if (unlikely(mas_is_ptr(mas)))
6013 		return NULL;
6014 
6015 	if (mas_is_none(mas) || mas_is_paused(mas))
6016 		mas->node = MAS_START;
6017 
6018 	if (mas_is_start(mas)) {
6019 		mas_walk(mas);
6020 		if (!mas->index)
6021 			return NULL;
6022 	}
6023 
6024 	if (mas_is_ptr(mas)) {
6025 		if (!mas->index) {
6026 			mas->last = 0;
6027 			return NULL;
6028 		}
6029 
6030 		mas->index = mas->last = 0;
6031 		return mas_root_locked(mas);
6032 	}
6033 	return mas_prev_entry(mas, min);
6034 }
6035 EXPORT_SYMBOL_GPL(mas_prev);
6036 
6037 /**
6038  * mt_prev() - get the previous value in the maple tree
6039  * @mt: The maple tree
6040  * @index: The start index
6041  * @min: The minimum index to check
6042  *
6043  * Return: The entry before @index, or %NULL if nothing is found.
6044  */
6045 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
6046 {
6047 	void *entry = NULL;
6048 	MA_STATE(mas, mt, index, index);
6049 
6050 	rcu_read_lock();
6051 	entry = mas_prev(&mas, min);
6052 	rcu_read_unlock();
6053 	return entry;
6054 }
6055 EXPORT_SYMBOL_GPL(mt_prev);
6056 
6057 /**
6058  * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
6059  * @mas: The maple state to pause
6060  *
6061  * Some users need to pause a walk and drop the lock they're holding in
6062  * order to yield to a higher priority thread or carry out an operation
6063  * on an entry.  Those users should call this function before they drop
6064  * the lock.  It resets the @mas to be suitable for the next iteration
6065  * of the loop after the user has reacquired the lock.  If most entries
6066  * found during a walk require you to call mas_pause(), the mt_for_each()
6067  * iterator may be more appropriate.
6068  *
6069  */
6070 void mas_pause(struct ma_state *mas)
6071 {
6072 	mas->node = MAS_PAUSE;
6073 }
6074 EXPORT_SYMBOL_GPL(mas_pause);
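
/*
 * Editorial example (a minimal sketch, not upstream code): pausing a walk
 * to drop the lock and reschedule, then resuming.  process() is a
 * hypothetical helper; the tree is hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		process(entry);
 *		if (need_resched()) {
 *			mas_pause(&mas);
 *			rcu_read_unlock();
 *			cond_resched();
 *			rcu_read_lock();
 *		}
 *	}
 *	rcu_read_unlock();
 */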
6075 
6076 /**
6077  * mas_find() - On the first call, find the entry at or after mas->index up to
6078  * %max.  Otherwise, find the entry after mas->index.
6079  * @mas: The maple state
6080  * @max: The maximum value to check.
6081  *
6082  * Must hold rcu_read_lock or the write lock.
6083  * If an entry exists, last and index are updated accordingly.
6084  * May set @mas->node to MAS_NONE.
6085  *
6086  * Return: The entry or %NULL.
6087  */
6088 void *mas_find(struct ma_state *mas, unsigned long max)
6089 {
6090 	if (unlikely(mas_is_paused(mas))) {
6091 		if (unlikely(mas->last == ULONG_MAX)) {
6092 			mas->node = MAS_NONE;
6093 			return NULL;
6094 		}
6095 		mas->node = MAS_START;
6096 		mas->index = ++mas->last;
6097 	}
6098 
6099 	if (unlikely(mas_is_none(mas)))
6100 		mas->node = MAS_START;
6101 
6102 	if (unlikely(mas_is_start(mas))) {
6103 		/* First run or continue */
6104 		void *entry;
6105 
6106 		if (mas->index > max)
6107 			return NULL;
6108 
6109 		entry = mas_walk(mas);
6110 		if (entry)
6111 			return entry;
6112 	}
6113 
6114 	if (unlikely(!mas_searchable(mas)))
6115 		return NULL;
6116 
6117 	/* Retries on dead nodes handled by mas_next_entry */
6118 	return mas_next_entry(mas, max);
6119 }
6120 EXPORT_SYMBOL_GPL(mas_find);
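
/*
 * Editorial example (a minimal sketch, not upstream code): mas_find() is
 * the backend of the mas_for_each() iterator, which visits every entry
 * from mas->index up to the given maximum.  The tree is hypothetical.
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	mas_for_each(&mas, entry, 70)
 *		pr_info("%lu-%lu: %p\n", mas.index, mas.last, entry);
 *	rcu_read_unlock();
 */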
6121 
6122 /**
6123  * mas_find_rev() - On the first call, find the first non-null entry at or below
6124  * mas->index down to %min.  Otherwise find the first non-null entry below
6125  * mas->index down to %min.
6126  * @mas: The maple state
6127  * @min: The minimum value to check.
6128  *
6129  * Must hold rcu_read_lock or the write lock.
6130  * If an entry exists, last and index are updated accordingly.
6131  * May set @mas->node to MAS_NONE.
6132  *
6133  * Return: The entry or %NULL.
6134  */
6135 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6136 {
6137 	if (unlikely(mas_is_paused(mas))) {
6138 		if (unlikely(mas->last == ULONG_MAX)) {
6139 			mas->node = MAS_NONE;
6140 			return NULL;
6141 		}
6142 		mas->node = MAS_START;
6143 		mas->last = --mas->index;
6144 	}
6145 
6146 	if (unlikely(mas_is_start(mas))) {
6147 		/* First run or continue */
6148 		void *entry;
6149 
6150 		if (mas->index < min)
6151 			return NULL;
6152 
6153 		entry = mas_walk(mas);
6154 		if (entry)
6155 			return entry;
6156 	}
6157 
6158 	if (unlikely(!mas_searchable(mas)))
6159 		return NULL;
6160 
6161 	if (mas->index < min)
6162 		return NULL;
6163 
6164 	/* Retries on dead nodes handled by mas_prev_entry */
6165 	return mas_prev_entry(mas, min);
6166 }
6167 EXPORT_SYMBOL_GPL(mas_find_rev);
6168 
6169 /**
6170  * mas_erase() - Find the range in which index resides and erase the entire
6171  * range.
6172  * @mas: The maple state
6173  *
6174  * Must hold the write lock.
6175  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6176  * erases that range.
6177  *
6178  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6179  */
6180 void *mas_erase(struct ma_state *mas)
6181 {
6182 	void *entry;
6183 	MA_WR_STATE(wr_mas, mas, NULL);
6184 
6185 	if (mas_is_none(mas) || mas_is_paused(mas))
6186 		mas->node = MAS_START;
6187 
6188 	/* Retry unnecessary when holding the write lock. */
6189 	entry = mas_state_walk(mas);
6190 	if (!entry)
6191 		return NULL;
6192 
6193 write_retry:
6194 	/* Must reset to ensure spanning writes of last slot are detected */
6195 	mas_reset(mas);
6196 	mas_wr_store_setup(&wr_mas);
6197 	mas_wr_store_entry(&wr_mas);
6198 	if (mas_nomem(mas, GFP_KERNEL))
6199 		goto write_retry;
6200 
6201 	return entry;
6202 }
6203 EXPORT_SYMBOL_GPL(mas_erase);
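
/*
 * Editorial example (a minimal sketch, not upstream code): erasing whatever
 * range covers index 12 while holding the write lock; afterwards mas.index
 * and mas.last describe the erased range.  The tree is hypothetical.
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *	void *old;
 *
 *	mas_lock(&mas);
 *	old = mas_erase(&mas);
 *	mas_unlock(&mas);
 */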
6204 
6205 /**
6206  * mas_nomem() - Check if there was an error allocating and do the allocation
6207  * if necessary.  If there are allocations, then free them.
6208  * @mas: The maple state
6209  * @gfp: The GFP_FLAGS to use for allocations
6210  * Return: true on allocation, false otherwise.
6211  */
6212 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6213 	__must_hold(mas->tree->lock)
6214 {
6215 	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6216 		mas_destroy(mas);
6217 		return false;
6218 	}
6219 
6220 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6221 		mtree_unlock(mas->tree);
6222 		mas_alloc_nodes(mas, gfp);
6223 		mtree_lock(mas->tree);
6224 	} else {
6225 		mas_alloc_nodes(mas, gfp);
6226 	}
6227 
6228 	if (!mas_allocated(mas))
6229 		return false;
6230 
6231 	mas->node = MAS_START;
6232 	return true;
6233 }
6234 
6235 void __init maple_tree_init(void)
6236 {
6237 	maple_node_cache = kmem_cache_create("maple_node",
6238 			sizeof(struct maple_node), sizeof(struct maple_node),
6239 			SLAB_PANIC, NULL);
6240 }
6241 
6242 /**
6243  * mtree_load() - Load a value stored in a maple tree
6244  * @mt: The maple tree
6245  * @index: The index to load
6246  *
6247  * Return: the entry or %NULL
6248  */
6249 void *mtree_load(struct maple_tree *mt, unsigned long index)
6250 {
6251 	MA_STATE(mas, mt, index, index);
6252 	void *entry;
6253 
6254 	trace_ma_read(__func__, &mas);
6255 	rcu_read_lock();
6256 retry:
6257 	entry = mas_start(&mas);
6258 	if (unlikely(mas_is_none(&mas)))
6259 		goto unlock;
6260 
6261 	if (unlikely(mas_is_ptr(&mas))) {
6262 		if (index)
6263 			entry = NULL;
6264 
6265 		goto unlock;
6266 	}
6267 
6268 	entry = mtree_lookup_walk(&mas);
6269 	if (!entry && unlikely(mas_is_start(&mas)))
6270 		goto retry;
6271 unlock:
6272 	rcu_read_unlock();
6273 	if (xa_is_zero(entry))
6274 		return NULL;
6275 
6276 	return entry;
6277 }
6278 EXPORT_SYMBOL(mtree_load);
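
/*
 * Editorial example (a minimal sketch, not upstream code): mtree_load()
 * takes the RCU read lock itself, so the caller needs no locking for a
 * lookup.  The tree is hypothetical.
 *
 *	void *entry;
 *
 *	entry = mtree_load(&tree, 42);
 *	if (entry)
 *		pr_info("index 42 -> %p\n", entry);
 */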
6279 
6280 /**
6281  * mtree_store_range() - Store an entry at a given range.
6282  * @mt: The maple tree
6283  * @index: The start of the range
6284  * @last: The end of the range
6285  * @entry: The entry to store
6286  * @gfp: The GFP_FLAGS to use for allocations
6287  *
6288  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6289  * be allocated.
6290  */
6291 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6292 		unsigned long last, void *entry, gfp_t gfp)
6293 {
6294 	MA_STATE(mas, mt, index, last);
6295 	MA_WR_STATE(wr_mas, &mas, entry);
6296 
6297 	trace_ma_write(__func__, &mas, 0, entry);
6298 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6299 		return -EINVAL;
6300 
6301 	if (index > last)
6302 		return -EINVAL;
6303 
6304 	mtree_lock(mt);
6305 retry:
6306 	mas_wr_store_entry(&wr_mas);
6307 	if (mas_nomem(&mas, gfp))
6308 		goto retry;
6309 
6310 	mtree_unlock(mt);
6311 	if (mas_is_err(&mas))
6312 		return xa_err(mas.node);
6313 
6314 	return 0;
6315 }
6316 EXPORT_SYMBOL(mtree_store_range);
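
/*
 * Editorial example (a minimal sketch, not upstream code): storing one
 * value over an entire range, then reading it back from the middle.  The
 * tree and values are hypothetical.
 *
 *	DEFINE_MTREE(tree);
 *	int err;
 *
 *	err = mtree_store_range(&tree, 0x1000, 0x1fff, xa_mk_value(3),
 *				GFP_KERNEL);
 *	if (!err)
 *		WARN_ON(mtree_load(&tree, 0x1234) != xa_mk_value(3));
 */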
6317 
6318 /**
6319  * mtree_store() - Store an entry at a given index.
6320  * @mt: The maple tree
6321  * @index: The index to store the value
6322  * @entry: The entry to store
6323  * @gfp: The GFP_FLAGS to use for allocations
6324  *
6325  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6326  * be allocated.
6327  */
6328 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6329 		 gfp_t gfp)
6330 {
6331 	return mtree_store_range(mt, index, index, entry, gfp);
6332 }
6333 EXPORT_SYMBOL(mtree_store);
6334 
6335 /**
6336  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6337  * @mt: The maple tree
6338  * @first: The start of the range
6339  * @last: The end of the range
6340  * @entry: The entry to store
6341  * @gfp: The GFP_FLAGS to use for allocations.
6342  *
6343  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6344  * request, -ENOMEM if memory could not be allocated.
6345  */
6346 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6347 		unsigned long last, void *entry, gfp_t gfp)
6348 {
6349 	MA_STATE(ms, mt, first, last);
6350 
6351 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6352 		return -EINVAL;
6353 
6354 	if (first > last)
6355 		return -EINVAL;
6356 
6357 	mtree_lock(mt);
6358 retry:
6359 	mas_insert(&ms, entry);
6360 	if (mas_nomem(&ms, gfp))
6361 		goto retry;
6362 
6363 	mtree_unlock(mt);
6364 	if (mas_is_err(&ms))
6365 		return xa_err(ms.node);
6366 
6367 	return 0;
6368 }
6369 EXPORT_SYMBOL(mtree_insert_range);
6370 
6371 /**
6372  * mtree_insert() - Insert an entry at a given index if there is no value.
6373  * @mt: The maple tree
6374  * @index: The index to store the value
6375  * @entry: The entry to store
6376  * @gfp: The GFP_FLAGS to use for allocations.
6377  *
6378  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6379  * request, -ENOMEM if memory could not be allocated.
6380  */
6381 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6382 		 gfp_t gfp)
6383 {
6384 	return mtree_insert_range(mt, index, index, entry, gfp);
6385 }
6386 EXPORT_SYMBOL(mtree_insert);
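
/*
 * Editorial example (a minimal sketch, not upstream code): unlike
 * mtree_store(), an insert refuses to overwrite an occupied range.  The
 * tree and value are hypothetical.
 *
 *	int err;
 *
 *	err = mtree_insert(&tree, 7, xa_mk_value(1), GFP_KERNEL);
 *	if (err == -EEXIST)
 *		pr_debug("index 7 is already occupied\n");
 */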
6387 
6388 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6389 		void *entry, unsigned long size, unsigned long min,
6390 		unsigned long max, gfp_t gfp)
6391 {
6392 	int ret = 0;
6393 
6394 	MA_STATE(mas, mt, min, max - size);
6395 	if (!mt_is_alloc(mt))
6396 		return -EINVAL;
6397 
6398 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6399 		return -EINVAL;
6400 
6401 	if (min > max)
6402 		return -EINVAL;
6403 
6404 	if (max < size)
6405 		return -EINVAL;
6406 
6407 	if (!size)
6408 		return -EINVAL;
6409 
6410 	mtree_lock(mt);
6411 retry:
6412 	mas.offset = 0;
6413 	mas.index = min;
6414 	mas.last = max - size;
6415 	ret = mas_alloc(&mas, entry, size, startp);
6416 	if (mas_nomem(&mas, gfp))
6417 		goto retry;
6418 
6419 	mtree_unlock(mt);
6420 	return ret;
6421 }
6422 EXPORT_SYMBOL(mtree_alloc_range);
6423 
6424 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6425 		void *entry, unsigned long size, unsigned long min,
6426 		unsigned long max, gfp_t gfp)
6427 {
6428 	int ret = 0;
6429 
6430 	MA_STATE(mas, mt, min, max - size);
6431 	if (!mt_is_alloc(mt))
6432 		return -EINVAL;
6433 
6434 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6435 		return -EINVAL;
6436 
6437 	if (min >= max)
6438 		return -EINVAL;
6439 
6440 	if (max < size - 1)
6441 		return -EINVAL;
6442 
6443 	if (!size)
6444 		return -EINVAL;
6445 
6446 	mtree_lock(mt);
6447 retry:
6448 	ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6449 	if (mas_nomem(&mas, gfp))
6450 		goto retry;
6451 
6452 	mtree_unlock(mt);
6453 	return ret;
6454 }
6455 EXPORT_SYMBOL(mtree_alloc_rrange);
6456 
6457 /**
6458  * mtree_erase() - Find an index and erase the entire range.
6459  * @mt: The maple tree
6460  * @index: The index to erase
6461  *
6462  * Erasing is the same as a walk to an entry then a store of a NULL to that
6463  * ENTIRE range.  In fact, it is implemented as such using the advanced API.
6464  *
6465  * Return: The entry stored at the @index or %NULL
6466  */
6467 void *mtree_erase(struct maple_tree *mt, unsigned long index)
6468 {
6469 	void *entry = NULL;
6470 
6471 	MA_STATE(mas, mt, index, index);
6472 	trace_ma_op(__func__, &mas);
6473 
6474 	mtree_lock(mt);
6475 	entry = mas_erase(&mas);
6476 	mtree_unlock(mt);
6477 
6478 	return entry;
6479 }
6480 EXPORT_SYMBOL(mtree_erase);
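
/*
 * Editorial example (a minimal sketch, not upstream code): erasing clears
 * the whole range covering the index, not just the single index.  The
 * tree is hypothetical.
 *
 *	void *old;
 *
 *	old = mtree_erase(&tree, 0x1234);
 */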
6481 
6482 /**
6483  * __mt_destroy() - Walk and free all nodes of a locked maple tree.
6484  * @mt: The maple tree
6485  *
6486  * Note: Does not handle locking.
6487  */
6488 void __mt_destroy(struct maple_tree *mt)
6489 {
6490 	void *root = mt_root_locked(mt);
6491 
6492 	rcu_assign_pointer(mt->ma_root, NULL);
6493 	if (xa_is_node(root))
6494 		mte_destroy_walk(root, mt);
6495 
6496 	mt->ma_flags = 0;
6497 }
6498 EXPORT_SYMBOL_GPL(__mt_destroy);
6499 
6500 /**
6501  * mtree_destroy() - Destroy a maple tree
6502  * @mt: The maple tree
6503  *
6504  * Frees all resources used by the tree.  Handles locking.
6505  */
6506 void mtree_destroy(struct maple_tree *mt)
6507 {
6508 	mtree_lock(mt);
6509 	__mt_destroy(mt);
6510 	mtree_unlock(mt);
6511 }
6512 EXPORT_SYMBOL(mtree_destroy);
6513 
6514 /**
6515  * mt_find() - Search from the start up until an entry is found.
6516  * @mt: The maple tree
6517  * @index: Pointer which contains the start location of the search
6518  * @max: The maximum value to check
6519  *
6520  * Handles locking.  @index will be incremented to one beyond the range.
6521  *
6522  * Return: The entry at or after the @index or %NULL
6523  */
6524 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
6525 {
6526 	MA_STATE(mas, mt, *index, *index);
6527 	void *entry;
6528 #ifdef CONFIG_DEBUG_MAPLE_TREE
6529 	unsigned long copy = *index;
6530 #endif
6531 
6532 	trace_ma_read(__func__, &mas);
6533 
6534 	if ((*index) > max)
6535 		return NULL;
6536 
6537 	rcu_read_lock();
6538 retry:
6539 	entry = mas_state_walk(&mas);
6540 	if (mas_is_start(&mas))
6541 		goto retry;
6542 
6543 	if (unlikely(xa_is_zero(entry)))
6544 		entry = NULL;
6545 
6546 	if (entry)
6547 		goto unlock;
6548 
6549 	while (mas_searchable(&mas) && (mas.index < max)) {
6550 		entry = mas_next_entry(&mas, max);
6551 		if (likely(entry && !xa_is_zero(entry)))
6552 			break;
6553 	}
6554 
6555 	if (unlikely(xa_is_zero(entry)))
6556 		entry = NULL;
6557 unlock:
6558 	rcu_read_unlock();
6559 	if (likely(entry)) {
6560 		*index = mas.last + 1;
6561 #ifdef CONFIG_DEBUG_MAPLE_TREE
6562 		if ((*index) && (*index) <= copy)
6563 			pr_err("index not increased! %lx <= %lx\n",
6564 			       *index, copy);
6565 		MT_BUG_ON(mt, (*index) && ((*index) <= copy));
6566 #endif
6567 	}
6568 
6569 	return entry;
6570 }
6571 EXPORT_SYMBOL(mt_find);
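
/*
 * Editorial example (a minimal sketch, not upstream code): mt_find()
 * advances *index past each result, which is what the mt_for_each()
 * iterator builds on (via mt_find_after()).  The tree is hypothetical.
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
 *		pr_info("%p found; next search starts at %lu\n", entry, index);
 */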
6572 
6573 /**
6574  * mt_find_after() - Search from the start up until an entry is found.
6575  * @mt: The maple tree
6576  * @index: Pointer which contains the start location of the search
6577  * @max: The maximum value to check
6578  *
6579  * Handles locking.  Detects wrapping when @index == 0.
6580  *
6581  * Return: The entry at or after the @index or %NULL
6582  */
6583 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6584 		    unsigned long max)
6585 {
6586 	if (!(*index))
6587 		return NULL;
6588 
6589 	return mt_find(mt, index, max);
6590 }
6591 EXPORT_SYMBOL(mt_find_after);
6592 
6593 #ifdef CONFIG_DEBUG_MAPLE_TREE
6594 atomic_t maple_tree_tests_run;
6595 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6596 atomic_t maple_tree_tests_passed;
6597 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6598 
6599 #ifndef __KERNEL__
6600 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6601 void mt_set_non_kernel(unsigned int val)
6602 {
6603 	kmem_cache_set_non_kernel(maple_node_cache, val);
6604 }
6605 
6606 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6607 unsigned long mt_get_alloc_size(void)
6608 {
6609 	return kmem_cache_get_alloc(maple_node_cache);
6610 }
6611 
6612 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6613 void mt_zero_nr_tallocated(void)
6614 {
6615 	kmem_cache_zero_nr_tallocated(maple_node_cache);
6616 }
6617 
6618 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6619 unsigned int mt_nr_tallocated(void)
6620 {
6621 	return kmem_cache_nr_tallocated(maple_node_cache);
6622 }
6623 
6624 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6625 unsigned int mt_nr_allocated(void)
6626 {
6627 	return kmem_cache_nr_allocated(maple_node_cache);
6628 }
6629 
6630 /*
6631  * mas_dead_node() - Check if the maple state is pointing to a dead node.
6632  * @mas: The maple state
6633  * @index: The index to restore in @mas.
6634  *
6635  * Used in test code.
6636  * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6637  */
6638 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6639 {
6640 	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6641 		return 0;
6642 
6643 	if (likely(!mte_dead_node(mas->node)))
6644 		return 0;
6645 
6646 	mas_rewalk(mas, index);
6647 	return 1;
6648 }
6649 
6650 void mt_cache_shrink(void)
6651 {
6652 }
6653 #else
6654 /*
6655  * mt_cache_shrink() - For testing, don't use this.
6656  *
6657  * Certain testcases can trigger an OOM when combined with other memory
6658  * debugging configuration options.  This function is used to reduce the
6659  * possibility of an out of memory event due to kmem_cache objects remaining
6660  * around for longer than usual.
6661  */
6662 void mt_cache_shrink(void)
6663 {
6664 	kmem_cache_shrink(maple_node_cache);
6665 
6666 }
6667 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6668 
6669 #endif /* not defined __KERNEL__ */
6670 /*
6671  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6672  * @mas: The maple state
6673  * @offset: The offset into the slot array to fetch.
6674  *
6675  * Return: The entry stored at @offset.
6676  */
6677 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6678 		unsigned char offset)
6679 {
6680 	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6681 			offset);
6682 }
6683 
6684 
6685 /*
6686  * mas_first_entry() - Go to the first leaf and find the first entry.
6687  * @mas: the maple state.
6688  * @mn: the node of @mas->node.
6689  * @limit: the maximum index to check.
6690  * @mt: the node type of @mas->node.
6691  *
6692  * Sets mas->offset to the offset of the entry, mas->index to the range start.
6693  * Return: The first entry or NULL (sets @mas to MAS_NONE if nothing is found).
6694  */
6695 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
6696 		unsigned long limit, enum maple_type mt)
6697 
6698 {
6699 	unsigned long max;
6700 	unsigned long *pivots;
6701 	void __rcu **slots;
6702 	void *entry = NULL;
6703 
6704 	mas->index = mas->min;
6705 	if (mas->index > limit)
6706 		goto none;
6707 
6708 	max = mas->max;
6709 	mas->offset = 0;
6710 	while (likely(!ma_is_leaf(mt))) {
6711 		MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6712 		slots = ma_slots(mn, mt);
6713 		entry = mas_slot(mas, slots, 0);
6714 		pivots = ma_pivots(mn, mt);
6715 		if (unlikely(ma_dead_node(mn)))
6716 			return NULL;
6717 		max = pivots[0];
6718 		mas->node = entry;
6719 		mn = mas_mn(mas);
6720 		mt = mte_node_type(mas->node);
6721 	}
6722 	MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
6723 
6724 	mas->max = max;
6725 	slots = ma_slots(mn, mt);
6726 	entry = mas_slot(mas, slots, 0);
6727 	if (unlikely(ma_dead_node(mn)))
6728 		return NULL;
6729 
6730 	/* Slot 0 or 1 must be set */
6731 	if (mas->index > limit)
6732 		goto none;
6733 
6734 	if (likely(entry))
6735 		return entry;
6736 
6737 	mas->offset = 1;
6738 	entry = mas_slot(mas, slots, 1);
6739 	pivots = ma_pivots(mn, mt);
6740 	if (unlikely(ma_dead_node(mn)))
6741 		return NULL;
6742 
6743 	mas->index = pivots[0] + 1;
6744 	if (mas->index > limit)
6745 		goto none;
6746 
6747 	if (likely(entry))
6748 		return entry;
6749 
6750 none:
6751 	if (likely(!ma_dead_node(mn)))
6752 		mas->node = MAS_NONE;
6753 	return NULL;
6754 }
6755 
6756 /* Depth first search, post-order */
6757 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
6758 {
6759 
6760 	struct maple_enode *p = MAS_NONE, *mn = mas->node;
6761 	unsigned long p_min, p_max;
6762 
6763 	mas_next_node(mas, mas_mn(mas), max);
6764 	if (!mas_is_none(mas))
6765 		return;
6766 
6767 	if (mte_is_root(mn))
6768 		return;
6769 
6770 	mas->node = mn;
6771 	mas_ascend(mas);
6772 	while (mas->node != MAS_NONE) {
6773 		p = mas->node;
6774 		p_min = mas->min;
6775 		p_max = mas->max;
6776 		mas_prev_node(mas, 0);
6777 	}
6778 
6779 	if (p == MAS_NONE)
6780 		return;
6781 
6782 	mas->node = p;
6783 	mas->max = p_max;
6784 	mas->min = p_min;
6785 }
6786 
6787 /* Tree validations */
6788 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6789 		unsigned long min, unsigned long max, unsigned int depth);
6790 static void mt_dump_range(unsigned long min, unsigned long max,
6791 			  unsigned int depth)
6792 {
6793 	static const char spaces[] = "                                ";
6794 
6795 	if (min == max)
6796 		pr_info("%.*s%lu: ", depth * 2, spaces, min);
6797 	else
6798 		pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
6799 }
6800 
6801 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
6802 			  unsigned int depth)
6803 {
6804 	mt_dump_range(min, max, depth);
6805 
6806 	if (xa_is_value(entry))
6807 		pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
6808 				xa_to_value(entry), entry);
6809 	else if (xa_is_zero(entry))
6810 		pr_cont("zero (%ld)\n", xa_to_internal(entry));
6811 	else if (mt_is_reserved(entry))
6812 		pr_cont("UNKNOWN ENTRY (%p)\n", entry);
6813 	else
6814 		pr_cont("%p\n", entry);
6815 }
6816 
6817 static void mt_dump_range64(const struct maple_tree *mt, void *entry,
6818 			unsigned long min, unsigned long max, unsigned int depth)
6819 {
6820 	struct maple_range_64 *node = &mte_to_node(entry)->mr64;
6821 	bool leaf = mte_is_leaf(entry);
6822 	unsigned long first = min;
6823 	int i;
6824 
6825 	pr_cont(" contents: ");
6826 	for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
6827 		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6828 	pr_cont("%p\n", node->slot[i]);
6829 	for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
6830 		unsigned long last = max;
6831 
6832 		if (i < (MAPLE_RANGE64_SLOTS - 1))
6833 			last = node->pivot[i];
6834 		else if (!node->slot[i] && max != mt_node_max(entry))
6835 			break;
6836 		if (last == 0 && i > 0)
6837 			break;
6838 		if (leaf)
6839 			mt_dump_entry(mt_slot(mt, node->slot, i),
6840 					first, last, depth + 1);
6841 		else if (node->slot[i])
6842 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6843 					first, last, depth + 1);
6844 
6845 		if (last == max)
6846 			break;
6847 		if (last > max) {
6848 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6849 					node, last, max, i);
6850 			break;
6851 		}
6852 		first = last + 1;
6853 	}
6854 }
6855 
6856 static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
6857 			unsigned long min, unsigned long max, unsigned int depth)
6858 {
6859 	struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
6860 	bool leaf = mte_is_leaf(entry);
6861 	unsigned long first = min;
6862 	int i;
6863 
6864 	pr_cont(" contents: ");
6865 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
6866 		pr_cont("%lu ", node->gap[i]);
6867 	pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
6868 	for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
6869 		pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
6870 	pr_cont("%p\n", node->slot[i]);
6871 	for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
6872 		unsigned long last = max;
6873 
6874 		if (i < (MAPLE_ARANGE64_SLOTS - 1))
6875 			last = node->pivot[i];
6876 		else if (!node->slot[i])
6877 			break;
6878 		if (last == 0 && i > 0)
6879 			break;
6880 		if (leaf)
6881 			mt_dump_entry(mt_slot(mt, node->slot, i),
6882 					first, last, depth + 1);
6883 		else if (node->slot[i])
6884 			mt_dump_node(mt, mt_slot(mt, node->slot, i),
6885 					first, last, depth + 1);
6886 
6887 		if (last == max)
6888 			break;
6889 		if (last > max) {
6890 			pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
6891 					node, last, max, i);
6892 			break;
6893 		}
6894 		first = last + 1;
6895 	}
6896 }
6897 
6898 static void mt_dump_node(const struct maple_tree *mt, void *entry,
6899 		unsigned long min, unsigned long max, unsigned int depth)
6900 {
6901 	struct maple_node *node = mte_to_node(entry);
6902 	unsigned int type = mte_node_type(entry);
6903 	unsigned int i;
6904 
6905 	mt_dump_range(min, max, depth);
6906 
6907 	pr_cont("node %p depth %d type %d parent %p", node, depth, type,
6908 			node ? node->parent : NULL);
6909 	switch (type) {
6910 	case maple_dense:
6911 		pr_cont("\n");
6912 		for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
6913 			if (min + i > max)
6914 				pr_cont("OUT OF RANGE: ");
6915 			mt_dump_entry(mt_slot(mt, node->slot, i),
6916 					min + i, min + i, depth);
6917 		}
6918 		break;
6919 	case maple_leaf_64:
6920 	case maple_range_64:
6921 		mt_dump_range64(mt, entry, min, max, depth);
6922 		break;
6923 	case maple_arange_64:
6924 		mt_dump_arange64(mt, entry, min, max, depth);
6925 		break;
6926 
6927 	default:
6928 		pr_cont(" UNKNOWN TYPE\n");
6929 	}
6930 }
6931 
6932 void mt_dump(const struct maple_tree *mt)
6933 {
6934 	void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));
6935 
6936 	pr_info("maple_tree(%p) flags %X, height %u root %p\n",
6937 		 mt, mt->ma_flags, mt_height(mt), entry);
6938 	if (!xa_is_node(entry))
6939 		mt_dump_entry(entry, 0, 0, 0);
6940 	else if (entry)
6941 		mt_dump_node(mt, entry, 0, mt_node_max(entry), 0);
6942 }
6943 EXPORT_SYMBOL_GPL(mt_dump);
6944 
6945 /*
6946  * Calculate the maximum gap in a node and check if that's what is reported in
6947  * the parent (unless root).
6948  */
6949 static void mas_validate_gaps(struct ma_state *mas)
6950 {
6951 	struct maple_enode *mte = mas->node;
6952 	struct maple_node *p_mn;
6953 	unsigned long gap = 0, max_gap = 0;
6954 	unsigned long p_end, p_start = mas->min;
6955 	unsigned char p_slot;
6956 	unsigned long *gaps = NULL;
6957 	unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
6958 	int i;
6959 
6960 	if (ma_is_dense(mte_node_type(mte))) {
6961 		for (i = 0; i < mt_slot_count(mte); i++) {
6962 			if (mas_get_slot(mas, i)) {
6963 				if (gap > max_gap)
6964 					max_gap = gap;
6965 				gap = 0;
6966 				continue;
6967 			}
6968 			gap++;
6969 		}
6970 		goto counted;
6971 	}
6972 
6973 	gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
6974 	for (i = 0; i < mt_slot_count(mte); i++) {
6975 		p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));
6976 
6977 		if (!gaps) {
6978 			if (mas_get_slot(mas, i)) {
6979 				gap = 0;
6980 				goto not_empty;
6981 			}
6982 
6983 			gap += p_end - p_start + 1;
6984 		} else {
6985 			void *entry = mas_get_slot(mas, i);
6986 
6987 			gap = gaps[i];
6988 			if (!entry) {
6989 				if (gap != p_end - p_start + 1) {
6990 					pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
6991 						mas_mn(mas), i,
6992 						mas_get_slot(mas, i), gap,
6993 						p_end, p_start);
6994 					mt_dump(mas->tree);
6995 
6996 					MT_BUG_ON(mas->tree,
6997 						gap != p_end - p_start + 1);
6998 				}
6999 			} else {
7000 				if (gap > p_end - p_start + 1) {
7001 					pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n",
7002 					mas_mn(mas), i, gap, p_end, p_start,
7003 					p_end - p_start + 1);
7004 					MT_BUG_ON(mas->tree,
7005 						gap > p_end - p_start + 1);
7006 				}
7007 			}
7008 		}
7009 
7010 		if (gap > max_gap)
7011 			max_gap = gap;
7012 not_empty:
7013 		p_start = p_end + 1;
7014 		if (p_end >= mas->max)
7015 			break;
7016 	}
7017 
7018 counted:
7019 	if (mte_is_root(mte))
7020 		return;
7021 
7022 	p_slot = mte_parent_slot(mas->node);
7023 	p_mn = mte_parent(mte);
7024 	MT_BUG_ON(mas->tree, max_gap > mas->max);
7025 	if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
7026 		pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
7027 		mt_dump(mas->tree);
7028 	}
7029 
7030 	MT_BUG_ON(mas->tree,
7031 		  ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
7032 }
7033 
7034 static void mas_validate_parent_slot(struct ma_state *mas)
7035 {
7036 	struct maple_node *parent;
7037 	struct maple_enode *node;
7038 	enum maple_type p_type = mas_parent_enum(mas, mas->node);
7039 	unsigned char p_slot = mte_parent_slot(mas->node);
7040 	void __rcu **slots;
7041 	int i;
7042 
7043 	if (mte_is_root(mas->node))
7044 		return;
7045 
7046 	parent = mte_parent(mas->node);
7047 	slots = ma_slots(parent, p_type);
7048 	MT_BUG_ON(mas->tree, mas_mn(mas) == parent);
7049 
7050 	/* Check prev/next parent slot for duplicate node entry */
7051 
7052 	for (i = 0; i < mt_slots[p_type]; i++) {
7053 		node = mas_slot(mas, slots, i);
7054 		if (i == p_slot) {
7055 			if (node != mas->node)
7056 				pr_err("parent %p[%u] does not have %p\n",
7057 					parent, i, mas_mn(mas));
7058 			MT_BUG_ON(mas->tree, node != mas->node);
7059 		} else if (node == mas->node) {
7060 			pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
7061 			       mas_mn(mas), parent, i, p_slot);
7062 			MT_BUG_ON(mas->tree, node == mas->node);
7063 		}
7064 	}
7065 }
7066 
7067 static void mas_validate_child_slot(struct ma_state *mas)
7068 {
7069 	enum maple_type type = mte_node_type(mas->node);
7070 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7071 	unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
7072 	struct maple_enode *child;
7073 	unsigned char i;
7074 
7075 	if (mte_is_leaf(mas->node))
7076 		return;
7077 
7078 	for (i = 0; i < mt_slots[type]; i++) {
7079 		child = mas_slot(mas, slots, i);
7080 		if (!pivots[i] || pivots[i] == mas->max)
7081 			break;
7082 
7083 		if (!child)
7084 			break;
7085 
7086 		if (mte_parent_slot(child) != i) {
7087 			pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
7088 			       mas_mn(mas), i, mte_to_node(child),
7089 			       mte_parent_slot(child));
7090 			MT_BUG_ON(mas->tree, 1);
7091 		}
7092 
7093 		if (mte_parent(child) != mte_to_node(mas->node)) {
7094 			pr_err("child %p has parent %p not %p\n",
7095 			       mte_to_node(child), mte_parent(child),
7096 			       mte_to_node(mas->node));
7097 			MT_BUG_ON(mas->tree, 1);
7098 		}
7099 	}
7100 }
7101 
7102 /*
7103  * Validate all pivots are within mas->min and mas->max.
7104  */
7105 static void mas_validate_limits(struct ma_state *mas)
7106 {
7107 	int i;
7108 	unsigned long prev_piv = 0;
7109 	enum maple_type type = mte_node_type(mas->node);
7110 	void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
7111 	unsigned long *pivots = ma_pivots(mas_mn(mas), type);
7112 
7113 	/* all limits are fine here. */
7114 	if (mte_is_root(mas->node))
7115 		return;
7116 
7117 	for (i = 0; i < mt_slots[type]; i++) {
7118 		unsigned long piv;
7119 
7120 		piv = mas_safe_pivot(mas, pivots, i, type);
7121 
7122 		if (!piv && (i != 0))
7123 			break;
7124 
7125 		if (!mte_is_leaf(mas->node)) {
7126 			void *entry = mas_slot(mas, slots, i);
7127 
7128 			if (!entry)
7129 				pr_err("%p[%u] cannot be null\n",
7130 				       mas_mn(mas), i);
7131 
7132 			MT_BUG_ON(mas->tree, !entry);
7133 		}
7134 
7135 		if (prev_piv > piv) {
7136 			pr_err("%p[%u] piv %lu < prev_piv %lu\n",
7137 				mas_mn(mas), i, piv, prev_piv);
7138 			MT_BUG_ON(mas->tree, piv < prev_piv);
7139 		}
7140 
7141 		if (piv < mas->min) {
7142 			pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
7143 				piv, mas->min);
7144 			MT_BUG_ON(mas->tree, piv < mas->min);
7145 		}
7146 		if (piv > mas->max) {
7147 			pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
7148 				piv, mas->max);
7149 			MT_BUG_ON(mas->tree, piv > mas->max);
7150 		}
7151 		prev_piv = piv;
7152 		if (piv == mas->max)
7153 			break;
7154 	}
7155 	for (i += 1; i < mt_slots[type]; i++) {
7156 		void *entry = mas_slot(mas, slots, i);
7157 
7158 		if (entry && (i != mt_slots[type] - 1)) {
7159 			pr_err("%p[%u] should not have entry %p\n", mas_mn(mas),
7160 			       i, entry);
7161 			MT_BUG_ON(mas->tree, entry != NULL);
7162 		}
7163 
7164 		if (i < mt_pivots[type]) {
7165 			unsigned long piv = pivots[i];
7166 
7167 			if (!piv)
7168 				continue;
7169 
7170 			pr_err("%p[%u] should not have piv %lu\n",
7171 			       mas_mn(mas), i, piv);
7172 			MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
7173 		}
7174 	}
7175 }
7176 
7177 static void mt_validate_nulls(struct maple_tree *mt)
7178 {
7179 	void *entry, *last = (void *)1;
7180 	unsigned char offset = 0;
7181 	void __rcu **slots;
7182 	MA_STATE(mas, mt, 0, 0);
7183 
7184 	mas_start(&mas);
7185 	if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
7186 		return;
7187 
7188 	while (!mte_is_leaf(mas.node))
7189 		mas_descend(&mas);
7190 
7191 	slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
7192 	do {
7193 		entry = mas_slot(&mas, slots, offset);
7194 		if (!last && !entry) {
7195 			pr_err("Sequential nulls end at %p[%u]\n",
7196 				mas_mn(&mas), offset);
7197 		}
7198 		MT_BUG_ON(mt, !last && !entry);
7199 		last = entry;
7200 		if (offset == mas_data_end(&mas)) {
7201 			mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
7202 			if (mas_is_none(&mas))
7203 				return;
7204 			offset = 0;
7205 			slots = ma_slots(mte_to_node(mas.node),
7206 					 mte_node_type(mas.node));
7207 		} else {
7208 			offset++;
7209 		}
7210 
7211 	} while (!mas_is_none(&mas));
7212 }
7213 
7214 /*
7215  * validate a maple tree by checking:
7216  * 1. The limits (pivots are within mas->min to mas->max)
7217  * 2. The gap is correctly set in the parents
7218  */
7219 void mt_validate(struct maple_tree *mt)
7220 {
7221 	unsigned char end;
7222 
7223 	MA_STATE(mas, mt, 0, 0);
7224 	rcu_read_lock();
7225 	mas_start(&mas);
7226 	if (!mas_searchable(&mas))
7227 		goto done;
7228 
7229 	mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7230 	while (!mas_is_none(&mas)) {
7231 		MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7232 		if (!mte_is_root(mas.node)) {
7233 			end = mas_data_end(&mas);
7234 			if ((end < mt_min_slot_count(mas.node)) &&
7235 			    (mas.max != ULONG_MAX)) {
7236 				pr_err("Invalid size %u of %p\n", end,
7237 				mas_mn(&mas));
7238 				MT_BUG_ON(mas.tree, 1);
7239 			}
7240 
7241 		}
7242 		mas_validate_parent_slot(&mas);
7243 		mas_validate_child_slot(&mas);
7244 		mas_validate_limits(&mas);
7245 		if (mt_is_alloc(mt))
7246 			mas_validate_gaps(&mas);
7247 		mas_dfs_postorder(&mas, ULONG_MAX);
7248 	}
7249 	mt_validate_nulls(mt);
7250 done:
7251 	rcu_read_unlock();
7252 
7253 }
7254 EXPORT_SYMBOL_GPL(mt_validate);
7255 
7256 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7257