// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the tree.
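 *
 * Example (hypothetical values, a sketch of the layout described above): in
 * a leaf covering the range [10, 100], storing entry A over [10, 15] and
 * entry B over [16, 100] places A in slot 0 with pivot 0 == 15 and B in
 * slot 1 with pivot 1 == 100.  A lookup of index 12 compares against the
 * pivots until pivot 0 (15) is >= 12, so slot 0 (A) is returned.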
 *
 */


#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
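 *
 * For example, a spanning store builds its replacement in the new left,
 * middle (rarely), and right states below, while orig_l and orig_r track the
 * edges of the segment being replaced; the displaced nodes are collected on
 * the free and destroy topiary lists.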
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp | __GFP_ZERO);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp | __GFP_ZERO, size,
				     nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}


static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void mte_set_full(const struct maple_enode *node)
{
	node = (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void mte_clear_full(const struct maple_enode *node)
{
	node = (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The parent pointer cast as a maple_enode
 * @mt: The maple tree
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}

/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bits 3-6 or 2-6, depending on
 * the parent type.
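 *
 * Example (worked from the encoding above): for a maple_range_64 parent,
 * storing slot 4 sets the low byte of the parent pointer to
 * (4 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64, i.e. 0x26, while
 * the upper bits still hold the 256B-aligned parent address.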
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long) parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long) mte_to_node(enode)->parent;

	/* Root. */
	if (val & 1)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @node.
 * @node: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent = (void *)((unsigned long)
					     node->parent & ~MAPLE_NODE_MASK);

	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0
 * is set, then the alloc contains the number of requested nodes.  If there is
 * an allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.
 * Set the request either in the node or do the necessary encoding to store
 * in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
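 *
 * Example: with pivots {5, 10, 15}, offset 2 starts at pivots[1] + 1 == 11,
 * while offset 0 has no previous pivot and so starts at @mas->min.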
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat - the ma_topiary, a linked list of dead nodes.
 * @dead_enode - the node to be marked as dead and added to the tail of the
 * list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
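 *
 * Example (a sketch, assuming the MA_TOPIARY() initializer from
 * <linux/maple_tree.h>): mark nodes dead, queue them, then free the list:
 *
 *	MA_TOPIARY(mat, mas->tree);
 *	mat_add(&mat, old_enode);
 *	mas_mat_free(mas, &mat);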
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas - the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		pivots = ma_pivots(a_node, a_type);
		a_enode = mt_mk_node(a_node, a_type);

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);

	/* nothing or a request pending. */
	if (unlikely(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (!node->node_count) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		node->slot[0] = NULL;
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[node->node_count];
	node->slot[node->node_count--] = NULL;

single_node:
new_head:
	ret->total = 0;
	ret->node_count = 0;
	if (ret->request_count) {
		mas_set_alloc_req(mas, ret->request_count + 1);
		ret->request_count = 0;
	}
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
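 *
 * Example (a sketch): a node popped from the allocation stack that turns
 * out to be unneeded can be pushed back for a later pop instead of being
 * freed:
 *
 *	struct maple_node *mn = mas_pop_node(mas);
 *	...
 *	mas_push_node(mas, mn);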
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	memset(reuse, 0, sizeof(*reuse));
	count = mas_allocated(mas);

	if (count && (head->node_count < MAPLE_ALLOC_SLOTS - 1)) {
		if (head->slot[0])
			head->node_count++;
		head->slot[head->node_count] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		head->request_count = 0;
		reuse->slot[0] = head;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned long success = allocated;
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS - 1) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated)
			node->slot[0] = mas->alloc;

		success++;
		mas->alloc = node;
		requested--;
	}

	node = mas->alloc;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->slot[0]) {
			unsigned int offset = node->node_count + 1;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		/* zero indexed. */
		if (slots == (void **)&node->slot)
			node->node_count--;

		success += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = success;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = success;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max, depth, and offset to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->node = MAS_NONE;
		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;
		mas->offset = 0;

		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
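 *
 * Example: for a maple_range_64 node (15 pivots, 16 slots), a zero last
 * pivot means the end is stored in the metadata; pivots[14] == mas->max
 * means slot 14 is the end; otherwise all 16 slots hold data and 15 is
 * returned.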
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas - the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
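 *
 * Example: for an allocation node with gaps {0, 7, 3} and a metadata gap
 * offset of 1, mas_max_gap() returns gaps[1] == 7 without rescanning the
 * gap array.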
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas - the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas - the maple state (for the tree)
 * @parent - the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas - the ma_state to use for operations.
 * @advanced - boolean to adopt the child nodes and free the old node (false)
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced)
		mas_free(mas, old_enode);
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side
		 * will still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be
 * two splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum of the node's range.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of a
	 * store of a range that overwrites the end and beginning of two full
	 * nodes.  The result is a set of entries that cannot be stored in 2
	 * nodes.
	 * Sometimes, these two nodes can also be located in different parent
	 * nodes which are also full.  This can carry upwards all the way to
	 * the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero
		 * based.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);
	if (!(*mid_split))
		return split;

	*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			      unsigned char mas_end, struct maple_big_node *b_node,
			      unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in
 * a node during a write.
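 *
 * Example: with 15 pivots and an assumed end of 10, a set pivots[10] below
 * mas->max means the trailing NULL at slot 11 is included, so the metadata
 * end is stored as 11; an end at or beyond slot 15 leaves no room and the
 * metadata is not written.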
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata in a full node */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicates the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * The actual end of the stored data is returned in @b_node->b_end.
 */
static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
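 *
 * Minimal usage sketch (illustrative; assumes @mas already points to a
 * node below the root):
 *
 *	if (mas_next_sibling(&mas))
 *		... @mas now refers to the node in the next parent slot ...
 *	else
 *		... no sibling to the right; @mas is unchanged ...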
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count;
	unsigned char offset;
	unsigned long index, min, max;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	min = mas_safe_min(mas, wr_mas->pivots, offset);
	if (unlikely(offset == count))
		goto max;

	max = wr_mas->pivots[offset];
	index = mas->index;
	if (unlikely(index <= max))
		goto done;

	if (unlikely(!max && offset))
		goto max;

	min = max + 1;
	while (++offset < count) {
		max = wr_mas->pivots[offset];
		if (index <= max)
			goto done;
		else if (unlikely(!max))
			break;

		min = max + 1;
	}

max:
	max = mas->max;
done:
	wr_mas->r_max = max;
	wr_mas->r_min = min;
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to which the slots are added (usually the destroy list)
 * @start: The starting slot (inclusive)
 * @end: The end slot (inclusive)
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
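 *
 * Worked example (illustrative, assuming no dead-node adjustments apply):
 * if both original states reference the same node, with orig_l at offset 1
 * and orig_r at offset 5, then l_start becomes 2 and r_end becomes 4, and
 * the live entries in slots 2-4 are queued on @mast->destroy via
 * mas_topiary_range().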
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring
 * the node to the right.  Check the nodes to the right, then to the left, at
 * each level upwards until the root is reached.  Free and destroy as needed.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
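 *
 * Worked example (illustrative): if a sibling exists to the right one level
 * up, the walk ascends, advances orig_r by one offset, descends back to the
 * original depth at offset 0, and folds that neighbour's data into
 * @mast->bn via mast_rebalance_next(); otherwise the left side is tried
 * with mast_rebalance_prev().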
2422 */ 2423 static inline 2424 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2425 { 2426 struct ma_state r_tmp = *mast->orig_r; 2427 struct ma_state l_tmp = *mast->orig_l; 2428 struct maple_enode *ancestor = NULL; 2429 unsigned char start, end; 2430 unsigned char depth = 0; 2431 2432 r_tmp = *mast->orig_r; 2433 l_tmp = *mast->orig_l; 2434 do { 2435 mas_ascend(mast->orig_r); 2436 mas_ascend(mast->orig_l); 2437 depth++; 2438 if (!ancestor && 2439 (mast->orig_r->node == mast->orig_l->node)) { 2440 ancestor = mast->orig_r->node; 2441 end = mast->orig_r->offset - 1; 2442 start = mast->orig_l->offset + 1; 2443 } 2444 2445 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2446 if (!ancestor) { 2447 ancestor = mast->orig_r->node; 2448 start = 0; 2449 } 2450 2451 mast->orig_r->offset++; 2452 do { 2453 mas_descend(mast->orig_r); 2454 mast->orig_r->offset = 0; 2455 depth--; 2456 } while (depth); 2457 2458 mast_rebalance_next(mast); 2459 do { 2460 unsigned char l_off = 0; 2461 struct maple_enode *child = r_tmp.node; 2462 2463 mas_ascend(&r_tmp); 2464 if (ancestor == r_tmp.node) 2465 l_off = start; 2466 2467 if (r_tmp.offset) 2468 r_tmp.offset--; 2469 2470 if (l_off < r_tmp.offset) 2471 mas_topiary_range(&r_tmp, mast->destroy, 2472 l_off, r_tmp.offset); 2473 2474 if (l_tmp.node != child) 2475 mat_add(mast->free, child); 2476 2477 } while (r_tmp.node != ancestor); 2478 2479 *mast->orig_l = l_tmp; 2480 return true; 2481 2482 } else if (mast->orig_l->offset != 0) { 2483 if (!ancestor) { 2484 ancestor = mast->orig_l->node; 2485 end = mas_data_end(mast->orig_l); 2486 } 2487 2488 mast->orig_l->offset--; 2489 do { 2490 mas_descend(mast->orig_l); 2491 mast->orig_l->offset = 2492 mas_data_end(mast->orig_l); 2493 depth--; 2494 } while (depth); 2495 2496 mast_rebalance_prev(mast); 2497 do { 2498 unsigned char r_off; 2499 struct maple_enode *child = l_tmp.node; 2500 2501 mas_ascend(&l_tmp); 2502 if (ancestor == l_tmp.node) 2503 r_off = end; 2504 else 2505 r_off = mas_data_end(&l_tmp); 2506 2507 if (l_tmp.offset < r_off) 2508 l_tmp.offset++; 2509 2510 if (l_tmp.offset < r_off) 2511 mas_topiary_range(&l_tmp, mast->destroy, 2512 l_tmp.offset, r_off); 2513 2514 if (r_tmp.node != child) 2515 mat_add(mast->free, child); 2516 2517 } while (l_tmp.node != ancestor); 2518 2519 *mast->orig_r = r_tmp; 2520 return true; 2521 } 2522 } while (!mte_is_root(mast->orig_r->node)); 2523 2524 *mast->orig_r = r_tmp; 2525 *mast->orig_l = l_tmp; 2526 return false; 2527 } 2528 2529 /* 2530 * mast_ascend_free() - Add current original maple state nodes to the free list 2531 * and ascend. 2532 * @mast: the maple subtree state. 2533 * 2534 * Ascend the original left and right sides and add the previous nodes to the 2535 * free list. Set the slots to point to the correct location in the new nodes. 
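 *
 * Worked example (illustrative): if mast->l covers 100-199 and mast->r
 * covers 200-299, then after ascending, orig_r->index is set to 299 and
 * orig_l->index to 100, so the parent-level walks land on every slot that
 * overlaps the replaced range.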
 */
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
	struct maple_enode *left = mast->orig_l->node;
	struct maple_enode *right = mast->orig_r->node;

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);
	mat_add(mast->free, left);

	if (left != right)
		mat_add(mast->free, right);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set slot to ensure all
	 * of the node's contents are freed or destroyed.
	 */
	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}

/*
 * mas_new_ma_node() - Create and return a new maple node.  Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state.  This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}

/*
 * mas_mab_to_node() - Set up right and middle nodes
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the range, passed through to mab_calc_split()
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
	struct maple_big_node *b_node, struct maple_enode **left,
	struct maple_enode **right, struct maple_enode **middle,
	unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}

/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node: the big node to add the entry to
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add; if NULL, nothing happens
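 *
 * Typical use (as in mas_spanning_rebalance() later in this file): the
 * three sub-tree states are appended in order, and %NULL nodes are skipped:
 *
 *	mab_set_b_end(mast->bn, &l_mas, left);
 *	mab_set_b_end(mast->bn, &m_mas, middle);
 *	mab_set_b_end(mast->bn, &r_mas, right);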
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}

/*
 * mas_set_split_parent() - combine_then_separate helper function.  Sets the parent
 * of @mas->node to either @left or @right, depending on @slot and @split.
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot @mas->node was placed in
 * @split: the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mte_set_parent(mas->node, left, *slot);
	else if (right)
		mte_set_parent(mas->node, right, (*slot) - split - 1);

	(*slot)++;
}

/*
 * mte_mid_split_check() - Check if the next node passes the mid-split.
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the mid-split.
 * @slot: The offset being checked.
 * @split: Pointer to the split location; set to @mid_split on a crossing.
 * @mid_split: The middle split location.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}

/*
 * mast_set_split_parents() - Helper function to set three nodes' parents.  Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location between @left and (@middle ? @middle : @right)
 * @mid_split: the split location between @middle and @right
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}

/*
 * mas_wmb_replace() - Write memory barrier and replace
 * @mas: The maple state
 * @free: the maple topiary list of nodes to free
 * @destroy: The maple topiary list of nodes to destroy (walk and free)
 *
 * Updates gap as necessary.
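 *
 * Ordering sketch (illustrative): the write side publishes in two steps so
 * that RCU readers never observe the new subtree before the old nodes are
 * marked dead:
 *
 *	smp_wmb();                (dead-node marks become visible first)
 *	mas_replace(mas, true);   (then the new data is linked into the tree)
 *
 * A reader that still lands in a dead node sees ma_dead_node() and restarts
 * its walk (see mtree_range_walk()).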
2744 */ 2745 static inline void mas_wmb_replace(struct ma_state *mas, 2746 struct ma_topiary *free, 2747 struct ma_topiary *destroy) 2748 { 2749 /* All nodes must see old data as dead prior to replacing that data */ 2750 smp_wmb(); /* Needed for RCU */ 2751 2752 /* Insert the new data in the tree */ 2753 mas_replace(mas, true); 2754 2755 if (!mte_is_leaf(mas->node)) 2756 mas_descend_adopt(mas); 2757 2758 mas_mat_free(mas, free); 2759 2760 if (destroy) 2761 mas_mat_destroy(mas, destroy); 2762 2763 if (mte_is_leaf(mas->node)) 2764 return; 2765 2766 mas_update_gap(mas); 2767 } 2768 2769 /* 2770 * mast_new_root() - Set a new tree root during subtree creation 2771 * @mast: The maple subtree state 2772 * @mas: The maple state 2773 */ 2774 static inline void mast_new_root(struct maple_subtree_state *mast, 2775 struct ma_state *mas) 2776 { 2777 mas_mn(mast->l)->parent = 2778 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2779 if (!mte_dead_node(mast->orig_l->node) && 2780 !mte_is_root(mast->orig_l->node)) { 2781 do { 2782 mast_ascend_free(mast); 2783 mast_topiary(mast); 2784 } while (!mte_is_root(mast->orig_l->node)); 2785 } 2786 if ((mast->orig_l->node != mas->node) && 2787 (mast->l->depth > mas_mt_height(mas))) { 2788 mat_add(mast->free, mas->node); 2789 } 2790 } 2791 2792 /* 2793 * mast_cp_to_nodes() - Copy data out to nodes. 2794 * @mast: The maple subtree state 2795 * @left: The left encoded maple node 2796 * @middle: The middle encoded maple node 2797 * @right: The right encoded maple node 2798 * @split: The location to split between left and (middle ? middle : right) 2799 * @mid_split: The location to split between middle and right. 2800 */ 2801 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2802 struct maple_enode *left, struct maple_enode *middle, 2803 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2804 { 2805 bool new_lmax = true; 2806 2807 mast->l->node = mte_node_or_none(left); 2808 mast->m->node = mte_node_or_none(middle); 2809 mast->r->node = mte_node_or_none(right); 2810 2811 mast->l->min = mast->orig_l->min; 2812 if (split == mast->bn->b_end) { 2813 mast->l->max = mast->orig_r->max; 2814 new_lmax = false; 2815 } 2816 2817 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2818 2819 if (middle) { 2820 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2821 mast->m->min = mast->bn->pivot[split] + 1; 2822 split = mid_split; 2823 } 2824 2825 mast->r->max = mast->orig_r->max; 2826 if (right) { 2827 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2828 mast->r->min = mast->bn->pivot[split] + 1; 2829 } 2830 } 2831 2832 /* 2833 * mast_combine_cp_left - Copy in the original left side of the tree into the 2834 * combined data set in the maple subtree state big node. 2835 * @mast: The maple subtree state 2836 */ 2837 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2838 { 2839 unsigned char l_slot = mast->orig_l->offset; 2840 2841 if (!l_slot) 2842 return; 2843 2844 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2845 } 2846 2847 /* 2848 * mast_combine_cp_right: Copy in the original right side of the tree into the 2849 * combined data set in the maple subtree state big node. 
2850 * @mast: The maple subtree state 2851 */ 2852 static inline void mast_combine_cp_right(struct maple_subtree_state *mast) 2853 { 2854 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) 2855 return; 2856 2857 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, 2858 mt_slot_count(mast->orig_r->node), mast->bn, 2859 mast->bn->b_end); 2860 mast->orig_r->last = mast->orig_r->max; 2861 } 2862 2863 /* 2864 * mast_sufficient: Check if the maple subtree state has enough data in the big 2865 * node to create at least one sufficient node 2866 * @mast: the maple subtree state 2867 */ 2868 static inline bool mast_sufficient(struct maple_subtree_state *mast) 2869 { 2870 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) 2871 return true; 2872 2873 return false; 2874 } 2875 2876 /* 2877 * mast_overflow: Check if there is too much data in the subtree state for a 2878 * single node. 2879 * @mast: The maple subtree state 2880 */ 2881 static inline bool mast_overflow(struct maple_subtree_state *mast) 2882 { 2883 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) 2884 return true; 2885 2886 return false; 2887 } 2888 2889 static inline void *mtree_range_walk(struct ma_state *mas) 2890 { 2891 unsigned long *pivots; 2892 unsigned char offset; 2893 struct maple_node *node; 2894 struct maple_enode *next, *last; 2895 enum maple_type type; 2896 void __rcu **slots; 2897 unsigned char end; 2898 unsigned long max, min; 2899 unsigned long prev_max, prev_min; 2900 2901 next = mas->node; 2902 min = mas->min; 2903 max = mas->max; 2904 do { 2905 offset = 0; 2906 last = next; 2907 node = mte_to_node(next); 2908 type = mte_node_type(next); 2909 pivots = ma_pivots(node, type); 2910 end = ma_data_end(node, type, pivots, max); 2911 if (unlikely(ma_dead_node(node))) 2912 goto dead_node; 2913 2914 if (pivots[offset] >= mas->index) { 2915 prev_max = max; 2916 prev_min = min; 2917 max = pivots[offset]; 2918 goto next; 2919 } 2920 2921 do { 2922 offset++; 2923 } while ((offset < end) && (pivots[offset] < mas->index)); 2924 2925 prev_min = min; 2926 min = pivots[offset - 1] + 1; 2927 prev_max = max; 2928 if (likely(offset < end && pivots[offset])) 2929 max = pivots[offset]; 2930 2931 next: 2932 slots = ma_slots(node, type); 2933 next = mt_slot(mas->tree, slots, offset); 2934 if (unlikely(ma_dead_node(node))) 2935 goto dead_node; 2936 } while (!ma_is_leaf(type)); 2937 2938 mas->offset = offset; 2939 mas->index = min; 2940 mas->last = max; 2941 mas->min = prev_min; 2942 mas->max = prev_max; 2943 mas->node = last; 2944 return (void *) next; 2945 2946 dead_node: 2947 mas_reset(mas); 2948 return NULL; 2949 } 2950 2951 /* 2952 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. 2953 * @mas: The starting maple state 2954 * @mast: The maple_subtree_state, keeps track of 4 maple states. 2955 * @count: The estimated count of iterations needed. 2956 * 2957 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root 2958 * is hit. First @b_node is split into two entries which are inserted into the 2959 * next iteration of the loop. @b_node is returned populated with the final 2960 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the 2961 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last 2962 * to account of what has been copied into the new sub-tree. The update of 2963 * orig_l_mas->last is used in mas_consume to find the slots that will need to 2964 * be either freed or destroyed. 
orig_l_mas->depth keeps track of the height of 2965 * the new sub-tree in case the sub-tree becomes the full tree. 2966 * 2967 * Return: the number of elements in b_node during the last loop. 2968 */ 2969 static int mas_spanning_rebalance(struct ma_state *mas, 2970 struct maple_subtree_state *mast, unsigned char count) 2971 { 2972 unsigned char split, mid_split; 2973 unsigned char slot = 0; 2974 struct maple_enode *left = NULL, *middle = NULL, *right = NULL; 2975 2976 MA_STATE(l_mas, mas->tree, mas->index, mas->index); 2977 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 2978 MA_STATE(m_mas, mas->tree, mas->index, mas->index); 2979 MA_TOPIARY(free, mas->tree); 2980 MA_TOPIARY(destroy, mas->tree); 2981 2982 /* 2983 * The tree needs to be rebalanced and leaves need to be kept at the same level. 2984 * Rebalancing is done by use of the ``struct maple_topiary``. 2985 */ 2986 mast->l = &l_mas; 2987 mast->m = &m_mas; 2988 mast->r = &r_mas; 2989 mast->free = &free; 2990 mast->destroy = &destroy; 2991 l_mas.node = r_mas.node = m_mas.node = MAS_NONE; 2992 if (!(mast->orig_l->min && mast->orig_r->max == ULONG_MAX) && 2993 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) 2994 mast_spanning_rebalance(mast); 2995 2996 mast->orig_l->depth = 0; 2997 2998 /* 2999 * Each level of the tree is examined and balanced, pushing data to the left or 3000 * right, or rebalancing against left or right nodes is employed to avoid 3001 * rippling up the tree to limit the amount of churn. Once a new sub-section of 3002 * the tree is created, there may be a mix of new and old nodes. The old nodes 3003 * will have the incorrect parent pointers and currently be in two trees: the 3004 * original tree and the partially new tree. To remedy the parent pointers in 3005 * the old tree, the new data is swapped into the active tree and a walk down 3006 * the tree is performed and the parent pointers are updated. 3007 * See mas_descend_adopt() for more information.. 3008 */ 3009 while (count--) { 3010 mast->bn->b_end--; 3011 mast->bn->type = mte_node_type(mast->orig_l->node); 3012 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, 3013 &mid_split, mast->orig_l->min); 3014 mast_set_split_parents(mast, left, middle, right, split, 3015 mid_split); 3016 mast_cp_to_nodes(mast, left, middle, right, split, mid_split); 3017 3018 /* 3019 * Copy data from next level in the tree to mast->bn from next 3020 * iteration 3021 */ 3022 memset(mast->bn, 0, sizeof(struct maple_big_node)); 3023 mast->bn->type = mte_node_type(left); 3024 mast->orig_l->depth++; 3025 3026 /* Root already stored in l->node. */ 3027 if (mas_is_root_limits(mast->l)) 3028 goto new_root; 3029 3030 mast_ascend_free(mast); 3031 mast_combine_cp_left(mast); 3032 l_mas.offset = mast->bn->b_end; 3033 mab_set_b_end(mast->bn, &l_mas, left); 3034 mab_set_b_end(mast->bn, &m_mas, middle); 3035 mab_set_b_end(mast->bn, &r_mas, right); 3036 3037 /* Copy anything necessary out of the right node. */ 3038 mast_combine_cp_right(mast); 3039 mast_topiary(mast); 3040 mast->orig_l->last = mast->orig_l->max; 3041 3042 if (mast_sufficient(mast)) 3043 continue; 3044 3045 if (mast_overflow(mast)) 3046 continue; 3047 3048 /* May be a new root stored in mast->bn */ 3049 if (mas_is_root_limits(mast->orig_l)) 3050 break; 3051 3052 mast_spanning_rebalance(mast); 3053 3054 /* rebalancing from other nodes may require another loop. 
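 * For example (illustrative): when a neighbour's data was folded into
 * mast->bn on the last level, the combined data may again be too large for
 * one node, so the count is bumped to run one more split pass.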
*/ 3055 if (!count) 3056 count++; 3057 } 3058 3059 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3060 mte_node_type(mast->orig_l->node)); 3061 mast->orig_l->depth++; 3062 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3063 mte_set_parent(left, l_mas.node, slot); 3064 if (middle) 3065 mte_set_parent(middle, l_mas.node, ++slot); 3066 3067 if (right) 3068 mte_set_parent(right, l_mas.node, ++slot); 3069 3070 if (mas_is_root_limits(mast->l)) { 3071 new_root: 3072 mast_new_root(mast, mas); 3073 } else { 3074 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3075 } 3076 3077 if (!mte_dead_node(mast->orig_l->node)) 3078 mat_add(&free, mast->orig_l->node); 3079 3080 mas->depth = mast->orig_l->depth; 3081 *mast->orig_l = l_mas; 3082 mte_set_node_dead(mas->node); 3083 3084 /* Set up mas for insertion. */ 3085 mast->orig_l->depth = mas->depth; 3086 mast->orig_l->alloc = mas->alloc; 3087 *mas = *mast->orig_l; 3088 mas_wmb_replace(mas, &free, &destroy); 3089 mtree_range_walk(mas); 3090 return mast->bn->b_end; 3091 } 3092 3093 /* 3094 * mas_rebalance() - Rebalance a given node. 3095 * @mas: The maple state 3096 * @b_node: The big maple node. 3097 * 3098 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3099 * Continue upwards until tree is sufficient. 3100 * 3101 * Return: the number of elements in b_node during the last loop. 3102 */ 3103 static inline int mas_rebalance(struct ma_state *mas, 3104 struct maple_big_node *b_node) 3105 { 3106 char empty_count = mas_mt_height(mas); 3107 struct maple_subtree_state mast; 3108 unsigned char shift, b_end = ++b_node->b_end; 3109 3110 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3111 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3112 3113 trace_ma_op(__func__, mas); 3114 3115 /* 3116 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3117 * against the node to the right if it exists, otherwise the node to the 3118 * left of this node is rebalanced against this node. If rebalancing 3119 * causes just one node to be produced instead of two, then the parent 3120 * is also examined and rebalanced if it is insufficient. Every level 3121 * tries to combine the data in the same way. If one node contains the 3122 * entire range of the tree, then that node is used as a new root node. 3123 */ 3124 mas_node_count(mas, 1 + empty_count * 3); 3125 if (mas_is_err(mas)) 3126 return 0; 3127 3128 mast.orig_l = &l_mas; 3129 mast.orig_r = &r_mas; 3130 mast.bn = b_node; 3131 mast.bn->type = mte_node_type(mas->node); 3132 3133 l_mas = r_mas = *mas; 3134 3135 if (mas_next_sibling(&r_mas)) { 3136 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3137 r_mas.last = r_mas.index = r_mas.max; 3138 } else { 3139 mas_prev_sibling(&l_mas); 3140 shift = mas_data_end(&l_mas) + 1; 3141 mab_shift_right(b_node, shift); 3142 mas->offset += shift; 3143 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3144 b_node->b_end = shift + b_end; 3145 l_mas.index = l_mas.last = l_mas.min; 3146 } 3147 3148 return mas_spanning_rebalance(mas, &mast, empty_count); 3149 } 3150 3151 /* 3152 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3153 * state. 3154 * @mas: The maple state 3155 * @end: The end of the left-most node. 3156 * 3157 * During a mass-insert event (such as forking), it may be necessary to 3158 * rebalance the left-most node when it is not sufficient. 
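 *
 * Context sketch (illustrative): a bulk insert packs entries tightly, so
 * when the state is torn down (see MA_STATE_BULK) the final leaf may be
 * under-filled; entries are pulled across from the node to its left to make
 * both leaves sufficient.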
3159 */ 3160 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3161 { 3162 enum maple_type mt = mte_node_type(mas->node); 3163 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3164 struct maple_enode *eparent; 3165 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3166 void __rcu **l_slots, **slots; 3167 unsigned long *l_pivs, *pivs, gap; 3168 bool in_rcu = mt_in_rcu(mas->tree); 3169 3170 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3171 3172 l_mas = *mas; 3173 mas_prev_sibling(&l_mas); 3174 3175 /* set up node. */ 3176 if (in_rcu) { 3177 /* Allocate for both left and right as well as parent. */ 3178 mas_node_count(mas, 3); 3179 if (mas_is_err(mas)) 3180 return; 3181 3182 newnode = mas_pop_node(mas); 3183 } else { 3184 newnode = &reuse; 3185 } 3186 3187 node = mas_mn(mas); 3188 newnode->parent = node->parent; 3189 slots = ma_slots(newnode, mt); 3190 pivs = ma_pivots(newnode, mt); 3191 left = mas_mn(&l_mas); 3192 l_slots = ma_slots(left, mt); 3193 l_pivs = ma_pivots(left, mt); 3194 if (!l_slots[split]) 3195 split++; 3196 tmp = mas_data_end(&l_mas) - split; 3197 3198 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3199 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3200 pivs[tmp] = l_mas.max; 3201 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3202 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3203 3204 l_mas.max = l_pivs[split]; 3205 mas->min = l_mas.max + 1; 3206 eparent = mt_mk_node(mte_parent(l_mas.node), 3207 mas_parent_enum(&l_mas, l_mas.node)); 3208 tmp += end; 3209 if (!in_rcu) { 3210 unsigned char max_p = mt_pivots[mt]; 3211 unsigned char max_s = mt_slots[mt]; 3212 3213 if (tmp < max_p) 3214 memset(pivs + tmp, 0, 3215 sizeof(unsigned long *) * (max_p - tmp)); 3216 3217 if (tmp < mt_slots[mt]) 3218 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3219 3220 memcpy(node, newnode, sizeof(struct maple_node)); 3221 ma_set_meta(node, mt, 0, tmp - 1); 3222 mte_set_pivot(eparent, mte_parent_slot(l_mas.node), 3223 l_pivs[split]); 3224 3225 /* Remove data from l_pivs. */ 3226 tmp = split + 1; 3227 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3228 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3229 ma_set_meta(left, mt, 0, split); 3230 3231 goto done; 3232 } 3233 3234 /* RCU requires replacing both l_mas, mas, and parent. */ 3235 mas->node = mt_mk_node(newnode, mt); 3236 ma_set_meta(newnode, mt, 0, tmp); 3237 3238 new_left = mas_pop_node(mas); 3239 new_left->parent = left->parent; 3240 mt = mte_node_type(l_mas.node); 3241 slots = ma_slots(new_left, mt); 3242 pivs = ma_pivots(new_left, mt); 3243 memcpy(slots, l_slots, sizeof(void *) * split); 3244 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3245 ma_set_meta(new_left, mt, 0, split); 3246 l_mas.node = mt_mk_node(new_left, mt); 3247 3248 /* replace parent. 
*/ 3249 offset = mte_parent_slot(mas->node); 3250 mt = mas_parent_enum(&l_mas, l_mas.node); 3251 parent = mas_pop_node(mas); 3252 slots = ma_slots(parent, mt); 3253 pivs = ma_pivots(parent, mt); 3254 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3255 rcu_assign_pointer(slots[offset], mas->node); 3256 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3257 pivs[offset - 1] = l_mas.max; 3258 eparent = mt_mk_node(parent, mt); 3259 done: 3260 gap = mas_leaf_max_gap(mas); 3261 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3262 gap = mas_leaf_max_gap(&l_mas); 3263 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3264 mas_ascend(mas); 3265 3266 if (in_rcu) 3267 mas_replace(mas, false); 3268 3269 mas_update_gap(mas); 3270 } 3271 3272 /* 3273 * mas_split_final_node() - Split the final node in a subtree operation. 3274 * @mast: the maple subtree state 3275 * @mas: The maple state 3276 * @height: The height of the tree in case it's a new root. 3277 */ 3278 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3279 struct ma_state *mas, int height) 3280 { 3281 struct maple_enode *ancestor; 3282 3283 if (mte_is_root(mas->node)) { 3284 if (mt_is_alloc(mas->tree)) 3285 mast->bn->type = maple_arange_64; 3286 else 3287 mast->bn->type = maple_range_64; 3288 mas->depth = height; 3289 } 3290 /* 3291 * Only a single node is used here, could be root. 3292 * The Big_node data should just fit in a single node. 3293 */ 3294 ancestor = mas_new_ma_node(mas, mast->bn); 3295 mte_set_parent(mast->l->node, ancestor, mast->l->offset); 3296 mte_set_parent(mast->r->node, ancestor, mast->r->offset); 3297 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3298 3299 mast->l->node = ancestor; 3300 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3301 mas->offset = mast->bn->b_end - 1; 3302 return true; 3303 } 3304 3305 /* 3306 * mast_fill_bnode() - Copy data into the big node in the subtree state 3307 * @mast: The maple subtree state 3308 * @mas: the maple state 3309 * @skip: The number of entries to skip for new nodes insertion. 3310 */ 3311 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3312 struct ma_state *mas, 3313 unsigned char skip) 3314 { 3315 bool cp = true; 3316 struct maple_enode *old = mas->node; 3317 unsigned char split; 3318 3319 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3320 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3321 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3322 mast->bn->b_end = 0; 3323 3324 if (mte_is_root(mas->node)) { 3325 cp = false; 3326 } else { 3327 mas_ascend(mas); 3328 mat_add(mast->free, old); 3329 mas->offset = mte_parent_slot(mas->node); 3330 } 3331 3332 if (cp && mast->l->offset) 3333 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3334 3335 split = mast->bn->b_end; 3336 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3337 mast->r->offset = mast->bn->b_end; 3338 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3339 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3340 cp = false; 3341 3342 if (cp) 3343 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3344 mast->bn, mast->bn->b_end); 3345 3346 mast->bn->b_end--; 3347 mast->bn->type = mte_node_type(mas->node); 3348 } 3349 3350 /* 3351 * mast_split_data() - Split the data in the subtree state big node into regular 3352 * nodes. 
3353 * @mast: The maple subtree state 3354 * @mas: The maple state 3355 * @split: The location to split the big node 3356 */ 3357 static inline void mast_split_data(struct maple_subtree_state *mast, 3358 struct ma_state *mas, unsigned char split) 3359 { 3360 unsigned char p_slot; 3361 3362 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3363 mte_set_pivot(mast->r->node, 0, mast->r->max); 3364 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3365 mast->l->offset = mte_parent_slot(mas->node); 3366 mast->l->max = mast->bn->pivot[split]; 3367 mast->r->min = mast->l->max + 1; 3368 if (mte_is_leaf(mas->node)) 3369 return; 3370 3371 p_slot = mast->orig_l->offset; 3372 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3373 &p_slot, split); 3374 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3375 &p_slot, split); 3376 } 3377 3378 /* 3379 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3380 * data to the right or left node if there is room. 3381 * @mas: The maple state 3382 * @height: The current height of the maple state 3383 * @mast: The maple subtree state 3384 * @left: Push left or not. 3385 * 3386 * Keeping the height of the tree low means faster lookups. 3387 * 3388 * Return: True if pushed, false otherwise. 3389 */ 3390 static inline bool mas_push_data(struct ma_state *mas, int height, 3391 struct maple_subtree_state *mast, bool left) 3392 { 3393 unsigned char slot_total = mast->bn->b_end; 3394 unsigned char end, space, split; 3395 3396 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3397 tmp_mas = *mas; 3398 tmp_mas.depth = mast->l->depth; 3399 3400 if (left && !mas_prev_sibling(&tmp_mas)) 3401 return false; 3402 else if (!left && !mas_next_sibling(&tmp_mas)) 3403 return false; 3404 3405 end = mas_data_end(&tmp_mas); 3406 slot_total += end; 3407 space = 2 * mt_slot_count(mas->node) - 2; 3408 /* -2 instead of -1 to ensure there isn't a triple split */ 3409 if (ma_is_leaf(mast->bn->type)) 3410 space--; 3411 3412 if (mas->max == ULONG_MAX) 3413 space--; 3414 3415 if (slot_total >= space) 3416 return false; 3417 3418 /* Get the data; Fill mast->bn */ 3419 mast->bn->b_end++; 3420 if (left) { 3421 mab_shift_right(mast->bn, end + 1); 3422 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3423 mast->bn->b_end = slot_total + 1; 3424 } else { 3425 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3426 } 3427 3428 /* Configure mast for splitting of mast->bn */ 3429 split = mt_slots[mast->bn->type] - 2; 3430 if (left) { 3431 /* Switch mas to prev node */ 3432 mat_add(mast->free, mas->node); 3433 *mas = tmp_mas; 3434 /* Start using mast->l for the left side. */ 3435 tmp_mas.node = mast->l->node; 3436 *mast->l = tmp_mas; 3437 } else { 3438 mat_add(mast->free, tmp_mas.node); 3439 tmp_mas.node = mast->r->node; 3440 *mast->r = tmp_mas; 3441 split = slot_total - split; 3442 } 3443 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3444 /* Update parent slot for split calculation. */ 3445 if (left) 3446 mast->orig_l->offset += end + 1; 3447 3448 mast_split_data(mast, mas, split); 3449 mast_fill_bnode(mast, mas, 2); 3450 mas_split_final_node(mast, mas, height + 1); 3451 return true; 3452 } 3453 3454 /* 3455 * mas_split() - Split data that is too big for one node into two. 3456 * @mas: The maple state 3457 * @b_node: The maple big node 3458 * Return: 1 on success, 0 on failure. 
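 *
 * Shape sketch (illustrative): a store into a full leaf allocates a big
 * node, splits it into a left and a right piece, and either stops early by
 * pushing spare entries into a sibling (see mas_push_data()) or ripples the
 * split one level up toward the root.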
3459 */ 3460 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3461 { 3462 3463 struct maple_subtree_state mast; 3464 int height = 0; 3465 unsigned char mid_split, split = 0; 3466 3467 /* 3468 * Splitting is handled differently from any other B-tree; the Maple 3469 * Tree splits upwards. Splitting up means that the split operation 3470 * occurs when the walk of the tree hits the leaves and not on the way 3471 * down. The reason for splitting up is that it is impossible to know 3472 * how much space will be needed until the leaf is (or leaves are) 3473 * reached. Since overwriting data is allowed and a range could 3474 * overwrite more than one range or result in changing one entry into 3 3475 * entries, it is impossible to know if a split is required until the 3476 * data is examined. 3477 * 3478 * Splitting is a balancing act between keeping allocations to a minimum 3479 * and avoiding a 'jitter' event where a tree is expanded to make room 3480 * for an entry followed by a contraction when the entry is removed. To 3481 * accomplish the balance, there are empty slots remaining in both left 3482 * and right nodes after a split. 3483 */ 3484 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3485 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3486 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3487 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3488 MA_TOPIARY(mat, mas->tree); 3489 3490 trace_ma_op(__func__, mas); 3491 mas->depth = mas_mt_height(mas); 3492 /* Allocation failures will happen early. */ 3493 mas_node_count(mas, 1 + mas->depth * 2); 3494 if (mas_is_err(mas)) 3495 return 0; 3496 3497 mast.l = &l_mas; 3498 mast.r = &r_mas; 3499 mast.orig_l = &prev_l_mas; 3500 mast.orig_r = &prev_r_mas; 3501 mast.free = &mat; 3502 mast.bn = b_node; 3503 3504 while (height++ <= mas->depth) { 3505 if (mt_slots[b_node->type] > b_node->b_end) { 3506 mas_split_final_node(&mast, mas, height); 3507 break; 3508 } 3509 3510 l_mas = r_mas = *mas; 3511 l_mas.node = mas_new_ma_node(mas, b_node); 3512 r_mas.node = mas_new_ma_node(mas, b_node); 3513 /* 3514 * Another way that 'jitter' is avoided is to terminate a split up early if the 3515 * left or right node has space to spare. This is referred to as "pushing left" 3516 * or "pushing right" and is similar to the B* tree, except the nodes left or 3517 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3518 * is a significant savings. 3519 */ 3520 /* Try to push left. */ 3521 if (mas_push_data(mas, height, &mast, true)) 3522 break; 3523 3524 /* Try to push right. */ 3525 if (mas_push_data(mas, height, &mast, false)) 3526 break; 3527 3528 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3529 mast_split_data(&mast, mas, split); 3530 /* 3531 * Usually correct, mab_mas_cp in the above call overwrites 3532 * r->max. 3533 */ 3534 mast.r->max = mas->max; 3535 mast_fill_bnode(&mast, mas, 1); 3536 prev_l_mas = *mast.l; 3537 prev_r_mas = *mast.r; 3538 } 3539 3540 /* Set the original node as dead */ 3541 mat_add(mast.free, mas->node); 3542 mas->node = l_mas.node; 3543 mas_wmb_replace(mas, mast.free, NULL); 3544 mtree_range_walk(mas); 3545 return 1; 3546 } 3547 3548 /* 3549 * mas_reuse_node() - Reuse the node to store the data. 3550 * @wr_mas: The maple write state 3551 * @bn: The maple big node 3552 * @end: The end of the data. 3553 * 3554 * Will always return false in RCU mode. 3555 * 3556 * Return: True if node was reused, false otherwise. 
3557 */ 3558 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3559 struct maple_big_node *bn, unsigned char end) 3560 { 3561 /* Need to be rcu safe. */ 3562 if (mt_in_rcu(wr_mas->mas->tree)) 3563 return false; 3564 3565 if (end > bn->b_end) { 3566 int clear = mt_slots[wr_mas->type] - bn->b_end; 3567 3568 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3569 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3570 } 3571 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3572 return true; 3573 } 3574 3575 /* 3576 * mas_commit_b_node() - Commit the big node into the tree. 3577 * @wr_mas: The maple write state 3578 * @b_node: The maple big node 3579 * @end: The end of the data. 3580 */ 3581 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas, 3582 struct maple_big_node *b_node, unsigned char end) 3583 { 3584 struct maple_node *node; 3585 unsigned char b_end = b_node->b_end; 3586 enum maple_type b_type = b_node->type; 3587 3588 if ((b_end < mt_min_slots[b_type]) && 3589 (!mte_is_root(wr_mas->mas->node)) && 3590 (mas_mt_height(wr_mas->mas) > 1)) 3591 return mas_rebalance(wr_mas->mas, b_node); 3592 3593 if (b_end >= mt_slots[b_type]) 3594 return mas_split(wr_mas->mas, b_node); 3595 3596 if (mas_reuse_node(wr_mas, b_node, end)) 3597 goto reuse_node; 3598 3599 mas_node_count(wr_mas->mas, 1); 3600 if (mas_is_err(wr_mas->mas)) 3601 return 0; 3602 3603 node = mas_pop_node(wr_mas->mas); 3604 node->parent = mas_mn(wr_mas->mas)->parent; 3605 wr_mas->mas->node = mt_mk_node(node, b_type); 3606 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3607 mas_replace(wr_mas->mas, false); 3608 reuse_node: 3609 mas_update_gap(wr_mas->mas); 3610 return 1; 3611 } 3612 3613 /* 3614 * mas_root_expand() - Expand a root to a node 3615 * @mas: The maple state 3616 * @entry: The entry to store into the tree 3617 */ 3618 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3619 { 3620 void *contents = mas_root_locked(mas); 3621 enum maple_type type = maple_leaf_64; 3622 struct maple_node *node; 3623 void __rcu **slots; 3624 unsigned long *pivots; 3625 int slot = 0; 3626 3627 mas_node_count(mas, 1); 3628 if (unlikely(mas_is_err(mas))) 3629 return 0; 3630 3631 node = mas_pop_node(mas); 3632 pivots = ma_pivots(node, type); 3633 slots = ma_slots(node, type); 3634 node->parent = ma_parent_ptr( 3635 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3636 mas->node = mt_mk_node(node, type); 3637 3638 if (mas->index) { 3639 if (contents) { 3640 rcu_assign_pointer(slots[slot], contents); 3641 if (likely(mas->index > 1)) 3642 slot++; 3643 } 3644 pivots[slot++] = mas->index - 1; 3645 } 3646 3647 rcu_assign_pointer(slots[slot], entry); 3648 mas->offset = slot; 3649 pivots[slot] = mas->last; 3650 if (mas->last != ULONG_MAX) 3651 slot++; 3652 mas->depth = 1; 3653 mas_set_height(mas); 3654 3655 /* swap the new root into the tree */ 3656 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3657 ma_set_meta(node, maple_leaf_64, 0, slot); 3658 return slot; 3659 } 3660 3661 static inline void mas_store_root(struct ma_state *mas, void *entry) 3662 { 3663 if (likely((mas->last != 0) || (mas->index != 0))) 3664 mas_root_expand(mas, entry); 3665 else if (((unsigned long) (entry) & 3) == 2) 3666 mas_root_expand(mas, entry); 3667 else { 3668 rcu_assign_pointer(mas->tree->ma_root, entry); 3669 mas->node = MAS_START; 3670 } 3671 } 3672 3673 /* 3674 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3675 * spans the node. 
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes of a %NULL that will cause the node to end in a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max;
	unsigned long last = wr_mas->mas->last;
	unsigned long piv = wr_mas->r_max;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot */
	if (piv > last)
		return false;

	max = wr_mas->mas->max;
	if (unlikely(ma_is_leaf(type))) {
		/* Fits in the node, but may span slots. */
		if (last < max)
			return false;

		/* Writes to the end of the node but not null. */
		if ((last == max) && entry)
			return false;

		/*
		 * Writing ULONG_MAX is not a spanning write regardless of the
		 * value being written as long as the range fits in the node.
		 */
		if ((last == ULONG_MAX) && (last == max))
			return false;
	} else if (piv == last) {
		if (entry)
			return false;

		/* Detect spanning store wr walk */
		if (last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, piv, entry);

	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;
		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3788 * @l_wr_mas: The left maple write state 3789 * @r_wr_mas: The right maple write state 3790 */ 3791 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, 3792 struct ma_wr_state *r_wr_mas) 3793 { 3794 struct ma_state *r_mas = r_wr_mas->mas; 3795 struct ma_state *l_mas = l_wr_mas->mas; 3796 unsigned char l_slot; 3797 3798 l_slot = l_mas->offset; 3799 if (!l_wr_mas->content) 3800 l_mas->index = l_wr_mas->r_min; 3801 3802 if ((l_mas->index == l_wr_mas->r_min) && 3803 (l_slot && 3804 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) { 3805 if (l_slot > 1) 3806 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; 3807 else 3808 l_mas->index = l_mas->min; 3809 3810 l_mas->offset = l_slot - 1; 3811 } 3812 3813 if (!r_wr_mas->content) { 3814 if (r_mas->last < r_wr_mas->r_max) 3815 r_mas->last = r_wr_mas->r_max; 3816 r_mas->offset++; 3817 } else if ((r_mas->last == r_wr_mas->r_max) && 3818 (r_mas->last < r_mas->max) && 3819 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) { 3820 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots, 3821 r_wr_mas->type, r_mas->offset + 1); 3822 r_mas->offset++; 3823 } 3824 } 3825 3826 static inline void *mas_state_walk(struct ma_state *mas) 3827 { 3828 void *entry; 3829 3830 entry = mas_start(mas); 3831 if (mas_is_none(mas)) 3832 return NULL; 3833 3834 if (mas_is_ptr(mas)) 3835 return entry; 3836 3837 return mtree_range_walk(mas); 3838 } 3839 3840 /* 3841 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up 3842 * to date. 3843 * 3844 * @mas: The maple state. 3845 * 3846 * Note: Leaves mas in undesirable state. 3847 * Return: The entry for @mas->index or %NULL on dead node. 3848 */ 3849 static inline void *mtree_lookup_walk(struct ma_state *mas) 3850 { 3851 unsigned long *pivots; 3852 unsigned char offset; 3853 struct maple_node *node; 3854 struct maple_enode *next; 3855 enum maple_type type; 3856 void __rcu **slots; 3857 unsigned char end; 3858 unsigned long max; 3859 3860 next = mas->node; 3861 max = ULONG_MAX; 3862 do { 3863 offset = 0; 3864 node = mte_to_node(next); 3865 type = mte_node_type(next); 3866 pivots = ma_pivots(node, type); 3867 end = ma_data_end(node, type, pivots, max); 3868 if (unlikely(ma_dead_node(node))) 3869 goto dead_node; 3870 3871 if (pivots[offset] >= mas->index) 3872 goto next; 3873 3874 do { 3875 offset++; 3876 } while ((offset < end) && (pivots[offset] < mas->index)); 3877 3878 if (likely(offset > end)) 3879 max = pivots[offset]; 3880 3881 next: 3882 slots = ma_slots(node, type); 3883 next = mt_slot(mas->tree, slots, offset); 3884 if (unlikely(ma_dead_node(node))) 3885 goto dead_node; 3886 } while (!ma_is_leaf(type)); 3887 3888 return (void *) next; 3889 3890 dead_node: 3891 mas_reset(mas); 3892 return NULL; 3893 } 3894 3895 /* 3896 * mas_new_root() - Create a new root node that only contains the entry passed 3897 * in. 3898 * @mas: The maple state 3899 * @entry: The entry to store. 3900 * 3901 * Only valid when the index == 0 and the last == ULONG_MAX 3902 * 3903 * Return 0 on error, 1 on success. 
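 *
 * Usage sketch (illustrative; under the tree lock):
 *
 *	MA_STATE(mas, &tree, 0, ULONG_MAX);
 *	mas_new_root(&mas, entry);	(entry becomes the only range)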
3904 */ 3905 static inline int mas_new_root(struct ma_state *mas, void *entry) 3906 { 3907 struct maple_enode *root = mas_root_locked(mas); 3908 enum maple_type type = maple_leaf_64; 3909 struct maple_node *node; 3910 void __rcu **slots; 3911 unsigned long *pivots; 3912 3913 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3914 mas->depth = 0; 3915 mas_set_height(mas); 3916 rcu_assign_pointer(mas->tree->ma_root, entry); 3917 mas->node = MAS_START; 3918 goto done; 3919 } 3920 3921 mas_node_count(mas, 1); 3922 if (mas_is_err(mas)) 3923 return 0; 3924 3925 node = mas_pop_node(mas); 3926 pivots = ma_pivots(node, type); 3927 slots = ma_slots(node, type); 3928 node->parent = ma_parent_ptr( 3929 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3930 mas->node = mt_mk_node(node, type); 3931 rcu_assign_pointer(slots[0], entry); 3932 pivots[0] = mas->last; 3933 mas->depth = 1; 3934 mas_set_height(mas); 3935 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3936 3937 done: 3938 if (xa_is_node(root)) 3939 mte_destroy_walk(root, mas->tree); 3940 3941 return 1; 3942 } 3943 /* 3944 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3945 * and new nodes where necessary, then place the sub-tree in the actual tree. 3946 * Note that mas is expected to point to the node which caused the store to 3947 * span. 3948 * @wr_mas: The maple write state 3949 * 3950 * Return: 0 on error, positive on success. 3951 */ 3952 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3953 { 3954 struct maple_subtree_state mast; 3955 struct maple_big_node b_node; 3956 struct ma_state *mas; 3957 unsigned char height; 3958 3959 /* Left and Right side of spanning store */ 3960 MA_STATE(l_mas, NULL, 0, 0); 3961 MA_STATE(r_mas, NULL, 0, 0); 3962 3963 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3964 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3965 3966 /* 3967 * A store operation that spans multiple nodes is called a spanning 3968 * store and is handled early in the store call stack by the function 3969 * mas_is_span_wr(). When a spanning store is identified, the maple 3970 * state is duplicated. The first maple state walks the left tree path 3971 * to ``index``, the duplicate walks the right tree path to ``last``. 3972 * The data in the two nodes are combined into a single node, two nodes, 3973 * or possibly three nodes (see the 3-way split above). A ``NULL`` 3974 * written to the last entry of a node is considered a spanning store as 3975 * a rebalance is required for the operation to complete and an overflow 3976 * of data may happen. 3977 */ 3978 mas = wr_mas->mas; 3979 trace_ma_op(__func__, mas); 3980 3981 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3982 return mas_new_root(mas, wr_mas->entry); 3983 /* 3984 * Node rebalancing may occur due to this store, so there may be three new 3985 * entries per level plus a new root. 3986 */ 3987 height = mas_mt_height(mas); 3988 mas_node_count(mas, 1 + height * 3); 3989 if (mas_is_err(mas)) 3990 return 0; 3991 3992 /* 3993 * Set up right side. Need to get to the next offset after the spanning 3994 * store to ensure it's not NULL and to combine both the next node and 3995 * the node with the start together. 3996 */ 3997 r_mas = *mas; 3998 /* Avoid overflow, walk to next slot in the tree. */ 3999 if (r_mas.last + 1) 4000 r_mas.last++; 4001 4002 r_mas.index = r_mas.last; 4003 mas_wr_walk_index(&r_wr_mas); 4004 r_mas.last = r_mas.index = mas->last; 4005 4006 /* Set up left side. 
*/ 4007 l_mas = *mas; 4008 mas_wr_walk_index(&l_wr_mas); 4009 4010 if (!wr_mas->entry) { 4011 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); 4012 mas->offset = l_mas.offset; 4013 mas->index = l_mas.index; 4014 mas->last = l_mas.last = r_mas.last; 4015 } 4016 4017 /* expanding NULLs may make this cover the entire range */ 4018 if (!l_mas.index && r_mas.last == ULONG_MAX) { 4019 mas_set_range(mas, 0, ULONG_MAX); 4020 return mas_new_root(mas, wr_mas->entry); 4021 } 4022 4023 memset(&b_node, 0, sizeof(struct maple_big_node)); 4024 /* Copy l_mas and store the value in b_node. */ 4025 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); 4026 /* Copy r_mas into b_node. */ 4027 if (r_mas.offset <= r_wr_mas.node_end) 4028 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, 4029 &b_node, b_node.b_end + 1); 4030 else 4031 b_node.b_end++; 4032 4033 /* Stop spanning searches by searching for just index. */ 4034 l_mas.index = l_mas.last = mas->index; 4035 4036 mast.bn = &b_node; 4037 mast.orig_l = &l_mas; 4038 mast.orig_r = &r_mas; 4039 /* Combine l_mas and r_mas and split them up evenly again. */ 4040 return mas_spanning_rebalance(mas, &mast, height + 1); 4041 } 4042 4043 /* 4044 * mas_wr_node_store() - Attempt to store the value in a node 4045 * @wr_mas: The maple write state 4046 * 4047 * Attempts to reuse the node, but may allocate. 4048 * 4049 * Return: True if stored, false otherwise 4050 */ 4051 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) 4052 { 4053 struct ma_state *mas = wr_mas->mas; 4054 void __rcu **dst_slots; 4055 unsigned long *dst_pivots; 4056 unsigned char dst_offset; 4057 unsigned char new_end = wr_mas->node_end; 4058 unsigned char offset; 4059 unsigned char node_slots = mt_slots[wr_mas->type]; 4060 struct maple_node reuse, *newnode; 4061 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; 4062 bool in_rcu = mt_in_rcu(mas->tree); 4063 4064 offset = mas->offset; 4065 if (mas->last == wr_mas->r_max) { 4066 /* runs right to the end of the node */ 4067 if (mas->last == mas->max) 4068 new_end = offset; 4069 /* don't copy this offset */ 4070 wr_mas->offset_end++; 4071 } else if (mas->last < wr_mas->r_max) { 4072 /* new range ends in this range */ 4073 if (unlikely(wr_mas->r_max == ULONG_MAX)) 4074 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); 4075 4076 new_end++; 4077 } else { 4078 if (wr_mas->end_piv == mas->last) 4079 wr_mas->offset_end++; 4080 4081 new_end -= wr_mas->offset_end - offset - 1; 4082 } 4083 4084 /* new range starts within a range */ 4085 if (wr_mas->r_min < mas->index) 4086 new_end++; 4087 4088 /* Not enough room */ 4089 if (new_end >= node_slots) 4090 return false; 4091 4092 /* Not enough data. */ 4093 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && 4094 !(mas->mas_flags & MA_STATE_BULK)) 4095 return false; 4096 4097 /* set up node. 
*/ 4098 if (in_rcu) { 4099 mas_node_count(mas, 1); 4100 if (mas_is_err(mas)) 4101 return false; 4102 4103 newnode = mas_pop_node(mas); 4104 } else { 4105 memset(&reuse, 0, sizeof(struct maple_node)); 4106 newnode = &reuse; 4107 } 4108 4109 newnode->parent = mas_mn(mas)->parent; 4110 dst_pivots = ma_pivots(newnode, wr_mas->type); 4111 dst_slots = ma_slots(newnode, wr_mas->type); 4112 /* Copy from start to insert point */ 4113 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); 4114 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); 4115 dst_offset = offset; 4116 4117 /* Handle insert of new range starting after old range */ 4118 if (wr_mas->r_min < mas->index) { 4119 mas->offset++; 4120 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); 4121 dst_pivots[dst_offset++] = mas->index - 1; 4122 } 4123 4124 /* Store the new entry and range end. */ 4125 if (dst_offset < max_piv) 4126 dst_pivots[dst_offset] = mas->last; 4127 mas->offset = dst_offset; 4128 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry); 4129 4130 /* 4131 * this range wrote to the end of the node or it overwrote the rest of 4132 * the data 4133 */ 4134 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { 4135 new_end = dst_offset; 4136 goto done; 4137 } 4138 4139 dst_offset++; 4140 /* Copy to the end of node if necessary. */ 4141 copy_size = wr_mas->node_end - wr_mas->offset_end + 1; 4142 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, 4143 sizeof(void *) * copy_size); 4144 if (dst_offset < max_piv) { 4145 if (copy_size > max_piv - dst_offset) 4146 copy_size = max_piv - dst_offset; 4147 4148 memcpy(dst_pivots + dst_offset, 4149 wr_mas->pivots + wr_mas->offset_end, 4150 sizeof(unsigned long) * copy_size); 4151 } 4152 4153 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) 4154 dst_pivots[new_end] = mas->max; 4155 4156 done: 4157 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4158 if (in_rcu) { 4159 mas->node = mt_mk_node(newnode, wr_mas->type); 4160 mas_replace(mas, false); 4161 } else { 4162 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4163 } 4164 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4165 mas_update_gap(mas); 4166 return true; 4167 } 4168 4169 /* 4170 * mas_wr_slot_store: Attempt to store a value in a slot. 4171 * @wr_mas: the maple write state 4172 * 4173 * Return: True if stored, false otherwise 4174 */ 4175 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4176 { 4177 struct ma_state *mas = wr_mas->mas; 4178 unsigned long lmax; /* Logical max. */ 4179 unsigned char offset = mas->offset; 4180 4181 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || 4182 (offset != wr_mas->node_end))) 4183 return false; 4184 4185 if (offset == wr_mas->node_end - 1) 4186 lmax = mas->max; 4187 else 4188 lmax = wr_mas->pivots[offset + 1]; 4189 4190 /* going to overwrite too many slots. */ 4191 if (lmax < mas->last) 4192 return false; 4193 4194 if (wr_mas->r_min == mas->index) { 4195 /* overwriting two or more ranges with one. */ 4196 if (lmax == mas->last) 4197 return false; 4198 4199 /* Overwriting all of offset and a portion of offset + 1. */ 4200 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); 4201 wr_mas->pivots[offset] = mas->last; 4202 goto done; 4203 } 4204 4205 /* Doesn't end on the next range end. 
*/ 4206 if (lmax != mas->last) 4207 return false; 4208 4209 /* Overwriting a portion of offset and all of offset + 1 */ 4210 if ((offset + 1 < mt_pivots[wr_mas->type]) && 4211 (wr_mas->entry || wr_mas->pivots[offset + 1])) 4212 wr_mas->pivots[offset + 1] = mas->last; 4213 4214 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); 4215 wr_mas->pivots[offset] = mas->index - 1; 4216 mas->offset++; /* Keep mas accurate. */ 4217 4218 done: 4219 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4220 mas_update_gap(mas); 4221 return true; 4222 } 4223 4224 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) 4225 { 4226 while ((wr_mas->mas->last > wr_mas->end_piv) && 4227 (wr_mas->offset_end < wr_mas->node_end)) 4228 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; 4229 4230 if (wr_mas->mas->last > wr_mas->end_piv) 4231 wr_mas->end_piv = wr_mas->mas->max; 4232 } 4233 4234 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) 4235 { 4236 struct ma_state *mas = wr_mas->mas; 4237 4238 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end]) 4239 mas->last = wr_mas->end_piv; 4240 4241 /* Check next slot(s) if we are overwriting the end */ 4242 if ((mas->last == wr_mas->end_piv) && 4243 (wr_mas->node_end != wr_mas->offset_end) && 4244 !wr_mas->slots[wr_mas->offset_end + 1]) { 4245 wr_mas->offset_end++; 4246 if (wr_mas->offset_end == wr_mas->node_end) 4247 mas->last = mas->max; 4248 else 4249 mas->last = wr_mas->pivots[wr_mas->offset_end]; 4250 wr_mas->end_piv = mas->last; 4251 } 4252 4253 if (!wr_mas->content) { 4254 /* If this one is null, the next and prev are not */ 4255 mas->index = wr_mas->r_min; 4256 } else { 4257 /* Check prev slot if we are overwriting the start */ 4258 if (mas->index == wr_mas->r_min && mas->offset && 4259 !wr_mas->slots[mas->offset - 1]) { 4260 mas->offset--; 4261 wr_mas->r_min = mas->index = 4262 mas_safe_min(mas, wr_mas->pivots, mas->offset); 4263 wr_mas->r_max = wr_mas->pivots[mas->offset]; 4264 } 4265 } 4266 } 4267 4268 static inline bool mas_wr_append(struct ma_wr_state *wr_mas) 4269 { 4270 unsigned char end = wr_mas->node_end; 4271 unsigned char new_end = end + 1; 4272 struct ma_state *mas = wr_mas->mas; 4273 unsigned char node_pivots = mt_pivots[wr_mas->type]; 4274 4275 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { 4276 if (new_end < node_pivots) 4277 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4278 4279 if (new_end < node_pivots) 4280 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4281 4282 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); 4283 mas->offset = new_end; 4284 wr_mas->pivots[end] = mas->index - 1; 4285 4286 return true; 4287 } 4288 4289 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) { 4290 if (new_end < node_pivots) 4291 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4292 4293 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); 4294 if (new_end < node_pivots) 4295 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4296 4297 wr_mas->pivots[end] = mas->last; 4298 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); 4299 return true; 4300 } 4301 4302 return false; 4303 } 4304 4305 /* 4306 * mas_wr_bnode() - Slow path for a modification. 4307 * @wr_mas: The write maple state 4308 * 4309 * This is where split, rebalance end up. 
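 * The relevant node contents and the new entry are first gathered into a
 * maple_big_node, then committed back to the tree, splitting or rebalancing
 * as required.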
4310 */ 4311 static void mas_wr_bnode(struct ma_wr_state *wr_mas) 4312 { 4313 struct maple_big_node b_node; 4314 4315 trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry); 4316 memset(&b_node, 0, sizeof(struct maple_big_node)); 4317 mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end); 4318 mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end); 4319 } 4320 4321 static inline void mas_wr_modify(struct ma_wr_state *wr_mas) 4322 { 4323 unsigned char node_slots; 4324 unsigned char node_size; 4325 struct ma_state *mas = wr_mas->mas; 4326 4327 /* Direct replacement */ 4328 if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { 4329 rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry); 4330 if (!!wr_mas->entry ^ !!wr_mas->content) 4331 mas_update_gap(mas); 4332 return; 4333 } 4334 4335 /* Attempt to append */ 4336 node_slots = mt_slots[wr_mas->type]; 4337 node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2; 4338 if (mas->max == ULONG_MAX) 4339 node_size++; 4340 4341 /* slot and node store will not fit, go to the slow path */ 4342 if (unlikely(node_size >= node_slots)) 4343 goto slow_path; 4344 4345 if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) && 4346 (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) { 4347 if (!wr_mas->content || !wr_mas->entry) 4348 mas_update_gap(mas); 4349 return; 4350 } 4351 4352 if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas)) 4353 return; 4354 else if (mas_wr_node_store(wr_mas)) 4355 return; 4356 4357 if (mas_is_err(mas)) 4358 return; 4359 4360 slow_path: 4361 mas_wr_bnode(wr_mas); 4362 } 4363 4364 /* 4365 * mas_wr_store_entry() - Internal call to store a value 4366 * @mas: The maple state 4367 * @entry: The entry to store. 4368 * 4369 * Return: The contents that was stored at the index. 4370 */ 4371 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) 4372 { 4373 struct ma_state *mas = wr_mas->mas; 4374 4375 wr_mas->content = mas_start(mas); 4376 if (mas_is_none(mas) || mas_is_ptr(mas)) { 4377 mas_store_root(mas, wr_mas->entry); 4378 return wr_mas->content; 4379 } 4380 4381 if (unlikely(!mas_wr_walk(wr_mas))) { 4382 mas_wr_spanning_store(wr_mas); 4383 return wr_mas->content; 4384 } 4385 4386 /* At this point, we are at the leaf node that needs to be altered. */ 4387 wr_mas->end_piv = wr_mas->r_max; 4388 mas_wr_end_piv(wr_mas); 4389 4390 if (!wr_mas->entry) 4391 mas_wr_extend_null(wr_mas); 4392 4393 /* New root for a single pointer */ 4394 if (unlikely(!mas->index && mas->last == ULONG_MAX)) { 4395 mas_new_root(mas, wr_mas->entry); 4396 return wr_mas->content; 4397 } 4398 4399 mas_wr_modify(wr_mas); 4400 return wr_mas->content; 4401 } 4402 4403 /** 4404 * mas_insert() - Internal call to insert a value 4405 * @mas: The maple state 4406 * @entry: The entry to store 4407 * 4408 * Return: %NULL or the contents that already exists at the requested index 4409 * otherwise. The maple state needs to be checked for error conditions. 4410 */ 4411 static inline void *mas_insert(struct ma_state *mas, void *entry) 4412 { 4413 MA_WR_STATE(wr_mas, mas, entry); 4414 4415 /* 4416 * Inserting a new range inserts either 0, 1, or 2 pivots within the 4417 * tree. If the insert fits exactly into an existing gap with a value 4418 * of NULL, then the slot only needs to be written with the new value. 4419 * If the range being inserted is adjacent to another range, then only a 4420 * single pivot needs to be inserted (as well as writing the entry). 
If 4421 * the new range is within a gap but does not touch any other ranges, 4422 * then two pivots need to be inserted: the start - 1, and the end. As 4423 * usual, the entry must be written. Most operations require a new node 4424 * to be allocated and replace an existing node to ensure RCU safety, 4425 * when in RCU mode. The exception to requiring a newly allocated node 4426 * is when inserting at the end of a node (appending). When done 4427 * carefully, appending can reuse the node in place. 4428 */ 4429 wr_mas.content = mas_start(mas); 4430 if (wr_mas.content) 4431 goto exists; 4432 4433 if (mas_is_none(mas) || mas_is_ptr(mas)) { 4434 mas_store_root(mas, entry); 4435 return NULL; 4436 } 4437 4438 /* spanning writes always overwrite something */ 4439 if (!mas_wr_walk(&wr_mas)) 4440 goto exists; 4441 4442 /* At this point, we are at the leaf node that needs to be altered. */ 4443 wr_mas.offset_end = mas->offset; 4444 wr_mas.end_piv = wr_mas.r_max; 4445 4446 if (wr_mas.content || (mas->last > wr_mas.r_max)) 4447 goto exists; 4448 4449 if (!entry) 4450 return NULL; 4451 4452 mas_wr_modify(&wr_mas); 4453 return wr_mas.content; 4454 4455 exists: 4456 mas_set_err(mas, -EEXIST); 4457 return wr_mas.content; 4458 4459 } 4460 4461 /* 4462 * mas_prev_node() - Find the prev non-null entry at the same level in the 4463 * tree. The prev value will be mas->node[mas->offset] or MAS_NONE. 4464 * @mas: The maple state 4465 * @min: The lower limit to search 4466 * 4467 * The prev node value will be mas->node[mas->offset] or MAS_NONE. 4468 * Return: 1 if the node is dead, 0 otherwise. 4469 */ 4470 static inline int mas_prev_node(struct ma_state *mas, unsigned long min) 4471 { 4472 enum maple_type mt; 4473 int offset, level; 4474 void __rcu **slots; 4475 struct maple_node *node; 4476 struct maple_enode *enode; 4477 unsigned long *pivots; 4478 4479 if (mas_is_none(mas)) 4480 return 0; 4481 4482 level = 0; 4483 do { 4484 node = mas_mn(mas); 4485 if (ma_is_root(node)) 4486 goto no_entry; 4487 4488 /* Walk up. 
*/ 4489 if (unlikely(mas_ascend(mas))) 4490 return 1; 4491 offset = mas->offset; 4492 level++; 4493 } while (!offset); 4494 4495 offset--; 4496 mt = mte_node_type(mas->node); 4497 node = mas_mn(mas); 4498 slots = ma_slots(node, mt); 4499 pivots = ma_pivots(node, mt); 4500 mas->max = pivots[offset]; 4501 if (offset) 4502 mas->min = pivots[offset - 1] + 1; 4503 if (unlikely(ma_dead_node(node))) 4504 return 1; 4505 4506 if (mas->max < min) 4507 goto no_entry_min; 4508 4509 while (level > 1) { 4510 level--; 4511 enode = mas_slot(mas, slots, offset); 4512 if (unlikely(ma_dead_node(node))) 4513 return 1; 4514 4515 mas->node = enode; 4516 mt = mte_node_type(mas->node); 4517 node = mas_mn(mas); 4518 slots = ma_slots(node, mt); 4519 pivots = ma_pivots(node, mt); 4520 offset = ma_data_end(node, mt, pivots, mas->max); 4521 if (offset) 4522 mas->min = pivots[offset - 1] + 1; 4523 4524 if (offset < mt_pivots[mt]) 4525 mas->max = pivots[offset]; 4526 4527 if (mas->max < min) 4528 goto no_entry; 4529 } 4530 4531 mas->node = mas_slot(mas, slots, offset); 4532 if (unlikely(ma_dead_node(node))) 4533 return 1; 4534 4535 mas->offset = mas_data_end(mas); 4536 if (unlikely(mte_dead_node(mas->node))) 4537 return 1; 4538 4539 return 0; 4540 4541 no_entry_min: 4542 mas->offset = offset; 4543 if (offset) 4544 mas->min = pivots[offset - 1] + 1; 4545 no_entry: 4546 if (unlikely(ma_dead_node(node))) 4547 return 1; 4548 4549 mas->node = MAS_NONE; 4550 return 0; 4551 } 4552 4553 /* 4554 * mas_next_node() - Get the next node at the same level in the tree. 4555 * @mas: The maple state 4556 * @max: The maximum pivot value to check. 4557 * 4558 * The next value will be mas->node[mas->offset] or MAS_NONE. 4559 * Return: 1 on dead node, 0 otherwise. 4560 */ 4561 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, 4562 unsigned long max) 4563 { 4564 unsigned long min, pivot; 4565 unsigned long *pivots; 4566 struct maple_enode *enode; 4567 int level = 0; 4568 unsigned char offset; 4569 enum maple_type mt; 4570 void __rcu **slots; 4571 4572 if (mas->max >= max) 4573 goto no_entry; 4574 4575 level = 0; 4576 do { 4577 if (ma_is_root(node)) 4578 goto no_entry; 4579 4580 min = mas->max + 1; 4581 if (min > max) 4582 goto no_entry; 4583 4584 if (unlikely(mas_ascend(mas))) 4585 return 1; 4586 4587 offset = mas->offset; 4588 level++; 4589 node = mas_mn(mas); 4590 mt = mte_node_type(mas->node); 4591 pivots = ma_pivots(node, mt); 4592 } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max))); 4593 4594 slots = ma_slots(node, mt); 4595 pivot = mas_safe_pivot(mas, pivots, ++offset, mt); 4596 while (unlikely(level > 1)) { 4597 /* Descend, if necessary */ 4598 enode = mas_slot(mas, slots, offset); 4599 if (unlikely(ma_dead_node(node))) 4600 return 1; 4601 4602 mas->node = enode; 4603 level--; 4604 node = mas_mn(mas); 4605 mt = mte_node_type(mas->node); 4606 slots = ma_slots(node, mt); 4607 pivots = ma_pivots(node, mt); 4608 offset = 0; 4609 pivot = pivots[0]; 4610 } 4611 4612 enode = mas_slot(mas, slots, offset); 4613 if (unlikely(ma_dead_node(node))) 4614 return 1; 4615 4616 mas->node = enode; 4617 mas->min = min; 4618 mas->max = pivot; 4619 return 0; 4620 4621 no_entry: 4622 if (unlikely(ma_dead_node(node))) 4623 return 1; 4624 4625 mas->node = MAS_NONE; 4626 return 0; 4627 } 4628 4629 /* 4630 * mas_next_nentry() - Get the next node entry 4631 * @mas: The maple state 4632 * @max: The maximum value to check 4633 * @*range_start: Pointer to store the start of the range. 
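 * @node: The maple node being examined
 * @type: The node type of @node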
4634 * 4635 * Sets @mas->offset to the offset of the next node entry, @mas->last to the 4636 * pivot of the entry. 4637 * 4638 * Return: The next entry, %NULL otherwise 4639 */ 4640 static inline void *mas_next_nentry(struct ma_state *mas, 4641 struct maple_node *node, unsigned long max, enum maple_type type) 4642 { 4643 unsigned char count; 4644 unsigned long pivot; 4645 unsigned long *pivots; 4646 void __rcu **slots; 4647 void *entry; 4648 4649 if (mas->last == mas->max) { 4650 mas->index = mas->max; 4651 return NULL; 4652 } 4653 4654 pivots = ma_pivots(node, type); 4655 slots = ma_slots(node, type); 4656 mas->index = mas_safe_min(mas, pivots, mas->offset); 4657 if (ma_dead_node(node)) 4658 return NULL; 4659 4660 if (mas->index > max) 4661 return NULL; 4662 4663 count = ma_data_end(node, type, pivots, mas->max); 4664 if (mas->offset > count) 4665 return NULL; 4666 4667 while (mas->offset < count) { 4668 pivot = pivots[mas->offset]; 4669 entry = mas_slot(mas, slots, mas->offset); 4670 if (ma_dead_node(node)) 4671 return NULL; 4672 4673 if (entry) 4674 goto found; 4675 4676 if (pivot >= max) 4677 return NULL; 4678 4679 mas->index = pivot + 1; 4680 mas->offset++; 4681 } 4682 4683 if (mas->index > mas->max) { 4684 mas->index = mas->last; 4685 return NULL; 4686 } 4687 4688 pivot = mas_safe_pivot(mas, pivots, mas->offset, type); 4689 entry = mas_slot(mas, slots, mas->offset); 4690 if (ma_dead_node(node)) 4691 return NULL; 4692 4693 if (!pivot) 4694 return NULL; 4695 4696 if (!entry) 4697 return NULL; 4698 4699 found: 4700 mas->last = pivot; 4701 return entry; 4702 } 4703 4704 static inline void mas_rewalk(struct ma_state *mas, unsigned long index) 4705 { 4706 4707 retry: 4708 mas_set(mas, index); 4709 mas_state_walk(mas); 4710 if (mas_is_start(mas)) 4711 goto retry; 4712 4713 return; 4714 4715 } 4716 4717 /* 4718 * mas_next_entry() - Internal function to get the next entry. 4719 * @mas: The maple state 4720 * @limit: The maximum range start. 4721 * 4722 * Set the @mas->node to the next entry and the range_start to 4723 * the beginning value for the entry. Does not check beyond @limit. 4724 * Sets @mas->index and @mas->last to the limit if it is hit. 4725 * Restarts on dead nodes. 4726 * 4727 * Return: the next entry or %NULL. 
4728 */ 4729 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4730 { 4731 void *entry = NULL; 4732 struct maple_enode *prev_node; 4733 struct maple_node *node; 4734 unsigned char offset; 4735 unsigned long last; 4736 enum maple_type mt; 4737 4738 last = mas->last; 4739 retry: 4740 offset = mas->offset; 4741 prev_node = mas->node; 4742 node = mas_mn(mas); 4743 mt = mte_node_type(mas->node); 4744 mas->offset++; 4745 if (unlikely(mas->offset >= mt_slots[mt])) { 4746 mas->offset = mt_slots[mt] - 1; 4747 goto next_node; 4748 } 4749 4750 while (!mas_is_none(mas)) { 4751 entry = mas_next_nentry(mas, node, limit, mt); 4752 if (unlikely(ma_dead_node(node))) { 4753 mas_rewalk(mas, last); 4754 goto retry; 4755 } 4756 4757 if (likely(entry)) 4758 return entry; 4759 4760 if (unlikely((mas->index > limit))) 4761 break; 4762 4763 next_node: 4764 prev_node = mas->node; 4765 offset = mas->offset; 4766 if (unlikely(mas_next_node(mas, node, limit))) { 4767 mas_rewalk(mas, last); 4768 goto retry; 4769 } 4770 mas->offset = 0; 4771 node = mas_mn(mas); 4772 mt = mte_node_type(mas->node); 4773 } 4774 4775 mas->index = mas->last = limit; 4776 mas->offset = offset; 4777 mas->node = prev_node; 4778 return NULL; 4779 } 4780 4781 /* 4782 * mas_prev_nentry() - Get the previous node entry. 4783 * @mas: The maple state. 4784 * @limit: The lower limit to check for a value. 4785 * 4786 * Return: the entry, %NULL otherwise. 4787 */ 4788 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, 4789 unsigned long index) 4790 { 4791 unsigned long pivot, min; 4792 unsigned char offset; 4793 struct maple_node *mn; 4794 enum maple_type mt; 4795 unsigned long *pivots; 4796 void __rcu **slots; 4797 void *entry; 4798 4799 retry: 4800 if (!mas->offset) 4801 return NULL; 4802 4803 mn = mas_mn(mas); 4804 mt = mte_node_type(mas->node); 4805 offset = mas->offset - 1; 4806 if (offset >= mt_slots[mt]) 4807 offset = mt_slots[mt] - 1; 4808 4809 slots = ma_slots(mn, mt); 4810 pivots = ma_pivots(mn, mt); 4811 if (offset == mt_pivots[mt]) 4812 pivot = mas->max; 4813 else 4814 pivot = pivots[offset]; 4815 4816 if (unlikely(ma_dead_node(mn))) { 4817 mas_rewalk(mas, index); 4818 goto retry; 4819 } 4820 4821 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || 4822 !pivot)) 4823 pivot = pivots[--offset]; 4824 4825 min = mas_safe_min(mas, pivots, offset); 4826 entry = mas_slot(mas, slots, offset); 4827 if (unlikely(ma_dead_node(mn))) { 4828 mas_rewalk(mas, index); 4829 goto retry; 4830 } 4831 4832 if (likely(entry)) { 4833 mas->offset = offset; 4834 mas->last = pivot; 4835 mas->index = min; 4836 } 4837 return entry; 4838 } 4839 4840 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) 4841 { 4842 void *entry; 4843 4844 retry: 4845 while (likely(!mas_is_none(mas))) { 4846 entry = mas_prev_nentry(mas, min, mas->index); 4847 if (unlikely(mas->last < min)) 4848 goto not_found; 4849 4850 if (likely(entry)) 4851 return entry; 4852 4853 if (unlikely(mas_prev_node(mas, min))) { 4854 mas_rewalk(mas, mas->index); 4855 goto retry; 4856 } 4857 4858 mas->offset++; 4859 } 4860 4861 mas->offset--; 4862 not_found: 4863 mas->index = mas->last = min; 4864 return NULL; 4865 } 4866 4867 /* 4868 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the 4869 * highest gap address of a given size in a given node and descend. 4870 * @mas: The maple state 4871 * @size: The needed size. 4872 * 4873 * Return: True if found in a leaf, false otherwise. 
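 * Allocation-range nodes keep a per-slot gap array, so the walk can reject
 * a subtree whose largest gap is smaller than @size without descending
 * into it; when no gap array is present, an empty slot implies a gap
 * covering that slot's whole range.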
4874 * 4875 */ 4876 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) 4877 { 4878 enum maple_type type = mte_node_type(mas->node); 4879 struct maple_node *node = mas_mn(mas); 4880 unsigned long *pivots, *gaps; 4881 void __rcu **slots; 4882 unsigned long gap = 0; 4883 unsigned long max, min, index; 4884 unsigned char offset; 4885 4886 if (unlikely(mas_is_err(mas))) 4887 return true; 4888 4889 if (ma_is_dense(type)) { 4890 /* dense nodes. */ 4891 mas->offset = (unsigned char)(mas->index - mas->min); 4892 return true; 4893 } 4894 4895 pivots = ma_pivots(node, type); 4896 slots = ma_slots(node, type); 4897 gaps = ma_gaps(node, type); 4898 offset = mas->offset; 4899 min = mas_safe_min(mas, pivots, offset); 4900 /* Skip out of bounds. */ 4901 while (mas->last < min) 4902 min = mas_safe_min(mas, pivots, --offset); 4903 4904 max = mas_safe_pivot(mas, pivots, offset, type); 4905 index = mas->index; 4906 while (index <= max) { 4907 gap = 0; 4908 if (gaps) 4909 gap = gaps[offset]; 4910 else if (!mas_slot(mas, slots, offset)) 4911 gap = max - min + 1; 4912 4913 if (gap) { 4914 if ((size <= gap) && (size <= mas->last - min + 1)) 4915 break; 4916 4917 if (!gaps) { 4918 /* Skip the next slot, it cannot be a gap. */ 4919 if (offset < 2) 4920 goto ascend; 4921 4922 offset -= 2; 4923 max = pivots[offset]; 4924 min = mas_safe_min(mas, pivots, offset); 4925 continue; 4926 } 4927 } 4928 4929 if (!offset) 4930 goto ascend; 4931 4932 offset--; 4933 max = min - 1; 4934 min = mas_safe_min(mas, pivots, offset); 4935 } 4936 4937 if (unlikely(index > max)) { 4938 mas_set_err(mas, -EBUSY); 4939 return false; 4940 } 4941 4942 if (unlikely(ma_is_leaf(type))) { 4943 mas->offset = offset; 4944 mas->min = min; 4945 mas->max = min + gap - 1; 4946 return true; 4947 } 4948 4949 /* descend, only happens under lock. 
*/ 4950 mas->node = mas_slot(mas, slots, offset); 4951 mas->min = min; 4952 mas->max = max; 4953 mas->offset = mas_data_end(mas); 4954 return false; 4955 4956 ascend: 4957 if (mte_is_root(mas->node)) 4958 mas_set_err(mas, -EBUSY); 4959 4960 return false; 4961 } 4962 4963 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4964 { 4965 enum maple_type type = mte_node_type(mas->node); 4966 unsigned long pivot, min, gap = 0; 4967 unsigned char offset; 4968 unsigned long *gaps; 4969 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 4970 void __rcu **slots = ma_slots(mas_mn(mas), type); 4971 bool found = false; 4972 4973 if (ma_is_dense(type)) { 4974 mas->offset = (unsigned char)(mas->index - mas->min); 4975 return true; 4976 } 4977 4978 gaps = ma_gaps(mte_to_node(mas->node), type); 4979 offset = mas->offset; 4980 min = mas_safe_min(mas, pivots, offset); 4981 for (; offset < mt_slots[type]; offset++) { 4982 pivot = mas_safe_pivot(mas, pivots, offset, type); 4983 if (offset && !pivot) 4984 break; 4985 4986 /* Not within lower bounds */ 4987 if (mas->index > pivot) 4988 goto next_slot; 4989 4990 if (gaps) 4991 gap = gaps[offset]; 4992 else if (!mas_slot(mas, slots, offset)) 4993 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 4994 else 4995 goto next_slot; 4996 4997 if (gap >= size) { 4998 if (ma_is_leaf(type)) { 4999 found = true; 5000 goto done; 5001 } 5002 if (mas->index <= pivot) { 5003 mas->node = mas_slot(mas, slots, offset); 5004 mas->min = min; 5005 mas->max = pivot; 5006 offset = 0; 5007 break; 5008 } 5009 } 5010 next_slot: 5011 min = pivot + 1; 5012 if (mas->last <= pivot) { 5013 mas_set_err(mas, -EBUSY); 5014 return true; 5015 } 5016 } 5017 5018 if (mte_is_root(mas->node)) 5019 found = true; 5020 done: 5021 mas->offset = offset; 5022 return found; 5023 } 5024 5025 /** 5026 * mas_walk() - Search for @mas->index in the tree. 5027 * @mas: The maple state. 5028 * 5029 * mas->index and mas->last will be set to the range if there is a value. If 5030 * mas->node is MAS_NONE, reset to MAS_START. 5031 * 5032 * Return: the entry at the location or %NULL. 5033 */ 5034 void *mas_walk(struct ma_state *mas) 5035 { 5036 void *entry; 5037 5038 retry: 5039 entry = mas_state_walk(mas); 5040 if (mas_is_start(mas)) 5041 goto retry; 5042 5043 if (mas_is_ptr(mas)) { 5044 if (!mas->index) { 5045 mas->last = 0; 5046 } else { 5047 mas->index = 1; 5048 mas->last = ULONG_MAX; 5049 } 5050 return entry; 5051 } 5052 5053 if (mas_is_none(mas)) { 5054 mas->index = 0; 5055 mas->last = ULONG_MAX; 5056 } 5057 5058 return entry; 5059 } 5060 EXPORT_SYMBOL_GPL(mas_walk); 5061 5062 static inline bool mas_rewind_node(struct ma_state *mas) 5063 { 5064 unsigned char slot; 5065 5066 do { 5067 if (mte_is_root(mas->node)) { 5068 slot = mas->offset; 5069 if (!slot) 5070 return false; 5071 } else { 5072 mas_ascend(mas); 5073 slot = mas->offset; 5074 } 5075 } while (!slot); 5076 5077 mas->offset = --slot; 5078 return true; 5079 } 5080 5081 /* 5082 * mas_skip_node() - Internal function. Skip over a node. 5083 * @mas: The maple state. 5084 * 5085 * Return: true if there is another node, false otherwise. 
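 * Used by mas_awalk() to step past a node that has already been searched
 * without finding a suitable gap.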
5086 */ 5087 static inline bool mas_skip_node(struct ma_state *mas) 5088 { 5089 unsigned char slot, slot_count; 5090 unsigned long *pivots; 5091 enum maple_type mt; 5092 5093 mt = mte_node_type(mas->node); 5094 slot_count = mt_slots[mt] - 1; 5095 do { 5096 if (mte_is_root(mas->node)) { 5097 slot = mas->offset; 5098 if (slot > slot_count) { 5099 mas_set_err(mas, -EBUSY); 5100 return false; 5101 } 5102 } else { 5103 mas_ascend(mas); 5104 slot = mas->offset; 5105 mt = mte_node_type(mas->node); 5106 slot_count = mt_slots[mt] - 1; 5107 } 5108 } while (slot > slot_count); 5109 5110 mas->offset = ++slot; 5111 pivots = ma_pivots(mas_mn(mas), mt); 5112 if (slot > 0) 5113 mas->min = pivots[slot - 1] + 1; 5114 5115 if (slot <= slot_count) 5116 mas->max = pivots[slot]; 5117 5118 return true; 5119 } 5120 5121 /* 5122 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5123 * @size 5124 * @mas: The maple state 5125 * @size: The size of the gap required 5126 * 5127 * Search between @mas->index and @mas->last for a gap of @size. 5128 */ 5129 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5130 { 5131 struct maple_enode *last = NULL; 5132 5133 /* 5134 * There are 4 options: 5135 * go to child (descend) 5136 * go back to parent (ascend) 5137 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5138 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5139 */ 5140 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5141 if (last == mas->node) 5142 mas_skip_node(mas); 5143 else 5144 last = mas->node; 5145 } 5146 } 5147 5148 /* 5149 * mas_fill_gap() - Fill a located gap with @entry. 5150 * @mas: The maple state 5151 * @entry: The value to store 5152 * @slot: The offset into the node to store the @entry 5153 * @size: The size of the entry 5154 * @index: The start location 5155 */ 5156 static inline void mas_fill_gap(struct ma_state *mas, void *entry, 5157 unsigned char slot, unsigned long size, unsigned long *index) 5158 { 5159 MA_WR_STATE(wr_mas, mas, entry); 5160 unsigned char pslot = mte_parent_slot(mas->node); 5161 struct maple_enode *mn = mas->node; 5162 unsigned long *pivots; 5163 enum maple_type ptype; 5164 /* 5165 * mas->index is the start address for the search 5166 * which may no longer be needed. 5167 * mas->last is the end address for the search 5168 */ 5169 5170 *index = mas->index; 5171 mas->last = mas->index + size - 1; 5172 5173 /* 5174 * It is possible that using mas->max and mas->min to correctly 5175 * calculate the index and last will cause an issue in the gap 5176 * calculation, so fix the ma_state here 5177 */ 5178 mas_ascend(mas); 5179 ptype = mte_node_type(mas->node); 5180 pivots = ma_pivots(mas_mn(mas), ptype); 5181 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); 5182 mas->min = mas_safe_min(mas, pivots, pslot); 5183 mas->node = mn; 5184 mas->offset = slot; 5185 mas_wr_store_entry(&wr_mas); 5186 } 5187 5188 /* 5189 * mas_sparse_area() - Internal function. Return upper or lower limit when 5190 * searching for a gap in an empty tree. 
5191 * @mas: The maple state 5192 * @min: the minimum range 5193 * @max: The maximum range 5194 * @size: The size of the gap 5195 * @fwd: Searching forward or back 5196 */ 5197 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, 5198 unsigned long max, unsigned long size, bool fwd) 5199 { 5200 unsigned long start = 0; 5201 5202 if (!unlikely(mas_is_none(mas))) 5203 start++; 5204 /* mas_is_ptr */ 5205 5206 if (start < min) 5207 start = min; 5208 5209 if (fwd) { 5210 mas->index = start; 5211 mas->last = start + size - 1; 5212 return; 5213 } 5214 5215 mas->index = max; 5216 } 5217 5218 /* 5219 * mas_empty_area() - Get the lowest address within the range that is 5220 * sufficient for the size requested. 5221 * @mas: The maple state 5222 * @min: The lowest value of the range 5223 * @max: The highest value of the range 5224 * @size: The size needed 5225 */ 5226 int mas_empty_area(struct ma_state *mas, unsigned long min, 5227 unsigned long max, unsigned long size) 5228 { 5229 unsigned char offset; 5230 unsigned long *pivots; 5231 enum maple_type mt; 5232 5233 if (mas_is_start(mas)) 5234 mas_start(mas); 5235 else if (mas->offset >= 2) 5236 mas->offset -= 2; 5237 else if (!mas_skip_node(mas)) 5238 return -EBUSY; 5239 5240 /* Empty set */ 5241 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5242 mas_sparse_area(mas, min, max, size, true); 5243 return 0; 5244 } 5245 5246 /* The start of the window can only be within these values */ 5247 mas->index = min; 5248 mas->last = max; 5249 mas_awalk(mas, size); 5250 5251 if (unlikely(mas_is_err(mas))) 5252 return xa_err(mas->node); 5253 5254 offset = mas->offset; 5255 if (unlikely(offset == MAPLE_NODE_SLOTS)) 5256 return -EBUSY; 5257 5258 mt = mte_node_type(mas->node); 5259 pivots = ma_pivots(mas_mn(mas), mt); 5260 if (offset) 5261 mas->min = pivots[offset - 1] + 1; 5262 5263 if (offset < mt_pivots[mt]) 5264 mas->max = pivots[offset]; 5265 5266 if (mas->index < mas->min) 5267 mas->index = mas->min; 5268 5269 mas->last = mas->index + size - 1; 5270 return 0; 5271 } 5272 EXPORT_SYMBOL_GPL(mas_empty_area); 5273 5274 /* 5275 * mas_empty_area_rev() - Get the highest address within the range that is 5276 * sufficient for the size requested. 5277 * @mas: The maple state 5278 * @min: The lowest value of the range 5279 * @max: The highest value of the range 5280 * @size: The size needed 5281 */ 5282 int mas_empty_area_rev(struct ma_state *mas, unsigned long min, 5283 unsigned long max, unsigned long size) 5284 { 5285 struct maple_enode *last = mas->node; 5286 5287 if (mas_is_start(mas)) { 5288 mas_start(mas); 5289 mas->offset = mas_data_end(mas); 5290 } else if (mas->offset >= 2) { 5291 mas->offset -= 2; 5292 } else if (!mas_rewind_node(mas)) { 5293 return -EBUSY; 5294 } 5295 5296 /* Empty set. */ 5297 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5298 mas_sparse_area(mas, min, max, size, false); 5299 return 0; 5300 } 5301 5302 /* The start of the window can only be within these values. */ 5303 mas->index = min; 5304 mas->last = max; 5305 5306 while (!mas_rev_awalk(mas, size)) { 5307 if (last == mas->node) { 5308 if (!mas_rewind_node(mas)) 5309 return -EBUSY; 5310 } else { 5311 last = mas->node; 5312 } 5313 } 5314 5315 if (mas_is_err(mas)) 5316 return xa_err(mas->node); 5317 5318 if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) 5319 return -EBUSY; 5320 5321 /* 5322 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If 5323 * the maximum is outside the window we are searching, then use the last 5324 * location in the search. 
5325 * mas->max and mas->min is the range of the gap. 5326 * mas->index and mas->last are currently set to the search range. 5327 */ 5328 5329 /* Trim the upper limit to the max. */ 5330 if (mas->max <= mas->last) 5331 mas->last = mas->max; 5332 5333 mas->index = mas->last - size + 1; 5334 return 0; 5335 } 5336 EXPORT_SYMBOL_GPL(mas_empty_area_rev); 5337 5338 static inline int mas_alloc(struct ma_state *mas, void *entry, 5339 unsigned long size, unsigned long *index) 5340 { 5341 unsigned long min; 5342 5343 mas_start(mas); 5344 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5345 mas_root_expand(mas, entry); 5346 if (mas_is_err(mas)) 5347 return xa_err(mas->node); 5348 5349 if (!mas->index) 5350 return mte_pivot(mas->node, 0); 5351 return mte_pivot(mas->node, 1); 5352 } 5353 5354 /* Must be walking a tree. */ 5355 mas_awalk(mas, size); 5356 if (mas_is_err(mas)) 5357 return xa_err(mas->node); 5358 5359 if (mas->offset == MAPLE_NODE_SLOTS) 5360 goto no_gap; 5361 5362 /* 5363 * At this point, mas->node points to the right node and we have an 5364 * offset that has a sufficient gap. 5365 */ 5366 min = mas->min; 5367 if (mas->offset) 5368 min = mte_pivot(mas->node, mas->offset - 1) + 1; 5369 5370 if (mas->index < min) 5371 mas->index = min; 5372 5373 mas_fill_gap(mas, entry, mas->offset, size, index); 5374 return 0; 5375 5376 no_gap: 5377 return -EBUSY; 5378 } 5379 5380 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, 5381 unsigned long max, void *entry, 5382 unsigned long size, unsigned long *index) 5383 { 5384 int ret = 0; 5385 5386 ret = mas_empty_area_rev(mas, min, max, size); 5387 if (ret) 5388 return ret; 5389 5390 if (mas_is_err(mas)) 5391 return xa_err(mas->node); 5392 5393 if (mas->offset == MAPLE_NODE_SLOTS) 5394 goto no_gap; 5395 5396 mas_fill_gap(mas, entry, mas->offset, size, index); 5397 return 0; 5398 5399 no_gap: 5400 return -EBUSY; 5401 } 5402 5403 /* 5404 * mas_dead_leaves() - Mark all leaves of a node as dead. 5405 * @mas: The maple state 5406 * @slots: Pointer to the slot array 5407 * 5408 * Must hold the write lock. 5409 * 5410 * Return: The number of leaves marked as dead. 
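 * The smp_wmb() in the loop orders setting the dead flag before the slot
 * is republished, so a concurrent RCU reader sees the dead node and
 * restarts rather than trusting stale slots.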
5411 */ 5412 static inline 5413 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots) 5414 { 5415 struct maple_node *node; 5416 enum maple_type type; 5417 void *entry; 5418 int offset; 5419 5420 for (offset = 0; offset < mt_slot_count(mas->node); offset++) { 5421 entry = mas_slot_locked(mas, slots, offset); 5422 type = mte_node_type(entry); 5423 node = mte_to_node(entry); 5424 /* Use both node and type to catch LE & BE metadata */ 5425 if (!node || !type) 5426 break; 5427 5428 mte_set_node_dead(entry); 5429 smp_wmb(); /* Needed for RCU */ 5430 node->type = type; 5431 rcu_assign_pointer(slots[offset], node); 5432 } 5433 5434 return offset; 5435 } 5436 5437 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset) 5438 { 5439 struct maple_node *node, *next; 5440 void __rcu **slots = NULL; 5441 5442 next = mas_mn(mas); 5443 do { 5444 mas->node = ma_enode_ptr(next); 5445 node = mas_mn(mas); 5446 slots = ma_slots(node, node->type); 5447 next = mas_slot_locked(mas, slots, offset); 5448 offset = 0; 5449 } while (!ma_is_leaf(next->type)); 5450 5451 return slots; 5452 } 5453 5454 static void mt_free_walk(struct rcu_head *head) 5455 { 5456 void __rcu **slots; 5457 struct maple_node *node, *start; 5458 struct maple_tree mt; 5459 unsigned char offset; 5460 enum maple_type type; 5461 MA_STATE(mas, &mt, 0, 0); 5462 5463 node = container_of(head, struct maple_node, rcu); 5464 5465 if (ma_is_leaf(node->type)) 5466 goto free_leaf; 5467 5468 mt_init_flags(&mt, node->ma_flags); 5469 mas_lock(&mas); 5470 start = node; 5471 mas.node = mt_mk_node(node, node->type); 5472 slots = mas_dead_walk(&mas, 0); 5473 node = mas_mn(&mas); 5474 do { 5475 mt_free_bulk(node->slot_len, slots); 5476 offset = node->parent_slot + 1; 5477 mas.node = node->piv_parent; 5478 if (mas_mn(&mas) == node) 5479 goto start_slots_free; 5480 5481 type = mte_node_type(mas.node); 5482 slots = ma_slots(mte_to_node(mas.node), type); 5483 if ((offset < mt_slots[type]) && (slots[offset])) 5484 slots = mas_dead_walk(&mas, offset); 5485 5486 node = mas_mn(&mas); 5487 } while ((node != start) || (node->slot_len < offset)); 5488 5489 slots = ma_slots(node, node->type); 5490 mt_free_bulk(node->slot_len, slots); 5491 5492 start_slots_free: 5493 mas_unlock(&mas); 5494 free_leaf: 5495 mt_free_rcu(&node->rcu); 5496 } 5497 5498 static inline void __rcu **mas_destroy_descend(struct ma_state *mas, 5499 struct maple_enode *prev, unsigned char offset) 5500 { 5501 struct maple_node *node; 5502 struct maple_enode *next = mas->node; 5503 void __rcu **slots = NULL; 5504 5505 do { 5506 mas->node = next; 5507 node = mas_mn(mas); 5508 slots = ma_slots(node, mte_node_type(mas->node)); 5509 next = mas_slot_locked(mas, slots, 0); 5510 if ((mte_dead_node(next))) 5511 next = mas_slot_locked(mas, slots, 1); 5512 5513 mte_set_node_dead(mas->node); 5514 node->type = mte_node_type(mas->node); 5515 node->piv_parent = prev; 5516 node->parent_slot = offset; 5517 offset = 0; 5518 prev = mas->node; 5519 } while (!mte_is_leaf(next)); 5520 5521 return slots; 5522 } 5523 5524 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags, 5525 bool free) 5526 { 5527 void __rcu **slots; 5528 struct maple_node *node = mte_to_node(enode); 5529 struct maple_enode *start; 5530 struct maple_tree mt; 5531 5532 MA_STATE(mas, &mt, 0, 0); 5533 5534 if (mte_is_leaf(enode)) 5535 goto free_leaf; 5536 5537 mt_init_flags(&mt, ma_flags); 5538 mas_lock(&mas); 5539 5540 mas.node = start = enode; 5541 slots = mas_destroy_descend(&mas, start, 0); 5542 
	node = mas_mn(&mas);
5543 	do {
5544 		enum maple_type type;
5545 		unsigned char offset;
5546 		struct maple_enode *parent, *tmp;
5547 
5548 		node->slot_len = mas_dead_leaves(&mas, slots);
5549 		if (free)
5550 			mt_free_bulk(node->slot_len, slots);
5551 		offset = node->parent_slot + 1;
5552 		mas.node = node->piv_parent;
5553 		if (mas_mn(&mas) == node)
5554 			goto start_slots_free;
5555 
5556 		type = mte_node_type(mas.node);
5557 		slots = ma_slots(mte_to_node(mas.node), type);
5558 		if (offset >= mt_slots[type])
5559 			goto next;
5560 
5561 		tmp = mas_slot_locked(&mas, slots, offset);
5562 		if (mte_node_type(tmp) && mte_to_node(tmp)) {
5563 			parent = mas.node;
5564 			mas.node = tmp;
5565 			slots = mas_destroy_descend(&mas, parent, offset);
5566 		}
5567 next:
5568 		node = mas_mn(&mas);
5569 	} while (start != mas.node);
5570 
5571 	node = mas_mn(&mas);
5572 	node->slot_len = mas_dead_leaves(&mas, slots);
5573 	if (free)
5574 		mt_free_bulk(node->slot_len, slots);
5575 
5576 start_slots_free:
5577 	mas_unlock(&mas);
5578 
5579 free_leaf:
5580 	if (free)
5581 		mt_free_rcu(&node->rcu);
5582 }
5583 
5584 /*
5585  * mte_destroy_walk() - Free a tree or sub-tree.
5586  * @enode: The encoded maple node (maple_enode) to start
5587  * @mt: The tree to free - needed for node types.
5588  *
5589  * Must hold the write lock.
5590  */
5591 static inline void mte_destroy_walk(struct maple_enode *enode,
5592 				    struct maple_tree *mt)
5593 {
5594 	struct maple_node *node = mte_to_node(enode);
5595 
5596 	if (mt_in_rcu(mt)) {
5597 		mt_destroy_walk(enode, mt->ma_flags, false);
5598 		call_rcu(&node->rcu, mt_free_walk);
5599 	} else {
5600 		mt_destroy_walk(enode, mt->ma_flags, true);
5601 	}
5602 }
5603 
5604 static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
5605 {
5606 	if (!mas_is_start(wr_mas->mas)) {
5607 		if (mas_is_none(wr_mas->mas)) {
5608 			mas_reset(wr_mas->mas);
5609 		} else {
5610 			wr_mas->r_max = wr_mas->mas->max;
5611 			wr_mas->type = mte_node_type(wr_mas->mas->node);
5612 			if (mas_is_span_wr(wr_mas))
5613 				mas_reset(wr_mas->mas);
5614 		}
5615 	}
5616 
5617 }
5618 
5619 /* Interface */
5620 
5621 /**
5622  * mas_store() - Store an @entry.
5623  * @mas: The maple state.
5624  * @entry: The entry to store.
5625  *
5626  * The @mas->index and @mas->last are used to set the range for the @entry.
5627  * Note: The @mas should have pre-allocated entries to ensure there is memory to
5628  * store the entry. Please see mas_expected_entries()/mas_destroy() for more details.
5629  *
5630  * Return: the first entry between mas->index and mas->last or %NULL.
5631  */
5632 void *mas_store(struct ma_state *mas, void *entry)
5633 {
5634 	MA_WR_STATE(wr_mas, mas, entry);
5635 
5636 	trace_ma_write(__func__, mas, 0, entry);
5637 #ifdef CONFIG_DEBUG_MAPLE_TREE
5638 	if (mas->index > mas->last)
5639 		pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
5640 	MT_BUG_ON(mas->tree, mas->index > mas->last);
5641 	if (mas->index > mas->last) {
5642 		mas_set_err(mas, -EINVAL);
5643 		return NULL;
5644 	}
5645 
5646 #endif
5647 
5648 	/*
5649 	 * Storing is the same operation as insert with the added caveat that it
5650 	 * can overwrite entries. Although this seems simple enough, one may
5651 	 * want to examine what happens if a single store operation was to
5652 	 * overwrite multiple entries within a self-balancing B-Tree.
5653 	 */
5654 	mas_wr_store_setup(&wr_mas);
5655 	mas_wr_store_entry(&wr_mas);
5656 	return wr_mas.content;
5657 }
5658 EXPORT_SYMBOL_GPL(mas_store);
5659 
5660 /**
5661  * mas_store_gfp() - Store a value into the tree.
5662 * @mas: The maple state 5663 * @entry: The entry to store 5664 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5665 * 5666 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5667 * be allocated. 5668 */ 5669 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5670 { 5671 MA_WR_STATE(wr_mas, mas, entry); 5672 5673 mas_wr_store_setup(&wr_mas); 5674 trace_ma_write(__func__, mas, 0, entry); 5675 retry: 5676 mas_wr_store_entry(&wr_mas); 5677 if (unlikely(mas_nomem(mas, gfp))) 5678 goto retry; 5679 5680 if (unlikely(mas_is_err(mas))) 5681 return xa_err(mas->node); 5682 5683 return 0; 5684 } 5685 EXPORT_SYMBOL_GPL(mas_store_gfp); 5686 5687 /** 5688 * mas_store_prealloc() - Store a value into the tree using memory 5689 * preallocated in the maple state. 5690 * @mas: The maple state 5691 * @entry: The entry to store. 5692 */ 5693 void mas_store_prealloc(struct ma_state *mas, void *entry) 5694 { 5695 MA_WR_STATE(wr_mas, mas, entry); 5696 5697 mas_wr_store_setup(&wr_mas); 5698 trace_ma_write(__func__, mas, 0, entry); 5699 mas_wr_store_entry(&wr_mas); 5700 BUG_ON(mas_is_err(mas)); 5701 mas_destroy(mas); 5702 } 5703 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5704 5705 /** 5706 * mas_preallocate() - Preallocate enough nodes for a store operation 5707 * @mas: The maple state 5708 * @entry: The entry that will be stored 5709 * @gfp: The GFP_FLAGS to use for allocations. 5710 * 5711 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5712 */ 5713 int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp) 5714 { 5715 int ret; 5716 5717 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5718 mas->mas_flags |= MA_STATE_PREALLOC; 5719 if (likely(!mas_is_err(mas))) 5720 return 0; 5721 5722 mas_set_alloc_req(mas, 0); 5723 ret = xa_err(mas->node); 5724 mas_reset(mas); 5725 mas_destroy(mas); 5726 mas_reset(mas); 5727 return ret; 5728 } 5729 5730 /* 5731 * mas_destroy() - destroy a maple state. 5732 * @mas: The maple state 5733 * 5734 * Upon completion, check the left-most node and rebalance against the node to 5735 * the right if necessary. Frees any allocated nodes associated with this maple 5736 * state. 5737 */ 5738 void mas_destroy(struct ma_state *mas) 5739 { 5740 struct maple_alloc *node; 5741 5742 /* 5743 * When using mas_for_each() to insert an expected number of elements, 5744 * it is possible that the number inserted is less than the expected 5745 * number. To fix an invalid final node, a check is performed here to 5746 * rebalance the previous node with the final node. 5747 */ 5748 if (mas->mas_flags & MA_STATE_REBALANCE) { 5749 unsigned char end; 5750 5751 if (mas_is_start(mas)) 5752 mas_start(mas); 5753 5754 mtree_range_walk(mas); 5755 end = mas_data_end(mas) + 1; 5756 if (end < mt_min_slot_count(mas->node) - 1) 5757 mas_destroy_rebalance(mas, end); 5758 5759 mas->mas_flags &= ~MA_STATE_REBALANCE; 5760 } 5761 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5762 5763 while (mas->alloc && !((unsigned long)mas->alloc & 0x1)) { 5764 node = mas->alloc; 5765 mas->alloc = node->slot[0]; 5766 if (node->node_count > 0) 5767 mt_free_bulk(node->node_count, 5768 (void __rcu **)&node->slot[1]); 5769 kmem_cache_free(maple_node_cache, node); 5770 } 5771 mas->alloc = NULL; 5772 } 5773 EXPORT_SYMBOL_GPL(mas_destroy); 5774 5775 /* 5776 * mas_expected_entries() - Set the expected number of entries that will be inserted. 5777 * @mas: The maple state 5778 * @nr_entries: The number of expected entries. 
5779 * 5780 * This will attempt to pre-allocate enough nodes to store the expected number 5781 * of entries. The allocations will occur using the bulk allocator interface 5782 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5783 * to ensure any unused nodes are freed. 5784 * 5785 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5786 */ 5787 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5788 { 5789 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5790 struct maple_enode *enode = mas->node; 5791 int nr_nodes; 5792 int ret; 5793 5794 /* 5795 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5796 * forking a process and duplicating the VMAs from one tree to a new 5797 * tree. When such a situation arises, it is known that the new tree is 5798 * not going to be used until the entire tree is populated. For 5799 * performance reasons, it is best to use a bulk load with RCU disabled. 5800 * This allows for optimistic splitting that favours the left and reuse 5801 * of nodes during the operation. 5802 */ 5803 5804 /* Optimize splitting for bulk insert in-order */ 5805 mas->mas_flags |= MA_STATE_BULK; 5806 5807 /* 5808 * Avoid overflow, assume a gap between each entry and a trailing null. 5809 * If this is wrong, it just means allocation can happen during 5810 * insertion of entries. 5811 */ 5812 nr_nodes = max(nr_entries, nr_entries * 2 + 1); 5813 if (!mt_is_alloc(mas->tree)) 5814 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; 5815 5816 /* Leaves; reduce slots to keep space for expansion */ 5817 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); 5818 /* Internal nodes */ 5819 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); 5820 /* Add working room for split (2 nodes) + new parents */ 5821 mas_node_count(mas, nr_nodes + 3); 5822 5823 /* Detect if allocations run out */ 5824 mas->mas_flags |= MA_STATE_PREALLOC; 5825 5826 if (!mas_is_err(mas)) 5827 return 0; 5828 5829 ret = xa_err(mas->node); 5830 mas->node = enode; 5831 mas_destroy(mas); 5832 return ret; 5833 5834 } 5835 EXPORT_SYMBOL_GPL(mas_expected_entries); 5836 5837 /** 5838 * mas_next() - Get the next entry. 5839 * @mas: The maple state 5840 * @max: The maximum index to check. 5841 * 5842 * Returns the next entry after @mas->index. 5843 * Must hold rcu_read_lock or the write lock. 5844 * Can return the zero entry. 5845 * 5846 * Return: The next entry or %NULL 5847 */ 5848 void *mas_next(struct ma_state *mas, unsigned long max) 5849 { 5850 if (mas_is_none(mas) || mas_is_paused(mas)) 5851 mas->node = MAS_START; 5852 5853 if (mas_is_start(mas)) 5854 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ 5855 5856 if (mas_is_ptr(mas)) { 5857 if (!mas->index) { 5858 mas->index = 1; 5859 mas->last = ULONG_MAX; 5860 } 5861 return NULL; 5862 } 5863 5864 if (mas->last == ULONG_MAX) 5865 return NULL; 5866 5867 /* Retries on dead nodes handled by mas_next_entry */ 5868 return mas_next_entry(mas, max); 5869 } 5870 EXPORT_SYMBOL_GPL(mas_next); 5871 5872 /** 5873 * mt_next() - get the next value in the maple tree 5874 * @mt: The maple tree 5875 * @index: The start index 5876 * @max: The maximum index to check 5877 * 5878 * Return: The entry at @index or higher, or %NULL if nothing is found. 
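 *
 * An illustrative sketch (hypothetical indices, not from the kernel
 * sources): with entries stored at indices 5 and 10,
 * mt_next(&mt, 7, ULONG_MAX) returns the entry stored at index 10.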
5879 */ 5880 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) 5881 { 5882 void *entry = NULL; 5883 MA_STATE(mas, mt, index, index); 5884 5885 rcu_read_lock(); 5886 entry = mas_next(&mas, max); 5887 rcu_read_unlock(); 5888 return entry; 5889 } 5890 EXPORT_SYMBOL_GPL(mt_next); 5891 5892 /** 5893 * mas_prev() - Get the previous entry 5894 * @mas: The maple state 5895 * @min: The minimum value to check. 5896 * 5897 * Must hold rcu_read_lock or the write lock. 5898 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not 5899 * searchable nodes. 5900 * 5901 * Return: the previous value or %NULL. 5902 */ 5903 void *mas_prev(struct ma_state *mas, unsigned long min) 5904 { 5905 if (!mas->index) { 5906 /* Nothing comes before 0 */ 5907 mas->last = 0; 5908 return NULL; 5909 } 5910 5911 if (unlikely(mas_is_ptr(mas))) 5912 return NULL; 5913 5914 if (mas_is_none(mas) || mas_is_paused(mas)) 5915 mas->node = MAS_START; 5916 5917 if (mas_is_start(mas)) { 5918 mas_walk(mas); 5919 if (!mas->index) 5920 return NULL; 5921 } 5922 5923 if (mas_is_ptr(mas)) { 5924 if (!mas->index) { 5925 mas->last = 0; 5926 return NULL; 5927 } 5928 5929 mas->index = mas->last = 0; 5930 return mas_root_locked(mas); 5931 } 5932 return mas_prev_entry(mas, min); 5933 } 5934 EXPORT_SYMBOL_GPL(mas_prev); 5935 5936 /** 5937 * mt_prev() - get the previous value in the maple tree 5938 * @mt: The maple tree 5939 * @index: The start index 5940 * @min: The minimum index to check 5941 * 5942 * Return: The entry at @index or lower, or %NULL if nothing is found. 5943 */ 5944 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) 5945 { 5946 void *entry = NULL; 5947 MA_STATE(mas, mt, index, index); 5948 5949 rcu_read_lock(); 5950 entry = mas_prev(&mas, min); 5951 rcu_read_unlock(); 5952 return entry; 5953 } 5954 EXPORT_SYMBOL_GPL(mt_prev); 5955 5956 /** 5957 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. 5958 * @mas: The maple state to pause 5959 * 5960 * Some users need to pause a walk and drop the lock they're holding in 5961 * order to yield to a higher priority thread or carry out an operation 5962 * on an entry. Those users should call this function before they drop 5963 * the lock. It resets the @mas to be suitable for the next iteration 5964 * of the loop after the user has reacquired the lock. If most entries 5965 * found during a walk require you to call mas_pause(), the mt_for_each() 5966 * iterator may be more appropriate. 5967 * 5968 */ 5969 void mas_pause(struct ma_state *mas) 5970 { 5971 mas->node = MAS_PAUSE; 5972 } 5973 EXPORT_SYMBOL_GPL(mas_pause); 5974 5975 /** 5976 * mas_find() - On the first call, find the entry at or after mas->index up to 5977 * %max. Otherwise, find the entry after mas->index. 5978 * @mas: The maple state 5979 * @max: The maximum value to check. 5980 * 5981 * Must hold rcu_read_lock or the write lock. 5982 * If an entry exists, last and index are updated accordingly. 5983 * May set @mas->node to MAS_NONE. 5984 * 5985 * Return: The entry or %NULL. 
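 *
 * A minimal iteration sketch (illustrative only; the tree and process()
 * are hypothetical caller-side names):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		process(entry);
 *	rcu_read_unlock();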
5986  */
5987 void *mas_find(struct ma_state *mas, unsigned long max)
5988 {
5989 	if (unlikely(mas_is_paused(mas))) {
5990 		if (unlikely(mas->last == ULONG_MAX)) {
5991 			mas->node = MAS_NONE;
5992 			return NULL;
5993 		}
5994 		mas->node = MAS_START;
5995 		mas->index = ++mas->last;
5996 	}
5997 
5998 	if (unlikely(mas_is_start(mas))) {
5999 		/* First run or continue */
6000 		void *entry;
6001 
6002 		if (mas->index > max)
6003 			return NULL;
6004 
6005 		entry = mas_walk(mas);
6006 		if (entry)
6007 			return entry;
6008 	}
6009 
6010 	if (unlikely(!mas_searchable(mas)))
6011 		return NULL;
6012 
6013 	/* Retries on dead nodes handled by mas_next_entry */
6014 	return mas_next_entry(mas, max);
6015 }
6016 EXPORT_SYMBOL_GPL(mas_find);
6017 
6018 /**
6019  * mas_find_rev() - On the first call, find the first non-null entry at or below
6020  * mas->index down to %min. Otherwise find the first non-null entry below
6021  * mas->index down to %min.
6022  * @mas: The maple state
6023  * @min: The minimum value to check.
6024  *
6025  * Must hold rcu_read_lock or the write lock.
6026  * If an entry exists, last and index are updated accordingly.
6027  * May set @mas->node to MAS_NONE.
6028  *
6029  * Return: The entry or %NULL.
6030  */
6031 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6032 {
6033 	if (unlikely(mas_is_paused(mas))) {
6034 		if (unlikely(mas->last == ULONG_MAX)) {
6035 			mas->node = MAS_NONE;
6036 			return NULL;
6037 		}
6038 		mas->node = MAS_START;
6039 		mas->last = --mas->index;
6040 	}
6041 
6042 	if (unlikely(mas_is_start(mas))) {
6043 		/* First run or continue */
6044 		void *entry;
6045 
6046 		if (mas->index < min)
6047 			return NULL;
6048 
6049 		entry = mas_walk(mas);
6050 		if (entry)
6051 			return entry;
6052 	}
6053 
6054 	if (unlikely(!mas_searchable(mas)))
6055 		return NULL;
6056 
6057 	if (mas->index < min)
6058 		return NULL;
6059 
6060 	/* Retries on dead nodes handled by mas_prev_entry */
6061 	return mas_prev_entry(mas, min);
6062 }
6063 EXPORT_SYMBOL_GPL(mas_find_rev);
6064 
6065 /**
6066  * mas_erase() - Find the range in which index resides and erase the entire
6067  * range.
6068  * @mas: The maple state
6069  *
6070  * Must hold the write lock.
6071  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6072  * erases that range.
6073  *
6074  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6075  */
6076 void *mas_erase(struct ma_state *mas)
6077 {
6078 	void *entry;
6079 	MA_WR_STATE(wr_mas, mas, NULL);
6080 
6081 	if (mas_is_none(mas) || mas_is_paused(mas))
6082 		mas->node = MAS_START;
6083 
6084 	/* Retry unnecessary when holding the write lock. */
6085 	entry = mas_state_walk(mas);
6086 	if (!entry)
6087 		return NULL;
6088 
6089 write_retry:
6090 	/* Must reset to ensure spanning writes of last slot are detected */
6091 	mas_reset(mas);
6092 	mas_wr_store_setup(&wr_mas);
6093 	mas_wr_store_entry(&wr_mas);
6094 	if (mas_nomem(mas, GFP_KERNEL))
6095 		goto write_retry;
6096 
6097 	return entry;
6098 }
6099 EXPORT_SYMBOL_GPL(mas_erase);
6100 
6101 /**
6102  * mas_nomem() - Check if there was an error allocating and do the allocation
6103  * if necessary. If there are allocations, then free them.
6104  * @mas: The maple state
6105  * @gfp: The GFP_FLAGS to use for allocations
6106  * Return: true on allocation, false otherwise.
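 *
 * This enables the allocation-retry idiom used by mas_store_gfp() and
 * mtree_store_range(), roughly:
 *
 *	retry:
 *		mas_wr_store_entry(&wr_mas);
 *		if (mas_nomem(mas, gfp))
 *			goto retry;
 *
 * Note the tree lock may be dropped and reacquired here when blocking
 * allocations are allowed and the tree uses the internal lock.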
/**
 * mas_nomem() - Check if there was an allocation error, and allocate if
 * necessary.  If there was no error, free any unused allocations.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: true on allocation, false otherwise.
 */
bool mas_nomem(struct ma_state *mas, gfp_t gfp)
        __must_hold(mas->tree->lock)
{
        if (likely(mas->node != MA_ERROR(-ENOMEM))) {
                mas_destroy(mas);
                return false;
        }

        if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
                mtree_unlock(mas->tree);
                mas_alloc_nodes(mas, gfp);
                mtree_lock(mas->tree);
        } else {
                mas_alloc_nodes(mas, gfp);
        }

        if (!mas_allocated(mas))
                return false;

        mas->node = MAS_START;
        return true;
}

void __init maple_tree_init(void)
{
        maple_node_cache = kmem_cache_create("maple_node",
                        sizeof(struct maple_node), sizeof(struct maple_node),
                        SLAB_PANIC, NULL);
}

/**
 * mtree_load() - Load a value stored in a maple tree
 * @mt: The maple tree
 * @index: The index to load
 *
 * Return: the entry or %NULL
 */
void *mtree_load(struct maple_tree *mt, unsigned long index)
{
        MA_STATE(mas, mt, index, index);
        void *entry;

        trace_ma_read(__func__, &mas);
        rcu_read_lock();
retry:
        entry = mas_start(&mas);
        if (unlikely(mas_is_none(&mas)))
                goto unlock;

        if (unlikely(mas_is_ptr(&mas))) {
                if (index)
                        entry = NULL;

                goto unlock;
        }

        entry = mtree_lookup_walk(&mas);
        if (!entry && unlikely(mas_is_start(&mas)))
                goto retry;
unlock:
        rcu_read_unlock();
        if (xa_is_zero(entry))
                return NULL;

        return entry;
}
EXPORT_SYMBOL(mtree_load);

/**
 * mtree_store_range() - Store an entry at a given range.
 * @mt: The maple tree
 * @index: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store_range(struct maple_tree *mt, unsigned long index,
                unsigned long last, void *entry, gfp_t gfp)
{
        MA_STATE(mas, mt, index, last);
        MA_WR_STATE(wr_mas, &mas, entry);

        trace_ma_write(__func__, &mas, 0, entry);
        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;

        if (index > last)
                return -EINVAL;

        mtree_lock(mt);
retry:
        mas_wr_store_entry(&wr_mas);
        if (mas_nomem(&mas, gfp))
                goto retry;

        mtree_unlock(mt);
        if (mas_is_err(&mas))
                return xa_err(mas.node);

        return 0;
}
EXPORT_SYMBOL(mtree_store_range);

/**
 * mtree_store() - Store an entry at a given index.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could
 * not be allocated.
 */
int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
                gfp_t gfp)
{
        return mtree_store_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_store);
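/*
 * Example (editor's illustrative sketch): storing a small integer over the
 * range 10-19 and reading it back.  xa_mk_value() tags the integer so it is
 * not mistaken for a pointer; the tree name is hypothetical.
 *
 *        DEFINE_MTREE(tree);
 *
 *        mtree_store_range(&tree, 10, 19, xa_mk_value(42), GFP_KERNEL);
 *        mtree_load(&tree, 15);        // returns xa_mk_value(42)
 *        mtree_load(&tree, 20);        // returns NULL
 */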
/**
 * mtree_insert_range() - Insert an entry at a given range if there is no
 * value.
 * @mt: The maple tree
 * @first: The start of the range
 * @last: The end of the range
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert_range(struct maple_tree *mt, unsigned long first,
                unsigned long last, void *entry, gfp_t gfp)
{
        MA_STATE(ms, mt, first, last);

        if (WARN_ON_ONCE(xa_is_advanced(entry)))
                return -EINVAL;

        if (first > last)
                return -EINVAL;

        mtree_lock(mt);
retry:
        mas_insert(&ms, entry);
        if (mas_nomem(&ms, gfp))
                goto retry;

        mtree_unlock(mt);
        if (mas_is_err(&ms))
                return xa_err(ms.node);

        return 0;
}
EXPORT_SYMBOL(mtree_insert_range);

/**
 * mtree_insert() - Insert an entry at a given index if there is no value.
 * @mt: The maple tree
 * @index: The index to store the value
 * @entry: The entry to store
 * @gfp: The GFP_FLAGS to use for allocations.
 *
 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
 * request, -ENOMEM if memory could not be allocated.
 */
int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
                gfp_t gfp)
{
        return mtree_insert_range(mt, index, index, entry, gfp);
}
EXPORT_SYMBOL(mtree_insert);

int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
                void *entry, unsigned long size, unsigned long min,
                unsigned long max, gfp_t gfp)
{
        int ret = 0;

        MA_STATE(mas, mt, min, max - size);
        if (!mt_is_alloc(mt))
                return -EINVAL;

        if (WARN_ON_ONCE(mt_is_reserved(entry)))
                return -EINVAL;

        if (min > max)
                return -EINVAL;

        if (max < size)
                return -EINVAL;

        if (!size)
                return -EINVAL;

        mtree_lock(mt);
retry:
        mas.offset = 0;
        mas.index = min;
        mas.last = max - size;
        ret = mas_alloc(&mas, entry, size, startp);
        if (mas_nomem(&mas, gfp))
                goto retry;

        mtree_unlock(mt);
        return ret;
}
EXPORT_SYMBOL(mtree_alloc_range);

int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
                void *entry, unsigned long size, unsigned long min,
                unsigned long max, gfp_t gfp)
{
        int ret = 0;

        MA_STATE(mas, mt, min, max - size);
        if (!mt_is_alloc(mt))
                return -EINVAL;

        if (WARN_ON_ONCE(mt_is_reserved(entry)))
                return -EINVAL;

        if (min >= max)
                return -EINVAL;

        if (max < size - 1)
                return -EINVAL;

        if (!size)
                return -EINVAL;

        mtree_lock(mt);
retry:
        ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
        if (mas_nomem(&mas, gfp))
                goto retry;

        mtree_unlock(mt);
        return ret;
}
EXPORT_SYMBOL(mtree_alloc_rrange);
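/*
 * Example (editor's illustrative sketch): reserving a free range of 0x1000
 * slots in an allocation tree.  The tree and the ptr entry are hypothetical;
 * the tree must be initialised with MT_FLAGS_ALLOC_RANGE or mt_is_alloc()
 * fails and -EINVAL is returned.
 *
 *        struct maple_tree tree;
 *        unsigned long start;
 *
 *        mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
 *        if (!mtree_alloc_range(&tree, &start, ptr, 0x1000, 0, 0xffff,
 *                               GFP_KERNEL))
 *                pr_debug("reserved [%lx, %lx]\n", start, start + 0xfff);
 */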
/**
 * mtree_erase() - Find an index and erase the entire range.
 * @mt: The maple tree
 * @index: The index to erase
 *
 * Erasing is the same as a walk to an entry then a store of a %NULL to that
 * ENTIRE range.  In fact, it is implemented as such using the advanced API.
 *
 * Return: The entry stored at the @index or %NULL
 */
void *mtree_erase(struct maple_tree *mt, unsigned long index)
{
        void *entry = NULL;

        MA_STATE(mas, mt, index, index);
        trace_ma_op(__func__, &mas);

        mtree_lock(mt);
        entry = mas_erase(&mas);
        mtree_unlock(mt);

        return entry;
}
EXPORT_SYMBOL(mtree_erase);

/**
 * __mt_destroy() - Walk and free all nodes of a locked maple tree.
 * @mt: The maple tree
 *
 * Note: Does not handle locking.
 */
void __mt_destroy(struct maple_tree *mt)
{
        void *root = mt_root_locked(mt);

        rcu_assign_pointer(mt->ma_root, NULL);
        if (xa_is_node(root))
                mte_destroy_walk(root, mt);

        mt->ma_flags = 0;
}
EXPORT_SYMBOL_GPL(__mt_destroy);

/**
 * mtree_destroy() - Destroy a maple tree
 * @mt: The maple tree
 *
 * Frees all resources used by the tree.  Handles locking.
 */
void mtree_destroy(struct maple_tree *mt)
{
        mtree_lock(mt);
        __mt_destroy(mt);
        mtree_unlock(mt);
}
EXPORT_SYMBOL(mtree_destroy);

/**
 * mt_find() - Search from the start up until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking.  @index will be incremented to one beyond the range.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max)
{
        MA_STATE(mas, mt, *index, *index);
        void *entry;
#ifdef CONFIG_DEBUG_MAPLE_TREE
        unsigned long copy = *index;
#endif

        trace_ma_read(__func__, &mas);

        if ((*index) > max)
                return NULL;

        rcu_read_lock();
retry:
        entry = mas_state_walk(&mas);
        if (mas_is_start(&mas))
                goto retry;

        if (unlikely(xa_is_zero(entry)))
                entry = NULL;

        if (entry)
                goto unlock;

        while (mas_searchable(&mas) && (mas.index < max)) {
                entry = mas_next_entry(&mas, max);
                if (likely(entry && !xa_is_zero(entry)))
                        break;
        }

        if (unlikely(xa_is_zero(entry)))
                entry = NULL;
unlock:
        rcu_read_unlock();
        if (likely(entry)) {
                *index = mas.last + 1;
#ifdef CONFIG_DEBUG_MAPLE_TREE
                if ((*index) && (*index) <= copy)
                        pr_err("index not increased! %lx <= %lx\n",
                               *index, copy);
                MT_BUG_ON(mt, (*index) && ((*index) <= copy));
#endif
        }

        return entry;
}
EXPORT_SYMBOL(mt_find);
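/*
 * Example (editor's illustrative sketch): iterating every entry with the
 * mt_for_each() wrapper from <linux/maple_tree.h>, which is built on
 * mt_find()/mt_find_after() and takes the RCU read lock itself.  The tree is
 * hypothetical; after each call @index is one past the range that was found.
 *
 *        unsigned long index = 0;
 *        void *entry;
 *
 *        mt_for_each(&tree, entry, index, ULONG_MAX)
 *                pr_info("range ends at %lu: %p\n", index - 1, entry);
 */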
/**
 * mt_find_after() - Search from @index until an entry is found.
 * @mt: The maple tree
 * @index: Pointer which contains the start location of the search
 * @max: The maximum value to check
 *
 * Handles locking; detects wrapping on @index == 0.
 *
 * Return: The entry at or after the @index or %NULL
 */
void *mt_find_after(struct maple_tree *mt, unsigned long *index,
                unsigned long max)
{
        if (!(*index))
                return NULL;

        return mt_find(mt, index, max);
}
EXPORT_SYMBOL(mt_find_after);

#ifdef CONFIG_DEBUG_MAPLE_TREE
atomic_t maple_tree_tests_run;
EXPORT_SYMBOL_GPL(maple_tree_tests_run);
atomic_t maple_tree_tests_passed;
EXPORT_SYMBOL_GPL(maple_tree_tests_passed);

#ifndef __KERNEL__
extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
void mt_set_non_kernel(unsigned int val)
{
        kmem_cache_set_non_kernel(maple_node_cache, val);
}

extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
unsigned long mt_get_alloc_size(void)
{
        return kmem_cache_get_alloc(maple_node_cache);
}

extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
void mt_zero_nr_tallocated(void)
{
        kmem_cache_zero_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
unsigned int mt_nr_tallocated(void)
{
        return kmem_cache_nr_tallocated(maple_node_cache);
}

extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
unsigned int mt_nr_allocated(void)
{
        return kmem_cache_nr_allocated(maple_node_cache);
}

/*
 * mas_dead_node() - Check if the maple state is pointing to a dead node.
 * @mas: The maple state
 * @index: The index to restore in @mas.
 *
 * Used in test code.
 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
 */
static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
{
        if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
                return 0;

        if (likely(!mte_dead_node(mas->node)))
                return 0;

        mas_rewalk(mas, index);
        return 1;
}

void mt_cache_shrink(void)
{
}
#else
/*
 * mt_cache_shrink() - For testing, don't use this.
 *
 * Certain testcases can trigger an OOM when combined with other memory
 * debugging configuration options.  This function is used to reduce the
 * possibility of an out of memory event due to kmem_cache objects remaining
 * around for longer than usual.
 */
void mt_cache_shrink(void)
{
        kmem_cache_shrink(maple_node_cache);
}
EXPORT_SYMBOL_GPL(mt_cache_shrink);

#endif /* not defined __KERNEL__ */

/*
 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
 * @mas: The maple state
 * @offset: The offset into the slot array to fetch.
 *
 * Return: The entry stored at @offset.
 */
static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
                unsigned char offset)
{
        return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
                        offset);
}

/*
 * mas_first_entry() - Go to the first leaf and find the first entry.
 * @mas: the maple state.
 * @mn: the node backing @mas->node.
 * @limit: the maximum index to check.
 * @mt: the node type of @mn.
 *
 * Sets mas->index to the range minimum and mas->offset to the offset of the
 * entry.
 *
 * Return: The first entry or MAS_NONE.
 */
static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn,
                unsigned long limit, enum maple_type mt)
{
        unsigned long max;
        unsigned long *pivots;
        void __rcu **slots;
        void *entry = NULL;

        mas->index = mas->min;
        if (mas->index > limit)
                goto none;

        max = mas->max;
        mas->offset = 0;
        while (likely(!ma_is_leaf(mt))) {
                MT_BUG_ON(mas->tree, mte_dead_node(mas->node));
                slots = ma_slots(mn, mt);
                pivots = ma_pivots(mn, mt);
                max = pivots[0];
                entry = mas_slot(mas, slots, 0);
                if (unlikely(ma_dead_node(mn)))
                        return NULL;
                mas->node = entry;
                mn = mas_mn(mas);
                mt = mte_node_type(mas->node);
        }
        MT_BUG_ON(mas->tree, mte_dead_node(mas->node));

        mas->max = max;
        slots = ma_slots(mn, mt);
        entry = mas_slot(mas, slots, 0);
        if (unlikely(ma_dead_node(mn)))
                return NULL;

        /* Slot 0 or 1 must be set */
        if (mas->index > limit)
                goto none;

        if (likely(entry))
                return entry;

        pivots = ma_pivots(mn, mt);
        mas->index = pivots[0] + 1;
        mas->offset = 1;
        entry = mas_slot(mas, slots, 1);
        if (unlikely(ma_dead_node(mn)))
                return NULL;

        if (mas->index > limit)
                goto none;

        if (likely(entry))
                return entry;

none:
        if (likely(!ma_dead_node(mn)))
                mas->node = MAS_NONE;
        return NULL;
}

/* Depth first search, post-order */
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
        struct maple_enode *p = MAS_NONE, *mn = mas->node;
        unsigned long p_min, p_max;

        mas_next_node(mas, mas_mn(mas), max);
        if (!mas_is_none(mas))
                return;

        if (mte_is_root(mn))
                return;

        mas->node = mn;
        mas_ascend(mas);
        while (mas->node != MAS_NONE) {
                p = mas->node;
                p_min = mas->min;
                p_max = mas->max;
                mas_prev_node(mas, 0);
        }

        if (p == MAS_NONE)
                return;

        mas->node = p;
        mas->max = p_max;
        mas->min = p_min;
}

/* Tree validations */
static void mt_dump_node(const struct maple_tree *mt, void *entry,
                unsigned long min, unsigned long max, unsigned int depth);
static void mt_dump_range(unsigned long min, unsigned long max,
                unsigned int depth)
{
        static const char spaces[] = "                                ";

        if (min == max)
                pr_info("%.*s%lu: ", depth * 2, spaces, min);
        else
                pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max);
}

static void mt_dump_entry(void *entry, unsigned long min, unsigned long max,
                unsigned int depth)
{
        mt_dump_range(min, max, depth);

        if (xa_is_value(entry))
                pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry),
                                xa_to_value(entry), entry);
        else if (xa_is_zero(entry))
                pr_cont("zero (%ld)\n", xa_to_internal(entry));
        else if (mt_is_reserved(entry))
                pr_cont("UNKNOWN ENTRY (%p)\n", entry);
        else
                pr_cont("%p\n", entry);
}
static void mt_dump_range64(const struct maple_tree *mt, void *entry,
                unsigned long min, unsigned long max, unsigned int depth)
{
        struct maple_range_64 *node = &mte_to_node(entry)->mr64;
        bool leaf = mte_is_leaf(entry);
        unsigned long first = min;
        int i;

        pr_cont(" contents: ");
        for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++)
                pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
        pr_cont("%p\n", node->slot[i]);
        for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) {
                unsigned long last = max;

                if (i < (MAPLE_RANGE64_SLOTS - 1))
                        last = node->pivot[i];
                else if (!node->slot[i] && max != mt_max[mte_node_type(entry)])
                        break;
                if (last == 0 && i > 0)
                        break;
                if (leaf)
                        mt_dump_entry(mt_slot(mt, node->slot, i),
                                        first, last, depth + 1);
                else if (node->slot[i])
                        mt_dump_node(mt, mt_slot(mt, node->slot, i),
                                        first, last, depth + 1);

                if (last == max)
                        break;
                if (last > max) {
                        pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
                                        node, last, max, i);
                        break;
                }
                first = last + 1;
        }
}

static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
                unsigned long min, unsigned long max, unsigned int depth)
{
        struct maple_arange_64 *node = &mte_to_node(entry)->ma64;
        bool leaf = mte_is_leaf(entry);
        unsigned long first = min;
        int i;

        pr_cont(" contents: ");
        for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++)
                pr_cont("%lu ", node->gap[i]);
        pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap);
        for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++)
                pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
        pr_cont("%p\n", node->slot[i]);
        for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) {
                unsigned long last = max;

                if (i < (MAPLE_ARANGE64_SLOTS - 1))
                        last = node->pivot[i];
                else if (!node->slot[i])
                        break;
                if (last == 0 && i > 0)
                        break;
                if (leaf)
                        mt_dump_entry(mt_slot(mt, node->slot, i),
                                        first, last, depth + 1);
                else if (node->slot[i])
                        mt_dump_node(mt, mt_slot(mt, node->slot, i),
                                        first, last, depth + 1);

                if (last == max)
                        break;
                if (last > max) {
                        pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
                                        node, last, max, i);
                        break;
                }
                first = last + 1;
        }
}

static void mt_dump_node(const struct maple_tree *mt, void *entry,
                unsigned long min, unsigned long max, unsigned int depth)
{
        struct maple_node *node = mte_to_node(entry);
        unsigned int type = mte_node_type(entry);
        unsigned int i;

        mt_dump_range(min, max, depth);

        pr_cont("node %p depth %d type %d parent %p", node, depth, type,
                        node ? node->parent : NULL);
        switch (type) {
        case maple_dense:
                pr_cont("\n");
                for (i = 0; i < MAPLE_NODE_SLOTS; i++) {
                        if (min + i > max)
                                pr_cont("OUT OF RANGE: ");
                        mt_dump_entry(mt_slot(mt, node->slot, i),
                                        min + i, min + i, depth);
                }
                break;
        case maple_leaf_64:
        case maple_range_64:
                mt_dump_range64(mt, entry, min, max, depth);
                break;
        case maple_arange_64:
                mt_dump_arange64(mt, entry, min, max, depth);
                break;
        default:
                pr_cont(" UNKNOWN TYPE\n");
        }
}

void mt_dump(const struct maple_tree *mt)
{
        void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt));

        pr_info("maple_tree(%p) flags %X, height %u root %p\n",
                 mt, mt->ma_flags, mt_height(mt), entry);
        if (!xa_is_node(entry))
                mt_dump_entry(entry, 0, 0, 0);
        else if (entry)
                mt_dump_node(mt, entry, 0, mt_max[mte_node_type(entry)], 0);
}
EXPORT_SYMBOL_GPL(mt_dump);
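/*
 * Example (editor's illustrative sketch): dumping a tree for debugging.
 * The tree is hypothetical.  mt_dump() dereferences the root with
 * rcu_dereference_check(), so the caller must be in an RCU read-side
 * critical section or, as here, hold the tree lock.
 *
 *        mtree_lock(&tree);
 *        mt_dump(&tree);
 *        mtree_unlock(&tree);
 */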
/*
 * Calculate the maximum gap in a node and check if that's what is reported in
 * the parent (unless root).
 */
static void mas_validate_gaps(struct ma_state *mas)
{
        struct maple_enode *mte = mas->node;
        struct maple_node *p_mn;
        unsigned long gap = 0, max_gap = 0;
        unsigned long p_end, p_start = mas->min;
        unsigned char p_slot;
        unsigned long *gaps = NULL;
        unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte));
        int i;

        if (ma_is_dense(mte_node_type(mte))) {
                for (i = 0; i < mt_slot_count(mte); i++) {
                        if (mas_get_slot(mas, i)) {
                                if (gap > max_gap)
                                        max_gap = gap;
                                gap = 0;
                                continue;
                        }
                        gap++;
                }
                goto counted;
        }

        gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte));
        for (i = 0; i < mt_slot_count(mte); i++) {
                p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte));

                if (!gaps) {
                        if (mas_get_slot(mas, i)) {
                                gap = 0;
                                goto not_empty;
                        }

                        gap += p_end - p_start + 1;
                } else {
                        void *entry = mas_get_slot(mas, i);

                        gap = gaps[i];
                        if (!entry) {
                                if (gap != p_end - p_start + 1) {
                                        pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n",
                                                mas_mn(mas), i,
                                                mas_get_slot(mas, i), gap,
                                                p_end, p_start);
                                        mt_dump(mas->tree);

                                        MT_BUG_ON(mas->tree,
                                                gap != p_end - p_start + 1);
                                }
                        } else {
                                if (gap > p_end - p_start + 1) {
                                        pr_err("%p[%u] %lu > %lu - %lu + 1 (%lu)\n",
                                                mas_mn(mas), i, gap, p_end,
                                                p_start, p_end - p_start + 1);
                                        MT_BUG_ON(mas->tree,
                                                gap > p_end - p_start + 1);
                                }
                        }
                }

                if (gap > max_gap)
                        max_gap = gap;
not_empty:
                p_start = p_end + 1;
                if (p_end >= mas->max)
                        break;
        }

counted:
        if (mte_is_root(mte))
                return;

        p_slot = mte_parent_slot(mas->node);
        p_mn = mte_parent(mte);
        MT_BUG_ON(mas->tree, max_gap > mas->max);
        if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) {
                pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap);
                mt_dump(mas->tree);
        }

        MT_BUG_ON(mas->tree,
                  ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap);
}

static void mas_validate_parent_slot(struct ma_state *mas)
{
        struct maple_node *parent;
        struct maple_enode *node;
        enum maple_type p_type = mas_parent_enum(mas, mas->node);
        unsigned char p_slot = mte_parent_slot(mas->node);
        void __rcu **slots;
        int i;

        if (mte_is_root(mas->node))
                return;

        parent = mte_parent(mas->node);
        slots = ma_slots(parent, p_type);
        MT_BUG_ON(mas->tree, mas_mn(mas) == parent);

        /* Check prev/next parent slot for duplicate node entry */
        for (i = 0; i < mt_slots[p_type]; i++) {
                node = mas_slot(mas, slots, i);
                if (i == p_slot) {
                        if (node != mas->node)
                                pr_err("parent %p[%u] does not have %p\n",
                                        parent, i, mas_mn(mas));
                        MT_BUG_ON(mas->tree, node != mas->node);
                } else if (node == mas->node) {
                        pr_err("Invalid child %p at parent %p[%u] p_slot %u\n",
                               mas_mn(mas), parent, i, p_slot);
                        MT_BUG_ON(mas->tree, node == mas->node);
                }
        }
}
static void mas_validate_child_slot(struct ma_state *mas)
{
        enum maple_type type = mte_node_type(mas->node);
        void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
        unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type);
        struct maple_enode *child;
        unsigned char i;

        if (mte_is_leaf(mas->node))
                return;

        for (i = 0; i < mt_slots[type]; i++) {
                child = mas_slot(mas, slots, i);
                if (!pivots[i] || pivots[i] == mas->max)
                        break;

                if (!child)
                        break;

                if (mte_parent_slot(child) != i) {
                        pr_err("Slot error at %p[%u]: child %p has pslot %u\n",
                               mas_mn(mas), i, mte_to_node(child),
                               mte_parent_slot(child));
                        MT_BUG_ON(mas->tree, 1);
                }

                if (mte_parent(child) != mte_to_node(mas->node)) {
                        pr_err("child %p has parent %p not %p\n",
                               mte_to_node(child), mte_parent(child),
                               mte_to_node(mas->node));
                        MT_BUG_ON(mas->tree, 1);
                }
        }
}

/*
 * Validate that all pivots are within mas->min and mas->max.
 */
static void mas_validate_limits(struct ma_state *mas)
{
        int i;
        unsigned long prev_piv = 0;
        enum maple_type type = mte_node_type(mas->node);
        void __rcu **slots = ma_slots(mte_to_node(mas->node), type);
        unsigned long *pivots = ma_pivots(mas_mn(mas), type);

        /* All limits are fine here. */
        if (mte_is_root(mas->node))
                return;

        for (i = 0; i < mt_slots[type]; i++) {
                unsigned long piv;

                piv = mas_safe_pivot(mas, pivots, i, type);

                if (!piv && (i != 0))
                        break;

                if (!mte_is_leaf(mas->node)) {
                        void *entry = mas_slot(mas, slots, i);

                        if (!entry)
                                pr_err("%p[%u] cannot be null\n",
                                       mas_mn(mas), i);

                        MT_BUG_ON(mas->tree, !entry);
                }

                if (prev_piv > piv) {
                        pr_err("%p[%u] piv %lu < prev_piv %lu\n",
                                mas_mn(mas), i, piv, prev_piv);
                        MT_BUG_ON(mas->tree, piv < prev_piv);
                }

                if (piv < mas->min) {
                        pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i,
                                piv, mas->min);
                        MT_BUG_ON(mas->tree, piv < mas->min);
                }
                if (piv > mas->max) {
                        pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i,
                                piv, mas->max);
                        MT_BUG_ON(mas->tree, piv > mas->max);
                }
                prev_piv = piv;
                if (piv == mas->max)
                        break;
        }
        for (i += 1; i < mt_slots[type]; i++) {
                void *entry = mas_slot(mas, slots, i);

                if (entry && (i != mt_slots[type] - 1)) {
                        pr_err("%p[%u] should not have entry %p\n",
                               mas_mn(mas), i, entry);
                        MT_BUG_ON(mas->tree, entry != NULL);
                }

                if (i < mt_pivots[type]) {
                        unsigned long piv = pivots[i];

                        if (!piv)
                                continue;

                        pr_err("%p[%u] should not have piv %lu\n",
                               mas_mn(mas), i, piv);
                        MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1);
                }
        }
}

static void mt_validate_nulls(struct maple_tree *mt)
{
        void *entry, *last = (void *)1;
        unsigned char offset = 0;
        void __rcu **slots;
        MA_STATE(mas, mt, 0, 0);

        mas_start(&mas);
        if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
                return;

        while (!mte_is_leaf(mas.node))
                mas_descend(&mas);

        slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node));
        do {
                entry = mas_slot(&mas, slots, offset);
                if (!last && !entry) {
                        pr_err("Sequential nulls end at %p[%u]\n",
                                mas_mn(&mas), offset);
                }
                MT_BUG_ON(mt, !last && !entry);
                last = entry;
                if (offset == mas_data_end(&mas)) {
                        mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
                        if (mas_is_none(&mas))
                                return;
                        offset = 0;
                        slots = ma_slots(mte_to_node(mas.node),
                                        mte_node_type(mas.node));
                } else {
                        offset++;
                }
        } while (!mas_is_none(&mas));
}

/*
 * Validate a maple tree by checking:
 * 1. The limits (pivots are within mas->min to mas->max)
 * 2. The gap is correctly set in the parents
 */
void mt_validate(struct maple_tree *mt)
{
        unsigned char end;

        MA_STATE(mas, mt, 0, 0);
        rcu_read_lock();
        mas_start(&mas);
        if (!mas_searchable(&mas))
                goto done;

        mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
        while (!mas_is_none(&mas)) {
                MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
                if (!mte_is_root(mas.node)) {
                        end = mas_data_end(&mas);
                        if ((end < mt_min_slot_count(mas.node)) &&
                            (mas.max != ULONG_MAX)) {
                                pr_err("Invalid size %u of %p\n", end,
                                       mas_mn(&mas));
                                MT_BUG_ON(mas.tree, 1);
                        }
                }
                mas_validate_parent_slot(&mas);
                mas_validate_child_slot(&mas);
                mas_validate_limits(&mas);
                if (mt_is_alloc(mt))
                        mas_validate_gaps(&mas);
                mas_dfs_postorder(&mas, ULONG_MAX);
        }
        mt_validate_nulls(mt);
done:
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt_validate);

#endif /* CONFIG_DEBUG_MAPLE_TREE */
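/*
 * Example (editor's illustrative sketch): a test built with
 * CONFIG_DEBUG_MAPLE_TREE might interleave stores with validation to catch
 * structural corruption as soon as it is introduced.  The tree and indices
 * are hypothetical; mt_validate() is not available in non-debug builds.
 *
 *        for (i = 0; i < 100; i++) {
 *                mtree_store_range(&tree, i * 10, i * 10 + 5,
 *                                  xa_mk_value(i), GFP_KERNEL);
 *                mt_validate(&tree);
 *        }
 */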