// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges; pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 *
 */
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};
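/*
 * Illustrative sketch (not part of the original source): one range store
 * covers every index between two pivots, as described in the DOC section
 * above.  Assumes only the public API from <linux/maple_tree.h>; the
 * function name is hypothetical and exists purely for demonstration.
 */
static __maybe_unused void mt_doc_example_range(void)
{
	struct maple_tree tree;
	void *entry = xa_mk_value(42);

	mt_init(&tree);
	/* One store covers the whole range [10, 19] inclusively. */
	mtree_store_range(&tree, 10, 19, entry, GFP_KERNEL);
	/* Every index within the range maps to the same entry. */
	WARN_ON(mtree_load(&tree, 10) != entry);
	WARN_ON(mtree_load(&tree, 19) != entry);
	/* Index 20 lies past the pivot and is not covered. */
	WARN_ON(mtree_load(&tree, 20) == entry);
	mtree_destroy(&tree);
}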
/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer in
 * use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type is stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
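/*
 * Illustrative sketch (not part of the original source): a dead node is
 * marked by pointing its parent pointer back at itself, which is the
 * convention mte_set_node_dead() above and ma_free_rcu() both rely on.
 * The function name is hypothetical and exists purely for demonstration.
 */
static __maybe_unused void mt_doc_example_dead_node(void)
{
	struct maple_node *node = mt_alloc_one(GFP_KERNEL);
	struct maple_enode *enode;

	if (!node)
		return;

	enode = mt_mk_node(node, maple_leaf_64);
	mte_set_node_dead(enode);
	/* A dead node's parent pointer refers back to the node itself. */
	WARN_ON(mte_to_node(enode)->parent != ma_parent_ptr(node));
	ma_free_rcu(node);
}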
/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * 16 bit values need an extra bit to store the offset.  This extra bit comes
 * from a reuse of the last bit in the node type.  This is possible by using
 * bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16 bit */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16 bit */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The parent pointer cast as a maple encoded node
 * @mt: The maple tree
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}
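/*
 * Illustrative sketch (not part of the original source): decoding the slot
 * from a parent pointer using the masks and shifts above.  The sample value
 * is hypothetical; the function exists purely for demonstration.
 */
static __maybe_unused void mt_doc_example_parent_decode(void)
{
	/* A 64 bit range node parent holding slot 5: (5 << 3) | 0b110. */
	unsigned long val = (5UL << MAPLE_PARENT_SLOT_SHIFT) |
			    MAPLE_PARENT_RANGE64;

	/* Bit 1 is set, so the slot starts at bit 3. */
	WARN_ON(mte_parent_shift(val) != MAPLE_PARENT_SLOT_SHIFT);
	/* Masking then shifting recovers the slot number. */
	WARN_ON(((val & mte_parent_slot_mask(val)) >>
		 mte_parent_shift(val)) != 5);
}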
/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot in @parent where @enode resides.
 *
 * The slot number is encoded in the enode->parent bits 3-6 or 2-6, depending
 * on the parent type.
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @node.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent = (void *)((unsigned long)
					     node->parent & ~MAPLE_NODE_MASK);

	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0 is
 * set, then the alloc contains the number of requested nodes.  If there is an
 * allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}
/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.  Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}
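/*
 * Illustrative sketch (not part of the original source): when no node has
 * been allocated, the request count is tagged into mas->alloc with bit 0
 * set, as described above.  The function name is hypothetical and exists
 * purely for demonstration.
 */
static __maybe_unused void mt_doc_example_alloc_req(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);

	/* No node allocated: the count is stored as (count << 1) | 1. */
	mas_set_alloc_req(&mas, 7);
	WARN_ON((unsigned long)mas.alloc != ((7UL << 1) | 1));
	WARN_ON(mas_alloc_req(&mas) != 7);
	/* A pending request means nothing has been allocated yet. */
	WARN_ON(mas_allocated(&mas) != 0);
}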
/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot offset.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}
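/*
 * Illustrative sketch (not part of the original source): a reader fetches a
 * slot under the RCU read lock, while writers use mas_slot_locked() under
 * the tree lock.  The function name is hypothetical and exists purely for
 * demonstration.
 */
static __maybe_unused void *mt_doc_example_read_slot(struct ma_state *mas,
						     unsigned char offset)
{
	void __rcu **slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	void *entry;

	rcu_read_lock();
	entry = mt_slot(mas->tree, slots, offset);
	rcu_read_unlock();

	return entry;
}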
/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}
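/*
 * Illustrative sketch (not part of the original source): collecting a node
 * on a topiary dead list and then free-walking the list.  Assumes struct
 * ma_topiary from <linux/maple_tree.h>; the function name is hypothetical
 * and exists purely for demonstration.
 */
static __maybe_unused void mt_doc_example_topiary(struct ma_state *mas)
{
	struct maple_node *node = mt_alloc_one(GFP_KERNEL);
	struct ma_topiary mat = { .head = NULL, .tail = NULL,
				  .mtree = mas->tree };

	if (!node)
		return;

	/* mat_add() marks the node dead and links it on the tail. */
	mat_add(&mat, mt_mk_node(node, maple_leaf_64));
	/* Walk the list; each node is RCU freed or pushed back on @mas. */
	mas_mat_free(mas, &mat);
}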
/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		pivots = ma_pivots(a_node, a_type);
		a_enode = mt_mk_node(a_node, a_type);

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}

	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}
/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->node_count) {
			unsigned int offset = node->node_count;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		node->node_count = 0;
		node->request_count = 0;
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
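/*
 * Illustrative sketch (not part of the original source): the request, bulk
 * allocate, pop and push cycle of the per-state node pool.  The function
 * name is hypothetical and exists purely for demonstration.
 */
static __maybe_unused void mt_doc_example_node_pool(struct maple_tree *mt)
{
	struct maple_node *node;
	MA_STATE(mas, mt, 0, 0);

	mas_set_alloc_req(&mas, 3);		/* Ask for three nodes. */
	mas_alloc_nodes(&mas, GFP_KERNEL);
	if (mas_is_err(&mas))
		return;

	node = mas_pop_node(&mas);		/* Take one from the pool... */
	mas_push_node(&mas, node);		/* ...and return it for reuse. */
	WARN_ON(mas_allocated(&mas) != 3);
	mas_destroy(&mas);			/* Release the unused pool. */
}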
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;

		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}
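/*
 * Illustrative sketch (not part of the original source): how a walker acts
 * on the mas_start() return contract documented above.  The function name
 * is hypothetical and exists purely for demonstration.
 */
static __maybe_unused void *mt_doc_example_walk_start(struct ma_state *mas)
{
	void *entry = mas_start(mas);

	if (mas_is_none(mas))		/* Empty tree. */
		return NULL;

	if (mas_is_ptr(mas))		/* Single entry stored at index 0;
					 * NULL if the walk began past it. */
		return entry;

	/* Otherwise mas->node holds the safe root and the walk can descend. */
	return NULL;
}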
/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}
/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}
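/*
 * Illustrative sketch (not part of the original source): the gap tracking
 * maintained above is what lets allocation trees find free ranges quickly.
 * Assumes only the public API from <linux/maple_tree.h>; the function name
 * and the expected start value are hypothetical.
 */
static __maybe_unused void mt_doc_example_alloc_range(void)
{
	struct maple_tree tree;
	unsigned long start;

	mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
	/* Occupy [0, 9] so the first tracked gap starts at index 10. */
	mtree_store_range(&tree, 0, 9, xa_mk_value(1), GFP_KERNEL);
	/* Find and fill a 5-slot gap within [0, 63]; expected start is 10. */
	if (!mtree_alloc_range(&tree, &start, xa_mk_value(2), 5, 0, 63,
			       GFP_KERNEL))
		WARN_ON(start != 10);
	mtree_destroy(&tree);
}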
/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false),
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced)
		mas_free(mas, old_enode);
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}
/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side will
		 * still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}
/*
 * mab_calc_split() - Calculate the split location and if there needs to be two
 * splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum range value of the data in @bn
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of a
	 * store of a range that overwrites the end and beginning of two full
	 * nodes.  The result is a set of entries that cannot be stored in 2
	 * nodes.  Sometimes, these two nodes can also be located in different
	 * parent nodes which are also full.  This can carry upwards all the
	 * way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero
		 * based.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
		unsigned char mas_end, struct maple_big_node *b_node,
		unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored in
 * a node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
		unsigned char mab_start, unsigned char mab_end,
		struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they are
 * the new entries which need to be followed to find other incorrectly set
 * parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicates the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}
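/*
 * Illustrative sketch (not part of the original source): bulk insertion
 * with a reserved entry count, which is what puts the state into the
 * MA_STATE_BULK mode handled above.  Assumes the public
 * mas_expected_entries(), mas_store_gfp() and mas_destroy() API; the
 * function name and range layout are hypothetical.
 */
static __maybe_unused int mt_doc_example_bulk(struct maple_tree *mt,
					      void **entries, unsigned long nr)
{
	unsigned long i;
	int ret;
	MA_STATE(mas, mt, 0, 0);

	mas_lock(&mas);
	ret = mas_expected_entries(&mas, nr);	/* Enter bulk insert mode. */
	if (ret)
		goto unlock;

	for (i = 0; i < nr; i++) {
		/* Hypothetical layout: entry i owns [i * 10, i * 10 + 9]. */
		mas_set_range(&mas, i * 10, i * 10 + 9);
		ret = mas_store_gfp(&mas, entries[i], GFP_KERNEL);
		if (ret)
			break;
	}

	mas_destroy(&mas);	/* Leave bulk mode; may rebalance as above. */
unlock:
	mas_unlock(&mas);
	return ret;
}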
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * Sets @b_node->b_end to the actual end of the data stored in @b_node.
 */
static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
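 *
 * Example: a hypothetical sketch (not part of this file) of visiting all of
 * a node's siblings to the right while holding the tree lock, where
 * inspect_node() is a made-up helper and @mas has already been walked to a
 * node:
 *
 *	do {
 *		inspect_node(&mas);
 *	} while (mas_next_sibling(&mas));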
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count;
	unsigned char offset;
	unsigned long index, min, max;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	min = mas_safe_min(mas, wr_mas->pivots, offset);
	if (unlikely(offset == count))
		goto max;

	max = wr_mas->pivots[offset];
	index = mas->index;
	if (unlikely(index <= max))
		goto done;

	if (unlikely(!max && offset))
		goto max;

	min = max + 1;
	while (++offset < count) {
		max = wr_mas->pivots[offset];
		if (index <= max)
			goto done;
		else if (unlikely(!max))
			break;

		min = max + 1;
	}

max:
	max = mas->max;
done:
	wr_mas->r_max = max;
	wr_mas->r_min = min;
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot, inclusive
 * @end: The end slot, inclusive
 */
static inline void mas_topiary_range(struct ma_state *mas,
		struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
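 *
 * Every live node found strictly between the left and right offsets is
 * added to @mast->destroy so that the whole replaced span can be
 * destroy-walked and freed afterwards.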
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
 * favouring the node to the right. Check the nodes to the right then the left
 * at each level upwards until the root is reached. Free and destroy as needed.
 * Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
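 *
 * Return: true if a rebalance against a neighbour was performed, false if
 * the walk reached the root without finding one.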
2422 */ 2423 static inline 2424 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2425 { 2426 struct ma_state r_tmp = *mast->orig_r; 2427 struct ma_state l_tmp = *mast->orig_l; 2428 struct maple_enode *ancestor = NULL; 2429 unsigned char start, end; 2430 unsigned char depth = 0; 2431 2432 r_tmp = *mast->orig_r; 2433 l_tmp = *mast->orig_l; 2434 do { 2435 mas_ascend(mast->orig_r); 2436 mas_ascend(mast->orig_l); 2437 depth++; 2438 if (!ancestor && 2439 (mast->orig_r->node == mast->orig_l->node)) { 2440 ancestor = mast->orig_r->node; 2441 end = mast->orig_r->offset - 1; 2442 start = mast->orig_l->offset + 1; 2443 } 2444 2445 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2446 if (!ancestor) { 2447 ancestor = mast->orig_r->node; 2448 start = 0; 2449 } 2450 2451 mast->orig_r->offset++; 2452 do { 2453 mas_descend(mast->orig_r); 2454 mast->orig_r->offset = 0; 2455 depth--; 2456 } while (depth); 2457 2458 mast_rebalance_next(mast); 2459 do { 2460 unsigned char l_off = 0; 2461 struct maple_enode *child = r_tmp.node; 2462 2463 mas_ascend(&r_tmp); 2464 if (ancestor == r_tmp.node) 2465 l_off = start; 2466 2467 if (r_tmp.offset) 2468 r_tmp.offset--; 2469 2470 if (l_off < r_tmp.offset) 2471 mas_topiary_range(&r_tmp, mast->destroy, 2472 l_off, r_tmp.offset); 2473 2474 if (l_tmp.node != child) 2475 mat_add(mast->free, child); 2476 2477 } while (r_tmp.node != ancestor); 2478 2479 *mast->orig_l = l_tmp; 2480 return true; 2481 2482 } else if (mast->orig_l->offset != 0) { 2483 if (!ancestor) { 2484 ancestor = mast->orig_l->node; 2485 end = mas_data_end(mast->orig_l); 2486 } 2487 2488 mast->orig_l->offset--; 2489 do { 2490 mas_descend(mast->orig_l); 2491 mast->orig_l->offset = 2492 mas_data_end(mast->orig_l); 2493 depth--; 2494 } while (depth); 2495 2496 mast_rebalance_prev(mast); 2497 do { 2498 unsigned char r_off; 2499 struct maple_enode *child = l_tmp.node; 2500 2501 mas_ascend(&l_tmp); 2502 if (ancestor == l_tmp.node) 2503 r_off = end; 2504 else 2505 r_off = mas_data_end(&l_tmp); 2506 2507 if (l_tmp.offset < r_off) 2508 l_tmp.offset++; 2509 2510 if (l_tmp.offset < r_off) 2511 mas_topiary_range(&l_tmp, mast->destroy, 2512 l_tmp.offset, r_off); 2513 2514 if (r_tmp.node != child) 2515 mat_add(mast->free, child); 2516 2517 } while (l_tmp.node != ancestor); 2518 2519 *mast->orig_r = r_tmp; 2520 return true; 2521 } 2522 } while (!mte_is_root(mast->orig_r->node)); 2523 2524 *mast->orig_r = r_tmp; 2525 *mast->orig_l = l_tmp; 2526 return false; 2527 } 2528 2529 /* 2530 * mast_ascend_free() - Add current original maple state nodes to the free list 2531 * and ascend. 2532 * @mast: the maple subtree state. 2533 * 2534 * Ascend the original left and right sides and add the previous nodes to the 2535 * free list. Set the slots to point to the correct location in the new nodes. 
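 *
 * The maple write state walks below only re-locate the offsets of
 * @mast->orig_l and @mast->orig_r in the new parents; no entries are copied
 * here.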
 */
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
	struct maple_enode *left = mast->orig_l->node;
	struct maple_enode *right = mast->orig_r->node;

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);
	mat_add(mast->free, left);

	if (left != right)
		mat_add(mast->free, right);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set slot to ensure all
	 * of the nodes contents are freed or destroyed.
	 */
	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}

/*
 * mas_new_ma_node() - Create and return a new maple node. Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state. This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}

/*
 * mas_mab_to_node() - Set up right and middle nodes
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: the minimum of the data in @b_node, used to calculate the split
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
		struct maple_big_node *b_node, struct maple_enode **left,
		struct maple_enode **right, struct maple_enode **middle,
		unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}

/*
 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end
 * pointer.
 * @b_node: the big node to add the entry
 * @mas: the maple state to get the pivot (mas->max)
 * @entry: the entry to add, if NULL nothing happens.
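 *
 * The gap is only recorded for allocation trees (mt_is_alloc()), where the
 * biggest gap must be kept up to date in the parent metadata.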
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}

/*
 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent
 * of @mas->node to either @left or @right, depending on @slot and @split
 *
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mte_set_parent(mas->node, left, *slot);
	else if (right)
		mte_set_parent(mas->node, right, (*slot) - split - 1);

	(*slot)++;
}

/*
 * mte_mid_split_check() - Check if the next node passes the mid-split
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the split.
 * @slot: The offset.
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}

/*
 * mast_set_split_parents() - Helper function to set three nodes' parents. Slot
 * is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node (may be NULL)
 * @right: the right node
 * @split: the split location between @left and the next node
 * @mid_split: the split location between @middle and @right
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}

/*
 * mas_wmb_replace() - Write memory barrier and replace
 * @mas: The maple state
 * @free: the maple topiary list of nodes to free
 * @destroy: The maple topiary list of nodes to destroy (walk and free)
 *
 * Updates gap as necessary.
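 *
 * The write barrier guarantees that concurrent readers observe the old
 * nodes marked dead before the replacement data is published.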
2744 */ 2745 static inline void mas_wmb_replace(struct ma_state *mas, 2746 struct ma_topiary *free, 2747 struct ma_topiary *destroy) 2748 { 2749 /* All nodes must see old data as dead prior to replacing that data */ 2750 smp_wmb(); /* Needed for RCU */ 2751 2752 /* Insert the new data in the tree */ 2753 mas_replace(mas, true); 2754 2755 if (!mte_is_leaf(mas->node)) 2756 mas_descend_adopt(mas); 2757 2758 mas_mat_free(mas, free); 2759 2760 if (destroy) 2761 mas_mat_destroy(mas, destroy); 2762 2763 if (mte_is_leaf(mas->node)) 2764 return; 2765 2766 mas_update_gap(mas); 2767 } 2768 2769 /* 2770 * mast_new_root() - Set a new tree root during subtree creation 2771 * @mast: The maple subtree state 2772 * @mas: The maple state 2773 */ 2774 static inline void mast_new_root(struct maple_subtree_state *mast, 2775 struct ma_state *mas) 2776 { 2777 mas_mn(mast->l)->parent = 2778 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2779 if (!mte_dead_node(mast->orig_l->node) && 2780 !mte_is_root(mast->orig_l->node)) { 2781 do { 2782 mast_ascend_free(mast); 2783 mast_topiary(mast); 2784 } while (!mte_is_root(mast->orig_l->node)); 2785 } 2786 if ((mast->orig_l->node != mas->node) && 2787 (mast->l->depth > mas_mt_height(mas))) { 2788 mat_add(mast->free, mas->node); 2789 } 2790 } 2791 2792 /* 2793 * mast_cp_to_nodes() - Copy data out to nodes. 2794 * @mast: The maple subtree state 2795 * @left: The left encoded maple node 2796 * @middle: The middle encoded maple node 2797 * @right: The right encoded maple node 2798 * @split: The location to split between left and (middle ? middle : right) 2799 * @mid_split: The location to split between middle and right. 2800 */ 2801 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2802 struct maple_enode *left, struct maple_enode *middle, 2803 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2804 { 2805 bool new_lmax = true; 2806 2807 mast->l->node = mte_node_or_none(left); 2808 mast->m->node = mte_node_or_none(middle); 2809 mast->r->node = mte_node_or_none(right); 2810 2811 mast->l->min = mast->orig_l->min; 2812 if (split == mast->bn->b_end) { 2813 mast->l->max = mast->orig_r->max; 2814 new_lmax = false; 2815 } 2816 2817 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2818 2819 if (middle) { 2820 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2821 mast->m->min = mast->bn->pivot[split] + 1; 2822 split = mid_split; 2823 } 2824 2825 mast->r->max = mast->orig_r->max; 2826 if (right) { 2827 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2828 mast->r->min = mast->bn->pivot[split] + 1; 2829 } 2830 } 2831 2832 /* 2833 * mast_combine_cp_left - Copy in the original left side of the tree into the 2834 * combined data set in the maple subtree state big node. 2835 * @mast: The maple subtree state 2836 */ 2837 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2838 { 2839 unsigned char l_slot = mast->orig_l->offset; 2840 2841 if (!l_slot) 2842 return; 2843 2844 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2845 } 2846 2847 /* 2848 * mast_combine_cp_right: Copy in the original right side of the tree into the 2849 * combined data set in the maple subtree state big node. 
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
		return;

	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
		   mt_slot_count(mast->orig_r->node), mast->bn,
		   mast->bn->b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_sufficient() - Check if the maple subtree state has enough data in the
 * big node to create at least one sufficient node.
 * @mast: the maple subtree state
 */
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
		return true;

	return false;
}

/*
 * mast_overflow() - Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 */
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
		return true;

	return false;
}

static inline void *mtree_range_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next, *last;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max, min;
	unsigned long prev_max, prev_min;

	next = mas->node;
	min = mas->min;
	max = mas->max;
	do {
		offset = 0;
		last = next;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		if (pivots[offset] >= mas->index) {
			prev_max = max;
			prev_min = min;
			max = pivots[offset];
			goto next;
		}

		do {
			offset++;
		} while ((offset < end) && (pivots[offset] < mas->index));

		prev_min = min;
		min = pivots[offset - 1] + 1;
		prev_max = max;
		if (likely(offset < end && pivots[offset]))
			max = pivots[offset];

next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	mas->offset = offset;
	mas->index = min;
	mas->last = max;
	mas->min = prev_min;
	mas->max = prev_max;
	mas->node = last;
	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
 * @mas: The starting maple state
 * @mast: The maple_subtree_state, keeps track of 4 maple states.
 * @count: The estimated count of iterations needed.
 *
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit. First @b_node is split into two entries which are inserted into the
 * next iteration of the loop. @b_node is returned populated with the final
 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree. The update of
 * orig_l_mas->last is used to find the slots that will need to be either
 * freed or destroyed.
 * orig_l_mas->depth keeps track of the height of the new sub-tree in case
 * the sub-tree becomes the full tree.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static int mas_spanning_rebalance(struct ma_state *mas,
		struct maple_subtree_state *mast, unsigned char count)
{
	unsigned char split, mid_split;
	unsigned char slot = 0;
	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;

	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
	MA_TOPIARY(free, mas->tree);
	MA_TOPIARY(destroy, mas->tree);

	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the
	 * same level. Rebalancing is done by use of the ``struct ma_topiary``.
	 */
	mast->l = &l_mas;
	mast->m = &m_mas;
	mast->r = &r_mas;
	mast->free = &free;
	mast->destroy = &destroy;
	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;

	/* Check if this is not root and has sufficient data. */
	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
		mast_spanning_rebalance(mast);

	mast->orig_l->depth = 0;

	/*
	 * Each level of the tree is examined and balanced; pushing data to the
	 * left or right, or rebalancing against the left or right nodes, is
	 * employed to avoid rippling up the tree and to limit the amount of
	 * churn. Once a new sub-section of the tree is created, there may be a
	 * mix of new and old nodes. The old nodes will have the incorrect
	 * parent pointers and currently be in two trees: the original tree and
	 * the partially new tree. To remedy the parent pointers in the old
	 * tree, the new data is swapped into the active tree and a walk down
	 * the tree is performed and the parent pointers are updated.
	 * See mas_descend_adopt() for more information.
	 */
	while (count--) {
		mast->bn->b_end--;
		mast->bn->type = mte_node_type(mast->orig_l->node);
		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
					&mid_split, mast->orig_l->min);
		mast_set_split_parents(mast, left, middle, right, split,
				       mid_split);
		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);

		/*
		 * Reset the big node and copy in data from the next level of
		 * the tree for the next iteration.
		 */
		memset(mast->bn, 0, sizeof(struct maple_big_node));
		mast->bn->type = mte_node_type(left);
		mast->orig_l->depth++;

		/* Root already stored in l->node. */
		if (mas_is_root_limits(mast->l))
			goto new_root;

		mast_ascend_free(mast);
		mast_combine_cp_left(mast);
		l_mas.offset = mast->bn->b_end;
		mab_set_b_end(mast->bn, &l_mas, left);
		mab_set_b_end(mast->bn, &m_mas, middle);
		mab_set_b_end(mast->bn, &r_mas, right);

		/* Copy anything necessary out of the right node. */
		mast_combine_cp_right(mast);
		mast_topiary(mast);
		mast->orig_l->last = mast->orig_l->max;

		if (mast_sufficient(mast))
			continue;

		if (mast_overflow(mast))
			continue;

		/* May be a new root stored in mast->bn */
		if (mas_is_root_limits(mast->orig_l))
			break;

		mast_spanning_rebalance(mast);

		/* rebalancing from other nodes may require another loop.
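		 * If the iteration budget ran out, force one more pass so the
		 * newly combined data is still written into nodes.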
*/ 3057 if (!count) 3058 count++; 3059 } 3060 3061 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3062 mte_node_type(mast->orig_l->node)); 3063 mast->orig_l->depth++; 3064 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3065 mte_set_parent(left, l_mas.node, slot); 3066 if (middle) 3067 mte_set_parent(middle, l_mas.node, ++slot); 3068 3069 if (right) 3070 mte_set_parent(right, l_mas.node, ++slot); 3071 3072 if (mas_is_root_limits(mast->l)) { 3073 new_root: 3074 mast_new_root(mast, mas); 3075 } else { 3076 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3077 } 3078 3079 if (!mte_dead_node(mast->orig_l->node)) 3080 mat_add(&free, mast->orig_l->node); 3081 3082 mas->depth = mast->orig_l->depth; 3083 *mast->orig_l = l_mas; 3084 mte_set_node_dead(mas->node); 3085 3086 /* Set up mas for insertion. */ 3087 mast->orig_l->depth = mas->depth; 3088 mast->orig_l->alloc = mas->alloc; 3089 *mas = *mast->orig_l; 3090 mas_wmb_replace(mas, &free, &destroy); 3091 mtree_range_walk(mas); 3092 return mast->bn->b_end; 3093 } 3094 3095 /* 3096 * mas_rebalance() - Rebalance a given node. 3097 * @mas: The maple state 3098 * @b_node: The big maple node. 3099 * 3100 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3101 * Continue upwards until tree is sufficient. 3102 * 3103 * Return: the number of elements in b_node during the last loop. 3104 */ 3105 static inline int mas_rebalance(struct ma_state *mas, 3106 struct maple_big_node *b_node) 3107 { 3108 char empty_count = mas_mt_height(mas); 3109 struct maple_subtree_state mast; 3110 unsigned char shift, b_end = ++b_node->b_end; 3111 3112 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3113 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3114 3115 trace_ma_op(__func__, mas); 3116 3117 /* 3118 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3119 * against the node to the right if it exists, otherwise the node to the 3120 * left of this node is rebalanced against this node. If rebalancing 3121 * causes just one node to be produced instead of two, then the parent 3122 * is also examined and rebalanced if it is insufficient. Every level 3123 * tries to combine the data in the same way. If one node contains the 3124 * entire range of the tree, then that node is used as a new root node. 3125 */ 3126 mas_node_count(mas, 1 + empty_count * 3); 3127 if (mas_is_err(mas)) 3128 return 0; 3129 3130 mast.orig_l = &l_mas; 3131 mast.orig_r = &r_mas; 3132 mast.bn = b_node; 3133 mast.bn->type = mte_node_type(mas->node); 3134 3135 l_mas = r_mas = *mas; 3136 3137 if (mas_next_sibling(&r_mas)) { 3138 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3139 r_mas.last = r_mas.index = r_mas.max; 3140 } else { 3141 mas_prev_sibling(&l_mas); 3142 shift = mas_data_end(&l_mas) + 1; 3143 mab_shift_right(b_node, shift); 3144 mas->offset += shift; 3145 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3146 b_node->b_end = shift + b_end; 3147 l_mas.index = l_mas.last = l_mas.min; 3148 } 3149 3150 return mas_spanning_rebalance(mas, &mast, empty_count); 3151 } 3152 3153 /* 3154 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3155 * state. 3156 * @mas: The maple state 3157 * @end: The end of the left-most node. 3158 * 3159 * During a mass-insert event (such as forking), it may be necessary to 3160 * rebalance the left-most node when it is not sufficient. 
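 *
 * The data is pulled in from the left sibling: the tail of the left node is
 * copied in front of this node's entries and, in RCU mode, the two nodes
 * and their parent are replaced rather than modified in place.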
 */
static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end)
{
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node reuse, *newnode, *parent, *new_left, *left, *node;
	struct maple_enode *eparent;
	unsigned char offset, tmp, split = mt_slots[mt] / 2;
	void __rcu **l_slots, **slots;
	unsigned long *l_pivs, *pivs, gap;
	bool in_rcu = mt_in_rcu(mas->tree);

	MA_STATE(l_mas, mas->tree, mas->index, mas->last);

	l_mas = *mas;
	mas_prev_sibling(&l_mas);

	/* set up node. */
	if (in_rcu) {
		/* Allocate for both left and right as well as parent. */
		mas_node_count(mas, 3);
		if (mas_is_err(mas))
			return;

		newnode = mas_pop_node(mas);
	} else {
		newnode = &reuse;
	}

	node = mas_mn(mas);
	newnode->parent = node->parent;
	slots = ma_slots(newnode, mt);
	pivs = ma_pivots(newnode, mt);
	left = mas_mn(&l_mas);
	l_slots = ma_slots(left, mt);
	l_pivs = ma_pivots(left, mt);
	if (!l_slots[split])
		split++;
	tmp = mas_data_end(&l_mas) - split;

	memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp);
	memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp);
	pivs[tmp] = l_mas.max;
	memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end);
	memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end);

	l_mas.max = l_pivs[split];
	mas->min = l_mas.max + 1;
	eparent = mt_mk_node(mte_parent(l_mas.node),
			     mas_parent_enum(&l_mas, l_mas.node));
	tmp += end;
	if (!in_rcu) {
		unsigned char max_p = mt_pivots[mt];
		unsigned char max_s = mt_slots[mt];

		if (tmp < max_p)
			memset(pivs + tmp, 0,
			       sizeof(unsigned long) * (max_p - tmp));

		if (tmp < max_s)
			memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp));

		memcpy(node, newnode, sizeof(struct maple_node));
		ma_set_meta(node, mt, 0, tmp - 1);
		mte_set_pivot(eparent, mte_parent_slot(l_mas.node),
			      l_pivs[split]);

		/* Remove data from l_pivs. */
		tmp = split + 1;
		memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp));
		memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp));
		ma_set_meta(left, mt, 0, split);

		goto done;
	}

	/* RCU requires replacing both l_mas, mas, and parent. */
	mas->node = mt_mk_node(newnode, mt);
	ma_set_meta(newnode, mt, 0, tmp);

	new_left = mas_pop_node(mas);
	new_left->parent = left->parent;
	mt = mte_node_type(l_mas.node);
	slots = ma_slots(new_left, mt);
	pivs = ma_pivots(new_left, mt);
	memcpy(slots, l_slots, sizeof(void *) * split);
	memcpy(pivs, l_pivs, sizeof(unsigned long) * split);
	ma_set_meta(new_left, mt, 0, split);
	l_mas.node = mt_mk_node(new_left, mt);

	/* replace parent.
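	 * Under RCU the parent node is swapped out as well so that a reader
	 * never observes a mix of old and new child slots.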
*/ 3251 offset = mte_parent_slot(mas->node); 3252 mt = mas_parent_enum(&l_mas, l_mas.node); 3253 parent = mas_pop_node(mas); 3254 slots = ma_slots(parent, mt); 3255 pivs = ma_pivots(parent, mt); 3256 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3257 rcu_assign_pointer(slots[offset], mas->node); 3258 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3259 pivs[offset - 1] = l_mas.max; 3260 eparent = mt_mk_node(parent, mt); 3261 done: 3262 gap = mas_leaf_max_gap(mas); 3263 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3264 gap = mas_leaf_max_gap(&l_mas); 3265 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3266 mas_ascend(mas); 3267 3268 if (in_rcu) 3269 mas_replace(mas, false); 3270 3271 mas_update_gap(mas); 3272 } 3273 3274 /* 3275 * mas_split_final_node() - Split the final node in a subtree operation. 3276 * @mast: the maple subtree state 3277 * @mas: The maple state 3278 * @height: The height of the tree in case it's a new root. 3279 */ 3280 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3281 struct ma_state *mas, int height) 3282 { 3283 struct maple_enode *ancestor; 3284 3285 if (mte_is_root(mas->node)) { 3286 if (mt_is_alloc(mas->tree)) 3287 mast->bn->type = maple_arange_64; 3288 else 3289 mast->bn->type = maple_range_64; 3290 mas->depth = height; 3291 } 3292 /* 3293 * Only a single node is used here, could be root. 3294 * The Big_node data should just fit in a single node. 3295 */ 3296 ancestor = mas_new_ma_node(mas, mast->bn); 3297 mte_set_parent(mast->l->node, ancestor, mast->l->offset); 3298 mte_set_parent(mast->r->node, ancestor, mast->r->offset); 3299 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3300 3301 mast->l->node = ancestor; 3302 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3303 mas->offset = mast->bn->b_end - 1; 3304 return true; 3305 } 3306 3307 /* 3308 * mast_fill_bnode() - Copy data into the big node in the subtree state 3309 * @mast: The maple subtree state 3310 * @mas: the maple state 3311 * @skip: The number of entries to skip for new nodes insertion. 3312 */ 3313 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3314 struct ma_state *mas, 3315 unsigned char skip) 3316 { 3317 bool cp = true; 3318 struct maple_enode *old = mas->node; 3319 unsigned char split; 3320 3321 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3322 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3323 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3324 mast->bn->b_end = 0; 3325 3326 if (mte_is_root(mas->node)) { 3327 cp = false; 3328 } else { 3329 mas_ascend(mas); 3330 mat_add(mast->free, old); 3331 mas->offset = mte_parent_slot(mas->node); 3332 } 3333 3334 if (cp && mast->l->offset) 3335 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3336 3337 split = mast->bn->b_end; 3338 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3339 mast->r->offset = mast->bn->b_end; 3340 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3341 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3342 cp = false; 3343 3344 if (cp) 3345 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3346 mast->bn, mast->bn->b_end); 3347 3348 mast->bn->b_end--; 3349 mast->bn->type = mte_node_type(mas->node); 3350 } 3351 3352 /* 3353 * mast_split_data() - Split the data in the subtree state big node into regular 3354 * nodes. 
3355 * @mast: The maple subtree state 3356 * @mas: The maple state 3357 * @split: The location to split the big node 3358 */ 3359 static inline void mast_split_data(struct maple_subtree_state *mast, 3360 struct ma_state *mas, unsigned char split) 3361 { 3362 unsigned char p_slot; 3363 3364 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3365 mte_set_pivot(mast->r->node, 0, mast->r->max); 3366 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3367 mast->l->offset = mte_parent_slot(mas->node); 3368 mast->l->max = mast->bn->pivot[split]; 3369 mast->r->min = mast->l->max + 1; 3370 if (mte_is_leaf(mas->node)) 3371 return; 3372 3373 p_slot = mast->orig_l->offset; 3374 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3375 &p_slot, split); 3376 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3377 &p_slot, split); 3378 } 3379 3380 /* 3381 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3382 * data to the right or left node if there is room. 3383 * @mas: The maple state 3384 * @height: The current height of the maple state 3385 * @mast: The maple subtree state 3386 * @left: Push left or not. 3387 * 3388 * Keeping the height of the tree low means faster lookups. 3389 * 3390 * Return: True if pushed, false otherwise. 3391 */ 3392 static inline bool mas_push_data(struct ma_state *mas, int height, 3393 struct maple_subtree_state *mast, bool left) 3394 { 3395 unsigned char slot_total = mast->bn->b_end; 3396 unsigned char end, space, split; 3397 3398 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3399 tmp_mas = *mas; 3400 tmp_mas.depth = mast->l->depth; 3401 3402 if (left && !mas_prev_sibling(&tmp_mas)) 3403 return false; 3404 else if (!left && !mas_next_sibling(&tmp_mas)) 3405 return false; 3406 3407 end = mas_data_end(&tmp_mas); 3408 slot_total += end; 3409 space = 2 * mt_slot_count(mas->node) - 2; 3410 /* -2 instead of -1 to ensure there isn't a triple split */ 3411 if (ma_is_leaf(mast->bn->type)) 3412 space--; 3413 3414 if (mas->max == ULONG_MAX) 3415 space--; 3416 3417 if (slot_total >= space) 3418 return false; 3419 3420 /* Get the data; Fill mast->bn */ 3421 mast->bn->b_end++; 3422 if (left) { 3423 mab_shift_right(mast->bn, end + 1); 3424 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3425 mast->bn->b_end = slot_total + 1; 3426 } else { 3427 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3428 } 3429 3430 /* Configure mast for splitting of mast->bn */ 3431 split = mt_slots[mast->bn->type] - 2; 3432 if (left) { 3433 /* Switch mas to prev node */ 3434 mat_add(mast->free, mas->node); 3435 *mas = tmp_mas; 3436 /* Start using mast->l for the left side. */ 3437 tmp_mas.node = mast->l->node; 3438 *mast->l = tmp_mas; 3439 } else { 3440 mat_add(mast->free, tmp_mas.node); 3441 tmp_mas.node = mast->r->node; 3442 *mast->r = tmp_mas; 3443 split = slot_total - split; 3444 } 3445 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3446 /* Update parent slot for split calculation. */ 3447 if (left) 3448 mast->orig_l->offset += end + 1; 3449 3450 mast_split_data(mast, mas, split); 3451 mast_fill_bnode(mast, mas, 2); 3452 mas_split_final_node(mast, mas, height + 1); 3453 return true; 3454 } 3455 3456 /* 3457 * mas_split() - Split data that is too big for one node into two. 3458 * @mas: The maple state 3459 * @b_node: The maple big node 3460 * Return: 1 on success, 0 on failure. 
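 *
 * @b_node is expected to already hold more data than fits in a single node;
 * see mas_commit_b_node() for the dispatch between rebalance, split and
 * node reuse.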
3461 */ 3462 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3463 { 3464 struct maple_subtree_state mast; 3465 int height = 0; 3466 unsigned char mid_split, split = 0; 3467 3468 /* 3469 * Splitting is handled differently from any other B-tree; the Maple 3470 * Tree splits upwards. Splitting up means that the split operation 3471 * occurs when the walk of the tree hits the leaves and not on the way 3472 * down. The reason for splitting up is that it is impossible to know 3473 * how much space will be needed until the leaf is (or leaves are) 3474 * reached. Since overwriting data is allowed and a range could 3475 * overwrite more than one range or result in changing one entry into 3 3476 * entries, it is impossible to know if a split is required until the 3477 * data is examined. 3478 * 3479 * Splitting is a balancing act between keeping allocations to a minimum 3480 * and avoiding a 'jitter' event where a tree is expanded to make room 3481 * for an entry followed by a contraction when the entry is removed. To 3482 * accomplish the balance, there are empty slots remaining in both left 3483 * and right nodes after a split. 3484 */ 3485 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3486 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3487 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3488 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3489 MA_TOPIARY(mat, mas->tree); 3490 3491 trace_ma_op(__func__, mas); 3492 mas->depth = mas_mt_height(mas); 3493 /* Allocation failures will happen early. */ 3494 mas_node_count(mas, 1 + mas->depth * 2); 3495 if (mas_is_err(mas)) 3496 return 0; 3497 3498 mast.l = &l_mas; 3499 mast.r = &r_mas; 3500 mast.orig_l = &prev_l_mas; 3501 mast.orig_r = &prev_r_mas; 3502 mast.free = &mat; 3503 mast.bn = b_node; 3504 3505 while (height++ <= mas->depth) { 3506 if (mt_slots[b_node->type] > b_node->b_end) { 3507 mas_split_final_node(&mast, mas, height); 3508 break; 3509 } 3510 3511 l_mas = r_mas = *mas; 3512 l_mas.node = mas_new_ma_node(mas, b_node); 3513 r_mas.node = mas_new_ma_node(mas, b_node); 3514 /* 3515 * Another way that 'jitter' is avoided is to terminate a split up early if the 3516 * left or right node has space to spare. This is referred to as "pushing left" 3517 * or "pushing right" and is similar to the B* tree, except the nodes left or 3518 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3519 * is a significant savings. 3520 */ 3521 /* Try to push left. */ 3522 if (mas_push_data(mas, height, &mast, true)) 3523 break; 3524 3525 /* Try to push right. */ 3526 if (mas_push_data(mas, height, &mast, false)) 3527 break; 3528 3529 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3530 mast_split_data(&mast, mas, split); 3531 /* 3532 * Usually correct, mab_mas_cp in the above call overwrites 3533 * r->max. 3534 */ 3535 mast.r->max = mas->max; 3536 mast_fill_bnode(&mast, mas, 1); 3537 prev_l_mas = *mast.l; 3538 prev_r_mas = *mast.r; 3539 } 3540 3541 /* Set the original node as dead */ 3542 mat_add(mast.free, mas->node); 3543 mas->node = l_mas.node; 3544 mas_wmb_replace(mas, mast.free, NULL); 3545 mtree_range_walk(mas); 3546 return 1; 3547 } 3548 3549 /* 3550 * mas_reuse_node() - Reuse the node to store the data. 3551 * @wr_mas: The maple write state 3552 * @bn: The maple big node 3553 * @end: The end of the data. 3554 * 3555 * Will always return false in RCU mode. 3556 * 3557 * Return: True if node was reused, false otherwise. 
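 *
 * Reuse is only safe when no concurrent readers can hold a reference to
 * the node, hence the unconditional bail-out when the tree is in RCU mode.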
3558 */ 3559 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3560 struct maple_big_node *bn, unsigned char end) 3561 { 3562 /* Need to be rcu safe. */ 3563 if (mt_in_rcu(wr_mas->mas->tree)) 3564 return false; 3565 3566 if (end > bn->b_end) { 3567 int clear = mt_slots[wr_mas->type] - bn->b_end; 3568 3569 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3570 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3571 } 3572 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3573 return true; 3574 } 3575 3576 /* 3577 * mas_commit_b_node() - Commit the big node into the tree. 3578 * @wr_mas: The maple write state 3579 * @b_node: The maple big node 3580 * @end: The end of the data. 3581 */ 3582 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas, 3583 struct maple_big_node *b_node, unsigned char end) 3584 { 3585 struct maple_node *node; 3586 unsigned char b_end = b_node->b_end; 3587 enum maple_type b_type = b_node->type; 3588 3589 if ((b_end < mt_min_slots[b_type]) && 3590 (!mte_is_root(wr_mas->mas->node)) && 3591 (mas_mt_height(wr_mas->mas) > 1)) 3592 return mas_rebalance(wr_mas->mas, b_node); 3593 3594 if (b_end >= mt_slots[b_type]) 3595 return mas_split(wr_mas->mas, b_node); 3596 3597 if (mas_reuse_node(wr_mas, b_node, end)) 3598 goto reuse_node; 3599 3600 mas_node_count(wr_mas->mas, 1); 3601 if (mas_is_err(wr_mas->mas)) 3602 return 0; 3603 3604 node = mas_pop_node(wr_mas->mas); 3605 node->parent = mas_mn(wr_mas->mas)->parent; 3606 wr_mas->mas->node = mt_mk_node(node, b_type); 3607 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3608 mas_replace(wr_mas->mas, false); 3609 reuse_node: 3610 mas_update_gap(wr_mas->mas); 3611 return 1; 3612 } 3613 3614 /* 3615 * mas_root_expand() - Expand a root to a node 3616 * @mas: The maple state 3617 * @entry: The entry to store into the tree 3618 */ 3619 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3620 { 3621 void *contents = mas_root_locked(mas); 3622 enum maple_type type = maple_leaf_64; 3623 struct maple_node *node; 3624 void __rcu **slots; 3625 unsigned long *pivots; 3626 int slot = 0; 3627 3628 mas_node_count(mas, 1); 3629 if (unlikely(mas_is_err(mas))) 3630 return 0; 3631 3632 node = mas_pop_node(mas); 3633 pivots = ma_pivots(node, type); 3634 slots = ma_slots(node, type); 3635 node->parent = ma_parent_ptr( 3636 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3637 mas->node = mt_mk_node(node, type); 3638 3639 if (mas->index) { 3640 if (contents) { 3641 rcu_assign_pointer(slots[slot], contents); 3642 if (likely(mas->index > 1)) 3643 slot++; 3644 } 3645 pivots[slot++] = mas->index - 1; 3646 } 3647 3648 rcu_assign_pointer(slots[slot], entry); 3649 mas->offset = slot; 3650 pivots[slot] = mas->last; 3651 if (mas->last != ULONG_MAX) 3652 slot++; 3653 mas->depth = 1; 3654 mas_set_height(mas); 3655 3656 /* swap the new root into the tree */ 3657 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3658 ma_set_meta(node, maple_leaf_64, 0, slot); 3659 return slot; 3660 } 3661 3662 static inline void mas_store_root(struct ma_state *mas, void *entry) 3663 { 3664 if (likely((mas->last != 0) || (mas->index != 0))) 3665 mas_root_expand(mas, entry); 3666 else if (((unsigned long) (entry) & 3) == 2) 3667 mas_root_expand(mas, entry); 3668 else { 3669 rcu_assign_pointer(mas->tree->ma_root, entry); 3670 mas->node = MAS_START; 3671 } 3672 } 3673 3674 /* 3675 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3676 * spans the node. 
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or a
 * write of a %NULL that will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max;
	unsigned long last = wr_mas->mas->last;
	unsigned long piv = wr_mas->r_max;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot */
	if (piv > last)
		return false;

	max = wr_mas->mas->max;
	if (unlikely(ma_is_leaf(type))) {
		/* Fits in the node, but may span slots. */
		if (last < max)
			return false;

		/* Writes to the end of the node but not null. */
		if ((last == max) && entry)
			return false;

		/*
		 * Writing ULONG_MAX is not a spanning write regardless of the
		 * value being written as long as the range fits in the node.
		 */
		if ((last == ULONG_MAX) && (last == max))
			return false;
	} else if (piv == last) {
		if (entry)
			return false;

		/* Detect spanning store wr walk */
		if (last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, piv, entry);

	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;
		mas_wr_walk_traverse(wr_mas);
	}
	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
 * @l_wr_mas: The left maple write state
 * @r_wr_mas: The right maple write state
 */
static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
					    struct ma_wr_state *r_wr_mas)
{
	struct ma_state *r_mas = r_wr_mas->mas;
	struct ma_state *l_mas = l_wr_mas->mas;
	unsigned char l_slot;

	l_slot = l_mas->offset;
	if (!l_wr_mas->content)
		l_mas->index = l_wr_mas->r_min;

	if ((l_mas->index == l_wr_mas->r_min) &&
	    (l_slot &&
	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
		if (l_slot > 1)
			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
		else
			l_mas->index = l_mas->min;

		l_mas->offset = l_slot - 1;
	}

	if (!r_wr_mas->content) {
		if (r_mas->last < r_wr_mas->r_max)
			r_mas->last = r_wr_mas->r_max;
		r_mas->offset++;
	} else if ((r_mas->last == r_wr_mas->r_max) &&
		   (r_mas->last < r_mas->max) &&
		   !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
					     r_wr_mas->type, r_mas->offset + 1);
		r_mas->offset++;
	}
}

static inline void *mas_state_walk(struct ma_state *mas)
{
	void *entry;

	entry = mas_start(mas);
	if (mas_is_none(mas))
		return NULL;

	if (mas_is_ptr(mas))
		return entry;

	return mtree_range_walk(mas);
}

/*
 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
 * to date.
 * @mas: The maple state.
 *
 * Note: Leaves @mas in an undesirable state.
 * Return: The entry for @mas->index or %NULL on dead node.
 */
static inline void *mtree_lookup_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max;

	next = mas->node;
	max = ULONG_MAX;
	do {
		offset = 0;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		if (pivots[offset] >= mas->index)
			goto next;

		do {
			offset++;
		} while ((offset < end) && (pivots[offset] < mas->index));

		/* The pivot at the data end is implied by the node maximum. */
		if (likely(offset < end && pivots[offset]))
			max = pivots[offset];

next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_new_root() - Create a new root node that only contains the entry passed
 * in.
 * @mas: The maple state
 * @entry: The entry to store.
 *
 * Only valid when @mas->index == 0 and @mas->last == ULONG_MAX.
 *
 * Return: 0 on error, 1 on success.
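 *
 * Storing %NULL over the full range empties the tree entirely rather than
 * allocating a new root node.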
3905 */ 3906 static inline int mas_new_root(struct ma_state *mas, void *entry) 3907 { 3908 struct maple_enode *root = mas_root_locked(mas); 3909 enum maple_type type = maple_leaf_64; 3910 struct maple_node *node; 3911 void __rcu **slots; 3912 unsigned long *pivots; 3913 3914 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3915 mas->depth = 0; 3916 mas_set_height(mas); 3917 rcu_assign_pointer(mas->tree->ma_root, entry); 3918 mas->node = MAS_START; 3919 goto done; 3920 } 3921 3922 mas_node_count(mas, 1); 3923 if (mas_is_err(mas)) 3924 return 0; 3925 3926 node = mas_pop_node(mas); 3927 pivots = ma_pivots(node, type); 3928 slots = ma_slots(node, type); 3929 node->parent = ma_parent_ptr( 3930 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3931 mas->node = mt_mk_node(node, type); 3932 rcu_assign_pointer(slots[0], entry); 3933 pivots[0] = mas->last; 3934 mas->depth = 1; 3935 mas_set_height(mas); 3936 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3937 3938 done: 3939 if (xa_is_node(root)) 3940 mte_destroy_walk(root, mas->tree); 3941 3942 return 1; 3943 } 3944 /* 3945 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3946 * and new nodes where necessary, then place the sub-tree in the actual tree. 3947 * Note that mas is expected to point to the node which caused the store to 3948 * span. 3949 * @wr_mas: The maple write state 3950 * 3951 * Return: 0 on error, positive on success. 3952 */ 3953 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3954 { 3955 struct maple_subtree_state mast; 3956 struct maple_big_node b_node; 3957 struct ma_state *mas; 3958 unsigned char height; 3959 3960 /* Left and Right side of spanning store */ 3961 MA_STATE(l_mas, NULL, 0, 0); 3962 MA_STATE(r_mas, NULL, 0, 0); 3963 3964 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3965 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3966 3967 /* 3968 * A store operation that spans multiple nodes is called a spanning 3969 * store and is handled early in the store call stack by the function 3970 * mas_is_span_wr(). When a spanning store is identified, the maple 3971 * state is duplicated. The first maple state walks the left tree path 3972 * to ``index``, the duplicate walks the right tree path to ``last``. 3973 * The data in the two nodes are combined into a single node, two nodes, 3974 * or possibly three nodes (see the 3-way split above). A ``NULL`` 3975 * written to the last entry of a node is considered a spanning store as 3976 * a rebalance is required for the operation to complete and an overflow 3977 * of data may happen. 3978 */ 3979 mas = wr_mas->mas; 3980 trace_ma_op(__func__, mas); 3981 3982 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3983 return mas_new_root(mas, wr_mas->entry); 3984 /* 3985 * Node rebalancing may occur due to this store, so there may be three new 3986 * entries per level plus a new root. 3987 */ 3988 height = mas_mt_height(mas); 3989 mas_node_count(mas, 1 + height * 3); 3990 if (mas_is_err(mas)) 3991 return 0; 3992 3993 /* 3994 * Set up right side. Need to get to the next offset after the spanning 3995 * store to ensure it's not NULL and to combine both the next node and 3996 * the node with the start together. 3997 */ 3998 r_mas = *mas; 3999 /* Avoid overflow, walk to next slot in the tree. */ 4000 if (r_mas.last + 1) 4001 r_mas.last++; 4002 4003 r_mas.index = r_mas.last; 4004 mas_wr_walk_index(&r_wr_mas); 4005 r_mas.last = r_mas.index = mas->last; 4006 4007 /* Set up left side. 
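	 * The left write state walks down to mas->index so that, together
	 * with the right walk above, both ends of the span are located.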
*/ 4008 l_mas = *mas; 4009 mas_wr_walk_index(&l_wr_mas); 4010 4011 if (!wr_mas->entry) { 4012 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); 4013 mas->offset = l_mas.offset; 4014 mas->index = l_mas.index; 4015 mas->last = l_mas.last = r_mas.last; 4016 } 4017 4018 /* expanding NULLs may make this cover the entire range */ 4019 if (!l_mas.index && r_mas.last == ULONG_MAX) { 4020 mas_set_range(mas, 0, ULONG_MAX); 4021 return mas_new_root(mas, wr_mas->entry); 4022 } 4023 4024 memset(&b_node, 0, sizeof(struct maple_big_node)); 4025 /* Copy l_mas and store the value in b_node. */ 4026 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); 4027 /* Copy r_mas into b_node. */ 4028 if (r_mas.offset <= r_wr_mas.node_end) 4029 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, 4030 &b_node, b_node.b_end + 1); 4031 else 4032 b_node.b_end++; 4033 4034 /* Stop spanning searches by searching for just index. */ 4035 l_mas.index = l_mas.last = mas->index; 4036 4037 mast.bn = &b_node; 4038 mast.orig_l = &l_mas; 4039 mast.orig_r = &r_mas; 4040 /* Combine l_mas and r_mas and split them up evenly again. */ 4041 return mas_spanning_rebalance(mas, &mast, height + 1); 4042 } 4043 4044 /* 4045 * mas_wr_node_store() - Attempt to store the value in a node 4046 * @wr_mas: The maple write state 4047 * 4048 * Attempts to reuse the node, but may allocate. 4049 * 4050 * Return: True if stored, false otherwise 4051 */ 4052 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) 4053 { 4054 struct ma_state *mas = wr_mas->mas; 4055 void __rcu **dst_slots; 4056 unsigned long *dst_pivots; 4057 unsigned char dst_offset; 4058 unsigned char new_end = wr_mas->node_end; 4059 unsigned char offset; 4060 unsigned char node_slots = mt_slots[wr_mas->type]; 4061 struct maple_node reuse, *newnode; 4062 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; 4063 bool in_rcu = mt_in_rcu(mas->tree); 4064 4065 offset = mas->offset; 4066 if (mas->last == wr_mas->r_max) { 4067 /* runs right to the end of the node */ 4068 if (mas->last == mas->max) 4069 new_end = offset; 4070 /* don't copy this offset */ 4071 wr_mas->offset_end++; 4072 } else if (mas->last < wr_mas->r_max) { 4073 /* new range ends in this range */ 4074 if (unlikely(wr_mas->r_max == ULONG_MAX)) 4075 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); 4076 4077 new_end++; 4078 } else { 4079 if (wr_mas->end_piv == mas->last) 4080 wr_mas->offset_end++; 4081 4082 new_end -= wr_mas->offset_end - offset - 1; 4083 } 4084 4085 /* new range starts within a range */ 4086 if (wr_mas->r_min < mas->index) 4087 new_end++; 4088 4089 /* Not enough room */ 4090 if (new_end >= node_slots) 4091 return false; 4092 4093 /* Not enough data. */ 4094 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && 4095 !(mas->mas_flags & MA_STATE_BULK)) 4096 return false; 4097 4098 /* set up node. 
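	 * In RCU mode a replacement node must be allocated so that readers
	 * never see a partially written node; otherwise the node is rebuilt
	 * in a stack copy and memcpy'd back over the original.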
*/ 4099 if (in_rcu) { 4100 mas_node_count(mas, 1); 4101 if (mas_is_err(mas)) 4102 return false; 4103 4104 newnode = mas_pop_node(mas); 4105 } else { 4106 memset(&reuse, 0, sizeof(struct maple_node)); 4107 newnode = &reuse; 4108 } 4109 4110 newnode->parent = mas_mn(mas)->parent; 4111 dst_pivots = ma_pivots(newnode, wr_mas->type); 4112 dst_slots = ma_slots(newnode, wr_mas->type); 4113 /* Copy from start to insert point */ 4114 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); 4115 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); 4116 dst_offset = offset; 4117 4118 /* Handle insert of new range starting after old range */ 4119 if (wr_mas->r_min < mas->index) { 4120 mas->offset++; 4121 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); 4122 dst_pivots[dst_offset++] = mas->index - 1; 4123 } 4124 4125 /* Store the new entry and range end. */ 4126 if (dst_offset < max_piv) 4127 dst_pivots[dst_offset] = mas->last; 4128 mas->offset = dst_offset; 4129 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry); 4130 4131 /* 4132 * this range wrote to the end of the node or it overwrote the rest of 4133 * the data 4134 */ 4135 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { 4136 new_end = dst_offset; 4137 goto done; 4138 } 4139 4140 dst_offset++; 4141 /* Copy to the end of node if necessary. */ 4142 copy_size = wr_mas->node_end - wr_mas->offset_end + 1; 4143 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, 4144 sizeof(void *) * copy_size); 4145 if (dst_offset < max_piv) { 4146 if (copy_size > max_piv - dst_offset) 4147 copy_size = max_piv - dst_offset; 4148 4149 memcpy(dst_pivots + dst_offset, 4150 wr_mas->pivots + wr_mas->offset_end, 4151 sizeof(unsigned long) * copy_size); 4152 } 4153 4154 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) 4155 dst_pivots[new_end] = mas->max; 4156 4157 done: 4158 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4159 if (in_rcu) { 4160 mas->node = mt_mk_node(newnode, wr_mas->type); 4161 mas_replace(mas, false); 4162 } else { 4163 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4164 } 4165 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4166 mas_update_gap(mas); 4167 return true; 4168 } 4169 4170 /* 4171 * mas_wr_slot_store: Attempt to store a value in a slot. 4172 * @wr_mas: the maple write state 4173 * 4174 * Return: True if stored, false otherwise 4175 */ 4176 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4177 { 4178 struct ma_state *mas = wr_mas->mas; 4179 unsigned long lmax; /* Logical max. */ 4180 unsigned char offset = mas->offset; 4181 4182 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || 4183 (offset != wr_mas->node_end))) 4184 return false; 4185 4186 if (offset == wr_mas->node_end - 1) 4187 lmax = mas->max; 4188 else 4189 lmax = wr_mas->pivots[offset + 1]; 4190 4191 /* going to overwrite too many slots. */ 4192 if (lmax < mas->last) 4193 return false; 4194 4195 if (wr_mas->r_min == mas->index) { 4196 /* overwriting two or more ranges with one. */ 4197 if (lmax == mas->last) 4198 return false; 4199 4200 /* Overwriting all of offset and a portion of offset + 1. */ 4201 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); 4202 wr_mas->pivots[offset] = mas->last; 4203 goto done; 4204 } 4205 4206 /* Doesn't end on the next range end. 
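	 * A two-slot overwrite must end exactly on the following pivot;
	 * wider writes are left to the node store or the slow path.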
*/ 4207 if (lmax != mas->last) 4208 return false; 4209 4210 /* Overwriting a portion of offset and all of offset + 1 */ 4211 if ((offset + 1 < mt_pivots[wr_mas->type]) && 4212 (wr_mas->entry || wr_mas->pivots[offset + 1])) 4213 wr_mas->pivots[offset + 1] = mas->last; 4214 4215 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); 4216 wr_mas->pivots[offset] = mas->index - 1; 4217 mas->offset++; /* Keep mas accurate. */ 4218 4219 done: 4220 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4221 mas_update_gap(mas); 4222 return true; 4223 } 4224 4225 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) 4226 { 4227 while ((wr_mas->mas->last > wr_mas->end_piv) && 4228 (wr_mas->offset_end < wr_mas->node_end)) 4229 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; 4230 4231 if (wr_mas->mas->last > wr_mas->end_piv) 4232 wr_mas->end_piv = wr_mas->mas->max; 4233 } 4234 4235 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) 4236 { 4237 struct ma_state *mas = wr_mas->mas; 4238 4239 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end]) 4240 mas->last = wr_mas->end_piv; 4241 4242 /* Check next slot(s) if we are overwriting the end */ 4243 if ((mas->last == wr_mas->end_piv) && 4244 (wr_mas->node_end != wr_mas->offset_end) && 4245 !wr_mas->slots[wr_mas->offset_end + 1]) { 4246 wr_mas->offset_end++; 4247 if (wr_mas->offset_end == wr_mas->node_end) 4248 mas->last = mas->max; 4249 else 4250 mas->last = wr_mas->pivots[wr_mas->offset_end]; 4251 wr_mas->end_piv = mas->last; 4252 } 4253 4254 if (!wr_mas->content) { 4255 /* If this one is null, the next and prev are not */ 4256 mas->index = wr_mas->r_min; 4257 } else { 4258 /* Check prev slot if we are overwriting the start */ 4259 if (mas->index == wr_mas->r_min && mas->offset && 4260 !wr_mas->slots[mas->offset - 1]) { 4261 mas->offset--; 4262 wr_mas->r_min = mas->index = 4263 mas_safe_min(mas, wr_mas->pivots, mas->offset); 4264 wr_mas->r_max = wr_mas->pivots[mas->offset]; 4265 } 4266 } 4267 } 4268 4269 static inline bool mas_wr_append(struct ma_wr_state *wr_mas) 4270 { 4271 unsigned char end = wr_mas->node_end; 4272 unsigned char new_end = end + 1; 4273 struct ma_state *mas = wr_mas->mas; 4274 unsigned char node_pivots = mt_pivots[wr_mas->type]; 4275 4276 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { 4277 if (new_end < node_pivots) 4278 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4279 4280 if (new_end < node_pivots) 4281 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4282 4283 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); 4284 mas->offset = new_end; 4285 wr_mas->pivots[end] = mas->index - 1; 4286 4287 return true; 4288 } 4289 4290 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) { 4291 if (new_end < node_pivots) 4292 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4293 4294 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); 4295 if (new_end < node_pivots) 4296 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4297 4298 wr_mas->pivots[end] = mas->last; 4299 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); 4300 return true; 4301 } 4302 4303 return false; 4304 } 4305 4306 /* 4307 * mas_wr_bnode() - Slow path for a modification. 4308 * @wr_mas: The write maple state 4309 * 4310 * This is where split, rebalance end up. 
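 *
 * The existing entries and the new entry are copied into a struct
 * maple_big_node on the stack via mas_store_b_node(), then written back with
 * mas_commit_b_node(), which splits or rebalances as required.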
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}

static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	unsigned char node_slots;
	unsigned char node_size;
	struct ma_state *mas = wr_mas->mas;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/* Attempt to append */
	node_slots = mt_slots[wr_mas->type];
	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
	if (mas->max == ULONG_MAX)
		node_size++;

	/* slot and node store will not fit, go to the slow path */
	if (unlikely(node_size >= node_slots))
		goto slow_path;

	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
		if (!wr_mas->content || !wr_mas->entry)
			mas_update_gap(mas);
		return;
	}

	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
		return;
	else if (mas_wr_node_store(wr_mas))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}

/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state, which holds the maple state and the entry
 * to store.
 *
 * Return: The contents that were stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas->end_piv = wr_mas->r_max;
	mas_wr_end_piv(wr_mas);

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);

	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}

/**
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL on success, otherwise the contents that already exist at the
 * requested index. The maple state needs to be checked for error conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree. If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only a
	 * single pivot needs to be inserted (as well as writing the entry).
	 * If the new range is within a gap but does not touch any other
	 * ranges, then two pivots need to be inserted: the start - 1, and the
	 * end. As usual, the entry must be written. Most operations require a
	 * new node to be allocated and replace an existing node to ensure RCU
	 * safety, when in RCU mode. The exception to requiring a newly
	 * allocated node is when inserting at the end of a node (appending).
	 * When done carefully, appending can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	if (!entry)
		return NULL;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;
}

/*
 * mas_prev_node() - Find the prev non-null entry at the same level in the
 * tree.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 if the node is dead, 0 otherwise.
 */
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	struct maple_enode *enode;
	unsigned long *pivots;

	if (mas_is_none(mas))
		return 0;

	level = 0;
	do {
		node = mas_mn(mas);
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up.
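		 * Stop at the first ancestor that has an earlier sibling,
		 * i.e. a non-zero offset, then step back one slot.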
		 */
		if (unlikely(mas_ascend(mas)))
			return 1;
		offset = mas->offset;
		level++;
	} while (!offset);

	offset--;
	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	mas->max = pivots[offset];
	if (offset)
		mas->min = pivots[offset - 1] + 1;
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (mas->max < min)
		goto no_entry_min;

	while (level > 1) {
		level--;
		enode = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mas->node = enode;
		mt = mte_node_type(mas->node);
		node = mas_mn(mas);
		slots = ma_slots(node, mt);
		pivots = ma_pivots(node, mt);
		offset = ma_data_end(node, mt, pivots, mas->max);
		if (offset)
			mas->min = pivots[offset - 1] + 1;

		if (offset < mt_pivots[mt])
			mas->max = pivots[offset];

		if (mas->max < min)
			goto no_entry;
	}

	mas->node = mas_slot(mas, slots, offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->offset = mas_data_end(mas);
	if (unlikely(mte_dead_node(mas->node)))
		return 1;

	return 0;

no_entry_min:
	mas->offset = offset;
	if (offset)
		mas->min = pivots[offset - 1] + 1;
no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}

/*
 * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
 *
 * The next value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 on dead node, 0 otherwise.
 */
static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
		unsigned long max)
{
	unsigned long min, pivot;
	unsigned long *pivots;
	struct maple_enode *enode;
	int level = 0;
	unsigned char offset;
	enum maple_type mt;
	void __rcu **slots;

	if (mas->max >= max)
		goto no_entry;

	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		min = mas->max + 1;
		if (min > max)
			goto no_entry;

		if (unlikely(mas_ascend(mas)))
			return 1;

		offset = mas->offset;
		level++;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		pivots = ma_pivots(node, mt);
	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));

	slots = ma_slots(node, mt);
	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
	while (unlikely(level > 1)) {
		/* Descend, if necessary */
		enode = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mas->node = enode;
		level--;
		node = mas_mn(mas);
		mt = mte_node_type(mas->node);
		slots = ma_slots(node, mt);
		pivots = ma_pivots(node, mt);
		offset = 0;
		pivot = pivots[0];
	}

	enode = mas_slot(mas, slots, offset);
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = enode;
	mas->min = min;
	mas->max = pivot;
	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}

/*
 * mas_next_nentry() - Get the next node entry.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum value to check
 * @type: The maple node type
 *
 * Sets @mas->offset to the offset of the next node entry, @mas->last to the
 * pivot of the entry.
 *
 * Return: The next entry, %NULL otherwise
 */
static inline void *mas_next_nentry(struct ma_state *mas,
	    struct maple_node *node, unsigned long max, enum maple_type type)
{
	unsigned char count;
	unsigned long pivot;
	unsigned long *pivots;
	void __rcu **slots;
	void *entry;

	if (mas->last == mas->max) {
		mas->index = mas->max;
		return NULL;
	}

	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);
	mas->index = mas_safe_min(mas, pivots, mas->offset);
	count = ma_data_end(node, type, pivots, mas->max);
	if (ma_dead_node(node))
		return NULL;

	if (mas->index > max)
		return NULL;

	if (mas->offset > count)
		return NULL;

	while (mas->offset < count) {
		pivot = pivots[mas->offset];
		entry = mas_slot(mas, slots, mas->offset);
		if (ma_dead_node(node))
			return NULL;

		if (entry)
			goto found;

		if (pivot >= max)
			return NULL;

		mas->index = pivot + 1;
		mas->offset++;
	}

	if (mas->index > mas->max) {
		mas->index = mas->last;
		return NULL;
	}

	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
	entry = mas_slot(mas, slots, mas->offset);
	if (ma_dead_node(node))
		return NULL;

	if (!pivot)
		return NULL;

	if (!entry)
		return NULL;

found:
	mas->last = pivot;
	return entry;
}

static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
	mas_set(mas, index);
	mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;
}

/*
 * mas_next_entry() - Internal function to get the next entry.
 * @mas: The maple state
 * @limit: The maximum range start.
 *
 * Sets @mas->node to the node holding the next entry and @mas->index to the
 * beginning value of the entry's range. Does not check beyond @limit.
 * Sets @mas->index and @mas->last to the limit if it is hit.
 * Restarts on dead nodes.
 *
 * Return: the next entry or %NULL.
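 *
 * This is the walker underneath mas_next() and mas_find(); both rely on it
 * to retry when a dead node is encountered.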
4725 */ 4726 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4727 { 4728 void *entry = NULL; 4729 struct maple_enode *prev_node; 4730 struct maple_node *node; 4731 unsigned char offset; 4732 unsigned long last; 4733 enum maple_type mt; 4734 4735 if (mas->index > limit) { 4736 mas->index = mas->last = limit; 4737 mas_pause(mas); 4738 return NULL; 4739 } 4740 last = mas->last; 4741 retry: 4742 offset = mas->offset; 4743 prev_node = mas->node; 4744 node = mas_mn(mas); 4745 mt = mte_node_type(mas->node); 4746 mas->offset++; 4747 if (unlikely(mas->offset >= mt_slots[mt])) { 4748 mas->offset = mt_slots[mt] - 1; 4749 goto next_node; 4750 } 4751 4752 while (!mas_is_none(mas)) { 4753 entry = mas_next_nentry(mas, node, limit, mt); 4754 if (unlikely(ma_dead_node(node))) { 4755 mas_rewalk(mas, last); 4756 goto retry; 4757 } 4758 4759 if (likely(entry)) 4760 return entry; 4761 4762 if (unlikely((mas->index > limit))) 4763 break; 4764 4765 next_node: 4766 prev_node = mas->node; 4767 offset = mas->offset; 4768 if (unlikely(mas_next_node(mas, node, limit))) { 4769 mas_rewalk(mas, last); 4770 goto retry; 4771 } 4772 mas->offset = 0; 4773 node = mas_mn(mas); 4774 mt = mte_node_type(mas->node); 4775 } 4776 4777 mas->index = mas->last = limit; 4778 mas->offset = offset; 4779 mas->node = prev_node; 4780 return NULL; 4781 } 4782 4783 /* 4784 * mas_prev_nentry() - Get the previous node entry. 4785 * @mas: The maple state. 4786 * @limit: The lower limit to check for a value. 4787 * 4788 * Return: the entry, %NULL otherwise. 4789 */ 4790 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, 4791 unsigned long index) 4792 { 4793 unsigned long pivot, min; 4794 unsigned char offset; 4795 struct maple_node *mn; 4796 enum maple_type mt; 4797 unsigned long *pivots; 4798 void __rcu **slots; 4799 void *entry; 4800 4801 retry: 4802 if (!mas->offset) 4803 return NULL; 4804 4805 mn = mas_mn(mas); 4806 mt = mte_node_type(mas->node); 4807 offset = mas->offset - 1; 4808 if (offset >= mt_slots[mt]) 4809 offset = mt_slots[mt] - 1; 4810 4811 slots = ma_slots(mn, mt); 4812 pivots = ma_pivots(mn, mt); 4813 if (offset == mt_pivots[mt]) 4814 pivot = mas->max; 4815 else 4816 pivot = pivots[offset]; 4817 4818 if (unlikely(ma_dead_node(mn))) { 4819 mas_rewalk(mas, index); 4820 goto retry; 4821 } 4822 4823 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || 4824 !pivot)) 4825 pivot = pivots[--offset]; 4826 4827 min = mas_safe_min(mas, pivots, offset); 4828 entry = mas_slot(mas, slots, offset); 4829 if (unlikely(ma_dead_node(mn))) { 4830 mas_rewalk(mas, index); 4831 goto retry; 4832 } 4833 4834 if (likely(entry)) { 4835 mas->offset = offset; 4836 mas->last = pivot; 4837 mas->index = min; 4838 } 4839 return entry; 4840 } 4841 4842 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) 4843 { 4844 void *entry; 4845 4846 if (mas->index < min) { 4847 mas->index = mas->last = min; 4848 mas->node = MAS_NONE; 4849 return NULL; 4850 } 4851 retry: 4852 while (likely(!mas_is_none(mas))) { 4853 entry = mas_prev_nentry(mas, min, mas->index); 4854 if (unlikely(mas->last < min)) 4855 goto not_found; 4856 4857 if (likely(entry)) 4858 return entry; 4859 4860 if (unlikely(mas_prev_node(mas, min))) { 4861 mas_rewalk(mas, mas->index); 4862 goto retry; 4863 } 4864 4865 mas->offset++; 4866 } 4867 4868 mas->offset--; 4869 not_found: 4870 mas->index = mas->last = min; 4871 return NULL; 4872 } 4873 4874 /* 4875 * mas_rev_awalk() - Internal function. 
Reverse allocation walk. Find the 4876 * highest gap address of a given size in a given node and descend. 4877 * @mas: The maple state 4878 * @size: The needed size. 4879 * 4880 * Return: True if found in a leaf, false otherwise. 4881 * 4882 */ 4883 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) 4884 { 4885 enum maple_type type = mte_node_type(mas->node); 4886 struct maple_node *node = mas_mn(mas); 4887 unsigned long *pivots, *gaps; 4888 void __rcu **slots; 4889 unsigned long gap = 0; 4890 unsigned long max, min; 4891 unsigned char offset; 4892 4893 if (unlikely(mas_is_err(mas))) 4894 return true; 4895 4896 if (ma_is_dense(type)) { 4897 /* dense nodes. */ 4898 mas->offset = (unsigned char)(mas->index - mas->min); 4899 return true; 4900 } 4901 4902 pivots = ma_pivots(node, type); 4903 slots = ma_slots(node, type); 4904 gaps = ma_gaps(node, type); 4905 offset = mas->offset; 4906 min = mas_safe_min(mas, pivots, offset); 4907 /* Skip out of bounds. */ 4908 while (mas->last < min) 4909 min = mas_safe_min(mas, pivots, --offset); 4910 4911 max = mas_safe_pivot(mas, pivots, offset, type); 4912 while (mas->index <= max) { 4913 gap = 0; 4914 if (gaps) 4915 gap = gaps[offset]; 4916 else if (!mas_slot(mas, slots, offset)) 4917 gap = max - min + 1; 4918 4919 if (gap) { 4920 if ((size <= gap) && (size <= mas->last - min + 1)) 4921 break; 4922 4923 if (!gaps) { 4924 /* Skip the next slot, it cannot be a gap. */ 4925 if (offset < 2) 4926 goto ascend; 4927 4928 offset -= 2; 4929 max = pivots[offset]; 4930 min = mas_safe_min(mas, pivots, offset); 4931 continue; 4932 } 4933 } 4934 4935 if (!offset) 4936 goto ascend; 4937 4938 offset--; 4939 max = min - 1; 4940 min = mas_safe_min(mas, pivots, offset); 4941 } 4942 4943 if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) 4944 goto no_space; 4945 4946 if (unlikely(ma_is_leaf(type))) { 4947 mas->offset = offset; 4948 mas->min = min; 4949 mas->max = min + gap - 1; 4950 return true; 4951 } 4952 4953 /* descend, only happens under lock. 
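	 * The allocation walk is only done under the tree lock, so the child
	 * slot cannot be replaced while we move into it.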
*/ 4954 mas->node = mas_slot(mas, slots, offset); 4955 mas->min = min; 4956 mas->max = max; 4957 mas->offset = mas_data_end(mas); 4958 return false; 4959 4960 ascend: 4961 if (!mte_is_root(mas->node)) 4962 return false; 4963 4964 no_space: 4965 mas_set_err(mas, -EBUSY); 4966 return false; 4967 } 4968 4969 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4970 { 4971 enum maple_type type = mte_node_type(mas->node); 4972 unsigned long pivot, min, gap = 0; 4973 unsigned char offset; 4974 unsigned long *gaps; 4975 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 4976 void __rcu **slots = ma_slots(mas_mn(mas), type); 4977 bool found = false; 4978 4979 if (ma_is_dense(type)) { 4980 mas->offset = (unsigned char)(mas->index - mas->min); 4981 return true; 4982 } 4983 4984 gaps = ma_gaps(mte_to_node(mas->node), type); 4985 offset = mas->offset; 4986 min = mas_safe_min(mas, pivots, offset); 4987 for (; offset < mt_slots[type]; offset++) { 4988 pivot = mas_safe_pivot(mas, pivots, offset, type); 4989 if (offset && !pivot) 4990 break; 4991 4992 /* Not within lower bounds */ 4993 if (mas->index > pivot) 4994 goto next_slot; 4995 4996 if (gaps) 4997 gap = gaps[offset]; 4998 else if (!mas_slot(mas, slots, offset)) 4999 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 5000 else 5001 goto next_slot; 5002 5003 if (gap >= size) { 5004 if (ma_is_leaf(type)) { 5005 found = true; 5006 goto done; 5007 } 5008 if (mas->index <= pivot) { 5009 mas->node = mas_slot(mas, slots, offset); 5010 mas->min = min; 5011 mas->max = pivot; 5012 offset = 0; 5013 break; 5014 } 5015 } 5016 next_slot: 5017 min = pivot + 1; 5018 if (mas->last <= pivot) { 5019 mas_set_err(mas, -EBUSY); 5020 return true; 5021 } 5022 } 5023 5024 if (mte_is_root(mas->node)) 5025 found = true; 5026 done: 5027 mas->offset = offset; 5028 return found; 5029 } 5030 5031 /** 5032 * mas_walk() - Search for @mas->index in the tree. 5033 * @mas: The maple state. 5034 * 5035 * mas->index and mas->last will be set to the range if there is a value. If 5036 * mas->node is MAS_NONE, reset to MAS_START. 5037 * 5038 * Return: the entry at the location or %NULL. 5039 */ 5040 void *mas_walk(struct ma_state *mas) 5041 { 5042 void *entry; 5043 5044 retry: 5045 entry = mas_state_walk(mas); 5046 if (mas_is_start(mas)) 5047 goto retry; 5048 5049 if (mas_is_ptr(mas)) { 5050 if (!mas->index) { 5051 mas->last = 0; 5052 } else { 5053 mas->index = 1; 5054 mas->last = ULONG_MAX; 5055 } 5056 return entry; 5057 } 5058 5059 if (mas_is_none(mas)) { 5060 mas->index = 0; 5061 mas->last = ULONG_MAX; 5062 } 5063 5064 return entry; 5065 } 5066 EXPORT_SYMBOL_GPL(mas_walk); 5067 5068 static inline bool mas_rewind_node(struct ma_state *mas) 5069 { 5070 unsigned char slot; 5071 5072 do { 5073 if (mte_is_root(mas->node)) { 5074 slot = mas->offset; 5075 if (!slot) 5076 return false; 5077 } else { 5078 mas_ascend(mas); 5079 slot = mas->offset; 5080 } 5081 } while (!slot); 5082 5083 mas->offset = --slot; 5084 return true; 5085 } 5086 5087 /* 5088 * mas_skip_node() - Internal function. Skip over a node. 5089 * @mas: The maple state. 5090 * 5091 * Return: true if there is another node, false otherwise. 
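 *
 * Ascends while the current offset is past the end of a node, then steps to
 * the next sibling slot and updates the implied min/max of the state.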
5092 */ 5093 static inline bool mas_skip_node(struct ma_state *mas) 5094 { 5095 unsigned char slot, slot_count; 5096 unsigned long *pivots; 5097 enum maple_type mt; 5098 5099 mt = mte_node_type(mas->node); 5100 slot_count = mt_slots[mt] - 1; 5101 do { 5102 if (mte_is_root(mas->node)) { 5103 slot = mas->offset; 5104 if (slot > slot_count) { 5105 mas_set_err(mas, -EBUSY); 5106 return false; 5107 } 5108 } else { 5109 mas_ascend(mas); 5110 slot = mas->offset; 5111 mt = mte_node_type(mas->node); 5112 slot_count = mt_slots[mt] - 1; 5113 } 5114 } while (slot > slot_count); 5115 5116 mas->offset = ++slot; 5117 pivots = ma_pivots(mas_mn(mas), mt); 5118 if (slot > 0) 5119 mas->min = pivots[slot - 1] + 1; 5120 5121 if (slot <= slot_count) 5122 mas->max = pivots[slot]; 5123 5124 return true; 5125 } 5126 5127 /* 5128 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5129 * @size 5130 * @mas: The maple state 5131 * @size: The size of the gap required 5132 * 5133 * Search between @mas->index and @mas->last for a gap of @size. 5134 */ 5135 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5136 { 5137 struct maple_enode *last = NULL; 5138 5139 /* 5140 * There are 4 options: 5141 * go to child (descend) 5142 * go back to parent (ascend) 5143 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5144 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5145 */ 5146 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5147 if (last == mas->node) 5148 mas_skip_node(mas); 5149 else 5150 last = mas->node; 5151 } 5152 } 5153 5154 /* 5155 * mas_fill_gap() - Fill a located gap with @entry. 5156 * @mas: The maple state 5157 * @entry: The value to store 5158 * @slot: The offset into the node to store the @entry 5159 * @size: The size of the entry 5160 * @index: The start location 5161 */ 5162 static inline void mas_fill_gap(struct ma_state *mas, void *entry, 5163 unsigned char slot, unsigned long size, unsigned long *index) 5164 { 5165 MA_WR_STATE(wr_mas, mas, entry); 5166 unsigned char pslot = mte_parent_slot(mas->node); 5167 struct maple_enode *mn = mas->node; 5168 unsigned long *pivots; 5169 enum maple_type ptype; 5170 /* 5171 * mas->index is the start address for the search 5172 * which may no longer be needed. 5173 * mas->last is the end address for the search 5174 */ 5175 5176 *index = mas->index; 5177 mas->last = mas->index + size - 1; 5178 5179 /* 5180 * It is possible that using mas->max and mas->min to correctly 5181 * calculate the index and last will cause an issue in the gap 5182 * calculation, so fix the ma_state here 5183 */ 5184 mas_ascend(mas); 5185 ptype = mte_node_type(mas->node); 5186 pivots = ma_pivots(mas_mn(mas), ptype); 5187 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); 5188 mas->min = mas_safe_min(mas, pivots, pslot); 5189 mas->node = mn; 5190 mas->offset = slot; 5191 mas_wr_store_entry(&wr_mas); 5192 } 5193 5194 /* 5195 * mas_sparse_area() - Internal function. Return upper or lower limit when 5196 * searching for a gap in an empty tree. 
5197 * @mas: The maple state 5198 * @min: the minimum range 5199 * @max: The maximum range 5200 * @size: The size of the gap 5201 * @fwd: Searching forward or back 5202 */ 5203 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, 5204 unsigned long max, unsigned long size, bool fwd) 5205 { 5206 unsigned long start = 0; 5207 5208 if (!unlikely(mas_is_none(mas))) 5209 start++; 5210 /* mas_is_ptr */ 5211 5212 if (start < min) 5213 start = min; 5214 5215 if (fwd) { 5216 mas->index = start; 5217 mas->last = start + size - 1; 5218 return; 5219 } 5220 5221 mas->index = max; 5222 } 5223 5224 /* 5225 * mas_empty_area() - Get the lowest address within the range that is 5226 * sufficient for the size requested. 5227 * @mas: The maple state 5228 * @min: The lowest value of the range 5229 * @max: The highest value of the range 5230 * @size: The size needed 5231 */ 5232 int mas_empty_area(struct ma_state *mas, unsigned long min, 5233 unsigned long max, unsigned long size) 5234 { 5235 unsigned char offset; 5236 unsigned long *pivots; 5237 enum maple_type mt; 5238 5239 if (mas_is_start(mas)) 5240 mas_start(mas); 5241 else if (mas->offset >= 2) 5242 mas->offset -= 2; 5243 else if (!mas_skip_node(mas)) 5244 return -EBUSY; 5245 5246 /* Empty set */ 5247 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5248 mas_sparse_area(mas, min, max, size, true); 5249 return 0; 5250 } 5251 5252 /* The start of the window can only be within these values */ 5253 mas->index = min; 5254 mas->last = max; 5255 mas_awalk(mas, size); 5256 5257 if (unlikely(mas_is_err(mas))) 5258 return xa_err(mas->node); 5259 5260 offset = mas->offset; 5261 if (unlikely(offset == MAPLE_NODE_SLOTS)) 5262 return -EBUSY; 5263 5264 mt = mte_node_type(mas->node); 5265 pivots = ma_pivots(mas_mn(mas), mt); 5266 if (offset) 5267 mas->min = pivots[offset - 1] + 1; 5268 5269 if (offset < mt_pivots[mt]) 5270 mas->max = pivots[offset]; 5271 5272 if (mas->index < mas->min) 5273 mas->index = mas->min; 5274 5275 mas->last = mas->index + size - 1; 5276 return 0; 5277 } 5278 EXPORT_SYMBOL_GPL(mas_empty_area); 5279 5280 /* 5281 * mas_empty_area_rev() - Get the highest address within the range that is 5282 * sufficient for the size requested. 5283 * @mas: The maple state 5284 * @min: The lowest value of the range 5285 * @max: The highest value of the range 5286 * @size: The size needed 5287 */ 5288 int mas_empty_area_rev(struct ma_state *mas, unsigned long min, 5289 unsigned long max, unsigned long size) 5290 { 5291 struct maple_enode *last = mas->node; 5292 5293 if (mas_is_start(mas)) { 5294 mas_start(mas); 5295 mas->offset = mas_data_end(mas); 5296 } else if (mas->offset >= 2) { 5297 mas->offset -= 2; 5298 } else if (!mas_rewind_node(mas)) { 5299 return -EBUSY; 5300 } 5301 5302 /* Empty set. */ 5303 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5304 mas_sparse_area(mas, min, max, size, false); 5305 return 0; 5306 } 5307 5308 /* The start of the window can only be within these values. */ 5309 mas->index = min; 5310 mas->last = max; 5311 5312 while (!mas_rev_awalk(mas, size)) { 5313 if (last == mas->node) { 5314 if (!mas_rewind_node(mas)) 5315 return -EBUSY; 5316 } else { 5317 last = mas->node; 5318 } 5319 } 5320 5321 if (mas_is_err(mas)) 5322 return xa_err(mas->node); 5323 5324 if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) 5325 return -EBUSY; 5326 5327 /* 5328 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If 5329 * the maximum is outside the window we are searching, then use the last 5330 * location in the search. 
	 * mas->max and mas->min are the range of the gap.
	 * mas->index and mas->last are currently set to the search range.
	 */

	/* Trim the upper limit to the max. */
	if (mas->max <= mas->last)
		mas->last = mas->max;

	mas->index = mas->last - size + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);

static inline int mas_alloc(struct ma_state *mas, void *entry,
		unsigned long size, unsigned long *index)
{
	unsigned long min;

	mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_root_expand(mas, entry);
		if (mas_is_err(mas))
			return xa_err(mas->node);

		if (!mas->index)
			return mte_pivot(mas->node, 0);
		return mte_pivot(mas->node, 1);
	}

	/* Must be walking a tree. */
	mas_awalk(mas, size);
	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (mas->offset == MAPLE_NODE_SLOTS)
		goto no_gap;

	/*
	 * At this point, mas->node points to the right node and we have an
	 * offset that has a sufficient gap.
	 */
	min = mas->min;
	if (mas->offset)
		min = mte_pivot(mas->node, mas->offset - 1) + 1;

	if (mas->index < min)
		mas->index = min;

	mas_fill_gap(mas, entry, mas->offset, size, index);
	return 0;

no_gap:
	return -EBUSY;
}

static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min,
				unsigned long max, void *entry,
				unsigned long size, unsigned long *index)
{
	int ret = 0;

	ret = mas_empty_area_rev(mas, min, max, size);
	if (ret)
		return ret;

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (mas->offset == MAPLE_NODE_SLOTS)
		goto no_gap;

	mas_fill_gap(mas, entry, mas->offset, size, index);
	return 0;

no_gap:
	return -EBUSY;
}

/*
 * mas_dead_leaves() - Mark all leaves of a node as dead.
 * @mas: The maple state
 * @slots: Pointer to the slot array
 *
 * Must hold the write lock.
 *
 * Return: The number of leaves marked as dead.
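 *
 * Each leaf slot is marked dead and lowered to a plain struct maple_node
 * pointer so that racing readers restart their walk.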
5417 */ 5418 static inline 5419 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots) 5420 { 5421 struct maple_node *node; 5422 enum maple_type type; 5423 void *entry; 5424 int offset; 5425 5426 for (offset = 0; offset < mt_slot_count(mas->node); offset++) { 5427 entry = mas_slot_locked(mas, slots, offset); 5428 type = mte_node_type(entry); 5429 node = mte_to_node(entry); 5430 /* Use both node and type to catch LE & BE metadata */ 5431 if (!node || !type) 5432 break; 5433 5434 mte_set_node_dead(entry); 5435 smp_wmb(); /* Needed for RCU */ 5436 node->type = type; 5437 rcu_assign_pointer(slots[offset], node); 5438 } 5439 5440 return offset; 5441 } 5442 5443 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset) 5444 { 5445 struct maple_node *node, *next; 5446 void __rcu **slots = NULL; 5447 5448 next = mas_mn(mas); 5449 do { 5450 mas->node = ma_enode_ptr(next); 5451 node = mas_mn(mas); 5452 slots = ma_slots(node, node->type); 5453 next = mas_slot_locked(mas, slots, offset); 5454 offset = 0; 5455 } while (!ma_is_leaf(next->type)); 5456 5457 return slots; 5458 } 5459 5460 static void mt_free_walk(struct rcu_head *head) 5461 { 5462 void __rcu **slots; 5463 struct maple_node *node, *start; 5464 struct maple_tree mt; 5465 unsigned char offset; 5466 enum maple_type type; 5467 MA_STATE(mas, &mt, 0, 0); 5468 5469 node = container_of(head, struct maple_node, rcu); 5470 5471 if (ma_is_leaf(node->type)) 5472 goto free_leaf; 5473 5474 mt_init_flags(&mt, node->ma_flags); 5475 mas_lock(&mas); 5476 start = node; 5477 mas.node = mt_mk_node(node, node->type); 5478 slots = mas_dead_walk(&mas, 0); 5479 node = mas_mn(&mas); 5480 do { 5481 mt_free_bulk(node->slot_len, slots); 5482 offset = node->parent_slot + 1; 5483 mas.node = node->piv_parent; 5484 if (mas_mn(&mas) == node) 5485 goto start_slots_free; 5486 5487 type = mte_node_type(mas.node); 5488 slots = ma_slots(mte_to_node(mas.node), type); 5489 if ((offset < mt_slots[type]) && (slots[offset])) 5490 slots = mas_dead_walk(&mas, offset); 5491 5492 node = mas_mn(&mas); 5493 } while ((node != start) || (node->slot_len < offset)); 5494 5495 slots = ma_slots(node, node->type); 5496 mt_free_bulk(node->slot_len, slots); 5497 5498 start_slots_free: 5499 mas_unlock(&mas); 5500 free_leaf: 5501 mt_free_rcu(&node->rcu); 5502 } 5503 5504 static inline void __rcu **mas_destroy_descend(struct ma_state *mas, 5505 struct maple_enode *prev, unsigned char offset) 5506 { 5507 struct maple_node *node; 5508 struct maple_enode *next = mas->node; 5509 void __rcu **slots = NULL; 5510 5511 do { 5512 mas->node = next; 5513 node = mas_mn(mas); 5514 slots = ma_slots(node, mte_node_type(mas->node)); 5515 next = mas_slot_locked(mas, slots, 0); 5516 if ((mte_dead_node(next))) 5517 next = mas_slot_locked(mas, slots, 1); 5518 5519 mte_set_node_dead(mas->node); 5520 node->type = mte_node_type(mas->node); 5521 node->piv_parent = prev; 5522 node->parent_slot = offset; 5523 offset = 0; 5524 prev = mas->node; 5525 } while (!mte_is_leaf(next)); 5526 5527 return slots; 5528 } 5529 5530 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags, 5531 bool free) 5532 { 5533 void __rcu **slots; 5534 struct maple_node *node = mte_to_node(enode); 5535 struct maple_enode *start; 5536 struct maple_tree mt; 5537 5538 MA_STATE(mas, &mt, 0, 0); 5539 5540 if (mte_is_leaf(enode)) 5541 goto free_leaf; 5542 5543 mt_init_flags(&mt, ma_flags); 5544 mas_lock(&mas); 5545 5546 mas.node = start = enode; 5547 slots = mas_destroy_descend(&mas, start, 0); 5548 
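	/* Walk back up, marking and freeing each level until the start node. */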
	node = mas_mn(&mas);
	do {
		enum maple_type type;
		unsigned char offset;
		struct maple_enode *parent, *tmp;

		node->slot_len = mas_dead_leaves(&mas, slots);
		if (free)
			mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		mas.node = node->piv_parent;
		if (mas_mn(&mas) == node)
			goto start_slots_free;

		type = mte_node_type(mas.node);
		slots = ma_slots(mte_to_node(mas.node), type);
		if (offset >= mt_slots[type])
			goto next;

		tmp = mas_slot_locked(&mas, slots, offset);
		if (mte_node_type(tmp) && mte_to_node(tmp)) {
			parent = mas.node;
			mas.node = tmp;
			slots = mas_destroy_descend(&mas, parent, offset);
		}
next:
		node = mas_mn(&mas);
	} while (start != mas.node);

	node = mas_mn(&mas);
	node->slot_len = mas_dead_leaves(&mas, slots);
	if (free)
		mt_free_bulk(node->slot_len, slots);

start_slots_free:
	mas_unlock(&mas);

free_leaf:
	if (free)
		mt_free_rcu(&node->rcu);
}

/*
 * mte_destroy_walk() - Free a tree or sub-tree.
 * @enode: the encoded maple node (maple_enode) to start
 * @mt: the tree to free - needed for node types.
 *
 * Must hold the write lock.
 */
static inline void mte_destroy_walk(struct maple_enode *enode,
				    struct maple_tree *mt)
{
	struct maple_node *node = mte_to_node(enode);

	if (mt_in_rcu(mt)) {
		mt_destroy_walk(enode, mt->ma_flags, false);
		call_rcu(&node->rcu, mt_free_walk);
	} else {
		mt_destroy_walk(enode, mt->ma_flags, true);
	}
}

static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (unlikely(mas_is_paused(wr_mas->mas)))
		mas_reset(wr_mas->mas);

	if (!mas_is_start(wr_mas->mas)) {
		if (mas_is_none(wr_mas->mas)) {
			mas_reset(wr_mas->mas);
		} else {
			wr_mas->r_max = wr_mas->mas->max;
			wr_mas->type = mte_node_type(wr_mas->mas->node);
			if (mas_is_span_wr(wr_mas))
				mas_reset(wr_mas->mas);
		}
	}
}

/* Interface */

/**
 * mas_store() - Store an @entry.
 * @mas: The maple state.
 * @entry: The entry to store.
 *
 * The @mas->index and @mas->last are used to set the range for the @entry.
 * Note: The @mas should have pre-allocated entries to ensure there is memory
 * to store the entry. Please see mas_expected_entries()/mas_destroy() for
 * more details.
 *
 * Return: the first entry between mas->index and mas->last or %NULL.
 */
void *mas_store(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	trace_ma_write(__func__, mas, 0, entry);
#ifdef CONFIG_DEBUG_MAPLE_TREE
	if (mas->index > mas->last)
		pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry);
	MT_BUG_ON(mas->tree, mas->index > mas->last);
	if (mas->index > mas->last) {
		mas_set_err(mas, -EINVAL);
		return NULL;
	}
#endif

	/*
	 * Storing is the same operation as insert with the added caveat that it
	 * can overwrite entries. Although this seems simple enough, one may
	 * want to examine what happens if a single store operation was to
	 * overwrite multiple entries within a self-balancing B-Tree.
	 */
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	return wr_mas.content;
}
EXPORT_SYMBOL_GPL(mas_store);

/**
 * mas_store_gfp() - Store a value into the tree.
5670 * @mas: The maple state 5671 * @entry: The entry to store 5672 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5673 * 5674 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5675 * be allocated. 5676 */ 5677 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5678 { 5679 MA_WR_STATE(wr_mas, mas, entry); 5680 5681 mas_wr_store_setup(&wr_mas); 5682 trace_ma_write(__func__, mas, 0, entry); 5683 retry: 5684 mas_wr_store_entry(&wr_mas); 5685 if (unlikely(mas_nomem(mas, gfp))) 5686 goto retry; 5687 5688 if (unlikely(mas_is_err(mas))) 5689 return xa_err(mas->node); 5690 5691 return 0; 5692 } 5693 EXPORT_SYMBOL_GPL(mas_store_gfp); 5694 5695 /** 5696 * mas_store_prealloc() - Store a value into the tree using memory 5697 * preallocated in the maple state. 5698 * @mas: The maple state 5699 * @entry: The entry to store. 5700 */ 5701 void mas_store_prealloc(struct ma_state *mas, void *entry) 5702 { 5703 MA_WR_STATE(wr_mas, mas, entry); 5704 5705 mas_wr_store_setup(&wr_mas); 5706 trace_ma_write(__func__, mas, 0, entry); 5707 mas_wr_store_entry(&wr_mas); 5708 BUG_ON(mas_is_err(mas)); 5709 mas_destroy(mas); 5710 } 5711 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5712 5713 /** 5714 * mas_preallocate() - Preallocate enough nodes for a store operation 5715 * @mas: The maple state 5716 * @gfp: The GFP_FLAGS to use for allocations. 5717 * 5718 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5719 */ 5720 int mas_preallocate(struct ma_state *mas, gfp_t gfp) 5721 { 5722 int ret; 5723 5724 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5725 mas->mas_flags |= MA_STATE_PREALLOC; 5726 if (likely(!mas_is_err(mas))) 5727 return 0; 5728 5729 mas_set_alloc_req(mas, 0); 5730 ret = xa_err(mas->node); 5731 mas_reset(mas); 5732 mas_destroy(mas); 5733 mas_reset(mas); 5734 return ret; 5735 } 5736 5737 /* 5738 * mas_destroy() - destroy a maple state. 5739 * @mas: The maple state 5740 * 5741 * Upon completion, check the left-most node and rebalance against the node to 5742 * the right if necessary. Frees any allocated nodes associated with this maple 5743 * state. 5744 */ 5745 void mas_destroy(struct ma_state *mas) 5746 { 5747 struct maple_alloc *node; 5748 unsigned long total; 5749 5750 /* 5751 * When using mas_for_each() to insert an expected number of elements, 5752 * it is possible that the number inserted is less than the expected 5753 * number. To fix an invalid final node, a check is performed here to 5754 * rebalance the previous node with the final node. 5755 */ 5756 if (mas->mas_flags & MA_STATE_REBALANCE) { 5757 unsigned char end; 5758 5759 if (mas_is_start(mas)) 5760 mas_start(mas); 5761 5762 mtree_range_walk(mas); 5763 end = mas_data_end(mas) + 1; 5764 if (end < mt_min_slot_count(mas->node) - 1) 5765 mas_destroy_rebalance(mas, end); 5766 5767 mas->mas_flags &= ~MA_STATE_REBALANCE; 5768 } 5769 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5770 5771 total = mas_allocated(mas); 5772 while (total) { 5773 node = mas->alloc; 5774 mas->alloc = node->slot[0]; 5775 if (node->node_count > 1) { 5776 size_t count = node->node_count - 1; 5777 5778 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5779 total -= count; 5780 } 5781 kmem_cache_free(maple_node_cache, node); 5782 total--; 5783 } 5784 5785 mas->alloc = NULL; 5786 } 5787 EXPORT_SYMBOL_GPL(mas_destroy); 5788 5789 /* 5790 * mas_expected_entries() - Set the expected number of entries that will be inserted. 
 * @mas: The maple state
 * @nr_entries: The number of expected entries.
 *
 * This will attempt to pre-allocate enough nodes to store the expected number
 * of entries. The allocations will occur using the bulk allocator interface
 * for speed. Please call mas_destroy() on the @mas after inserting the entries
 * to ensure any unused nodes are freed.
 *
 * Return: 0 on success, -ENOMEM if memory could not be allocated.
 */
int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
{
	int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2;
	struct maple_enode *enode = mas->node;
	int nr_nodes;
	int ret;

	/*
	 * Sometimes it is necessary to duplicate a tree to a new tree, such as
	 * forking a process and duplicating the VMAs from one tree to a new
	 * tree. When such a situation arises, it is known that the new tree is
	 * not going to be used until the entire tree is populated. For
	 * performance reasons, it is best to use a bulk load with RCU disabled.
	 * This allows for optimistic splitting that favours the left and reuse
	 * of nodes during the operation.
	 */

	/* Optimize splitting for bulk insert in-order */
	mas->mas_flags |= MA_STATE_BULK;

	/*
	 * Avoid overflow, assume a gap between each entry and a trailing null.
	 * If this is wrong, it just means allocation can happen during
	 * insertion of entries.
	 */
	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
	if (!mt_is_alloc(mas->tree))
		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;

	/* Leaves; reduce slots to keep space for expansion */
	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
	/* Internal nodes */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	/* Add working room for split (2 nodes) + new parents */
	mas_node_count(mas, nr_nodes + 3);

	/* Detect if allocations run out */
	mas->mas_flags |= MA_STATE_PREALLOC;

	if (!mas_is_err(mas))
		return 0;

	ret = xa_err(mas->node);
	mas->node = enode;
	mas_destroy(mas);
	return ret;
}
EXPORT_SYMBOL_GPL(mas_expected_entries);

/**
 * mas_next() - Get the next entry.
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Returns the next entry after @mas->index.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next(struct ma_state *mas, unsigned long max)
{
	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas))
		mas_walk(mas); /* Retries on dead nodes handled by mas_walk */

	if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->index = 1;
			mas->last = ULONG_MAX;
		}
		return NULL;
	}

	if (mas->last == ULONG_MAX)
		return NULL;

	/* Retries on dead nodes handled by mas_next_entry */
	return mas_next_entry(mas, max);
}
EXPORT_SYMBOL_GPL(mas_next);

/**
 * mt_next() - get the next value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @max: The maximum index to check
 *
 * Return: The entry after @index or %NULL if nothing is found.
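 *
 * Sketch (illustrative): mt_next(&mt, 5, ULONG_MAX) takes the RCU read lock,
 * walks to index 5, and returns the first entry stored after it, or %NULL.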
 */
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_next(&mas, max);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_next);

/**
 * mas_prev() - Get the previous entry
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on
 * non-searchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev(struct ma_state *mas, unsigned long min)
{
	if (!mas->index) {
		/* Nothing comes before 0 */
		mas->last = 0;
		mas->node = MAS_NONE;
		return NULL;
	}

	if (unlikely(mas_is_ptr(mas)))
		return NULL;

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas)) {
		mas_walk(mas);
		if (!mas->index)
			return NULL;
	}

	if (mas_is_ptr(mas)) {
		if (!mas->index) {
			mas->last = 0;
			return NULL;
		}

		mas->index = mas->last = 0;
		return mas_root_locked(mas);
	}
	return mas_prev_entry(mas, min);
}
EXPORT_SYMBOL_GPL(mas_prev);

/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Return: The entry before @index or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);

/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock. If most entries
 * found during a walk require you to call mas_pause(), the mt_for_each()
 * iterator may be more appropriate.
 */
void mas_pause(struct ma_state *mas)
{
	mas->node = MAS_PAUSE;
}
EXPORT_SYMBOL_GPL(mas_pause);

/**
 * mas_find() - On the first call, find the entry at or after mas->index up to
 * %max. Otherwise, find the entry after mas->index.
 * @mas: The maple state
 * @max: The maximum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
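 *
 * Sketch of a typical walk (the mas_for_each() iterator expands to this
 * pattern; do_something() is a placeholder):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		do_something(entry);
 *	rcu_read_unlock();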
 */
void *mas_find(struct ma_state *mas, unsigned long max)
{
	if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->last == ULONG_MAX)) {
			mas->node = MAS_NONE;
			return NULL;
		}
		mas->node = MAS_START;
		mas->index = ++mas->last;
	}

	if (unlikely(mas_is_none(mas)))
		mas->node = MAS_START;

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		void *entry;

		if (mas->index > max)
			return NULL;

		entry = mas_walk(mas);
		if (entry)
			return entry;
	}

	if (unlikely(!mas_searchable(mas)))
		return NULL;

	/* Retries on dead nodes handled by mas_next_entry */
	return mas_next_entry(mas, max);
}
EXPORT_SYMBOL_GPL(mas_find);

/**
 * mas_find_rev() - On the first call, find the first non-null entry at or
 * below mas->index down to %min. Otherwise find the first non-null entry
 * below mas->index down to %min.
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * If an entry exists, last and index are updated accordingly.
 * May set @mas->node to MAS_NONE.
 *
 * Return: The entry or %NULL.
 */
void *mas_find_rev(struct ma_state *mas, unsigned long min)
{
	if (unlikely(mas_is_paused(mas))) {
		if (unlikely(mas->last == ULONG_MAX)) {
			mas->node = MAS_NONE;
			return NULL;
		}
		mas->node = MAS_START;
		mas->last = --mas->index;
	}

	if (unlikely(mas_is_start(mas))) {
		/* First run or continue */
		void *entry;

		if (mas->index < min)
			return NULL;

		entry = mas_walk(mas);
		if (entry)
			return entry;
	}

	if (unlikely(!mas_searchable(mas)))
		return NULL;

	if (mas->index < min)
		return NULL;

	/* Retries on dead nodes handled by mas_prev_entry */
	return mas_prev_entry(mas, min);
}
EXPORT_SYMBOL_GPL(mas_find_rev);

/**
 * mas_erase() - Find the range in which index resides and erase the entire
 * range.
 * @mas: The maple state
 *
 * Must hold the write lock.
 * Searches for @mas->index, sets @mas->index and @mas->last to the range and
 * erases that range.
 *
 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are
 * updated.
 */
void *mas_erase(struct ma_state *mas)
{
	void *entry;
	MA_WR_STATE(wr_mas, mas, NULL);

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	/* Retry unnecessary when holding the write lock. */
	entry = mas_state_walk(mas);
	if (!entry)
		return NULL;

write_retry:
	/* Must reset to ensure spanning writes of last slot are detected */
	mas_reset(mas);
	mas_wr_store_setup(&wr_mas);
	mas_wr_store_entry(&wr_mas);
	if (mas_nomem(mas, GFP_KERNEL))
		goto write_retry;

	return entry;
}
EXPORT_SYMBOL_GPL(mas_erase);

/**
 * mas_nomem() - Check if there was an error allocating and do the allocation
 * if necessary. If there are allocations, then free them.
 * @mas: The maple state
 * @gfp: The GFP_FLAGS to use for allocations
 *
 * Return: true on allocation, false otherwise.
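 *
 * Sketch of the retry pattern used by callers in this file (see
 * mas_store_gfp()):
 *
 *	retry:
 *		mas_wr_store_entry(&wr_mas);
 *		if (mas_nomem(mas, gfp))
 *			goto retry;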
6125 */
6126 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6127 __must_hold(mas->tree->lock)
6128 {
6129 if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6130 mas_destroy(mas);
6131 return false;
6132 }
6133 
6134 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6135 mtree_unlock(mas->tree);
6136 mas_alloc_nodes(mas, gfp);
6137 mtree_lock(mas->tree);
6138 } else {
6139 mas_alloc_nodes(mas, gfp);
6140 }
6141 
6142 if (!mas_allocated(mas))
6143 return false;
6144 
6145 mas->node = MAS_START;
6146 return true;
6147 }
6148 
6149 void __init maple_tree_init(void)
6150 {
6151 maple_node_cache = kmem_cache_create("maple_node",
6152 sizeof(struct maple_node), sizeof(struct maple_node),
6153 SLAB_PANIC, NULL);
6154 }
6155 
6156 /**
6157 * mtree_load() - Load a value stored in a maple tree
6158 * @mt: The maple tree
6159 * @index: The index to load
6160 *
6161 * Return: the entry or %NULL
6162 */
6163 void *mtree_load(struct maple_tree *mt, unsigned long index)
6164 {
6165 MA_STATE(mas, mt, index, index);
6166 void *entry;
6167 
6168 trace_ma_read(__func__, &mas);
6169 rcu_read_lock();
6170 retry:
6171 entry = mas_start(&mas);
6172 if (unlikely(mas_is_none(&mas)))
6173 goto unlock;
6174 
6175 if (unlikely(mas_is_ptr(&mas))) {
6176 if (index)
6177 entry = NULL;
6178 
6179 goto unlock;
6180 }
6181 
6182 entry = mtree_lookup_walk(&mas);
6183 if (!entry && unlikely(mas_is_start(&mas)))
6184 goto retry;
6185 unlock:
6186 rcu_read_unlock();
6187 if (xa_is_zero(entry))
6188 return NULL;
6189 
6190 return entry;
6191 }
6192 EXPORT_SYMBOL(mtree_load);
6193 
6194 /**
6195 * mtree_store_range() - Store an entry at a given range.
6196 * @mt: The maple tree
6197 * @index: The start of the range
6198 * @last: The end of the range
6199 * @entry: The entry to store
6200 * @gfp: The GFP_FLAGS to use for allocations
6201 *
6202 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6203 * be allocated.
6204 */
6205 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6206 unsigned long last, void *entry, gfp_t gfp)
6207 {
6208 MA_STATE(mas, mt, index, last);
6209 MA_WR_STATE(wr_mas, &mas, entry);
6210 
6211 trace_ma_write(__func__, &mas, 0, entry);
6212 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6213 return -EINVAL;
6214 
6215 if (index > last)
6216 return -EINVAL;
6217 
6218 mtree_lock(mt);
6219 retry:
6220 mas_wr_store_entry(&wr_mas);
6221 if (mas_nomem(&mas, gfp))
6222 goto retry;
6223 
6224 mtree_unlock(mt);
6225 if (mas_is_err(&mas))
6226 return xa_err(mas.node);
6227 
6228 return 0;
6229 }
6230 EXPORT_SYMBOL(mtree_store_range);
6231 
6232 /**
6233 * mtree_store() - Store an entry at a given index.
6234 * @mt: The maple tree
6235 * @index: The index to store the value
6236 * @entry: The entry to store
6237 * @gfp: The GFP_FLAGS to use for allocations
6238 *
6239 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6240 * be allocated.
6241 */
6242 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6243 gfp_t gfp)
6244 {
6245 return mtree_store_range(mt, index, index, entry, gfp);
6246 }
6247 EXPORT_SYMBOL(mtree_store);
6248 
6249 /**
6250 * mtree_insert_range() - Insert an entry at a given range if there is no value.
6251 * @mt: The maple tree
6252 * @first: The start of the range
6253 * @last: The end of the range
6254 * @entry: The entry to store
6255 * @gfp: The GFP_FLAGS to use for allocations.
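 *
 * Unlike mtree_store_range(), insertion fails instead of overwriting when
 * any part of the range already holds an entry. A sketch (the range and
 * error handling are illustrative):
 *
 *	int err = mtree_insert_range(mt, 10, 20, ptr, GFP_KERNEL);
 *
 *	if (err == -EEXIST)
 *		pr_debug("part of [10, 20] is already occupied\n");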
6256 *
6257 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6258 * request, -ENOMEM if memory could not be allocated.
6259 */
6260 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6261 unsigned long last, void *entry, gfp_t gfp)
6262 {
6263 MA_STATE(ms, mt, first, last);
6264 
6265 if (WARN_ON_ONCE(xa_is_advanced(entry)))
6266 return -EINVAL;
6267 
6268 if (first > last)
6269 return -EINVAL;
6270 
6271 mtree_lock(mt);
6272 retry:
6273 mas_insert(&ms, entry);
6274 if (mas_nomem(&ms, gfp))
6275 goto retry;
6276 
6277 mtree_unlock(mt);
6278 if (mas_is_err(&ms))
6279 return xa_err(ms.node);
6280 
6281 return 0;
6282 }
6283 EXPORT_SYMBOL(mtree_insert_range);
6284 
6285 /**
6286 * mtree_insert() - Insert an entry at a given index if there is no value.
6287 * @mt: The maple tree
6288 * @index: The index to store the value
6289 * @entry: The entry to store
6290 * @gfp: The GFP_FLAGS to use for allocations.
6291 *
6292 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6293 * request, -ENOMEM if memory could not be allocated.
6294 */
6295 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6296 gfp_t gfp)
6297 {
6298 return mtree_insert_range(mt, index, index, entry, gfp);
6299 }
6300 EXPORT_SYMBOL(mtree_insert);
6301 
6302 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6303 void *entry, unsigned long size, unsigned long min,
6304 unsigned long max, gfp_t gfp)
6305 {
6306 int ret = 0;
6307 
6308 MA_STATE(mas, mt, min, max - size);
6309 if (!mt_is_alloc(mt))
6310 return -EINVAL;
6311 
6312 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6313 return -EINVAL;
6314 
6315 if (min > max)
6316 return -EINVAL;
6317 
6318 if (max < size)
6319 return -EINVAL;
6320 
6321 if (!size)
6322 return -EINVAL;
6323 
6324 mtree_lock(mt);
6325 retry:
6326 mas.offset = 0;
6327 mas.index = min;
6328 mas.last = max - size;
6329 ret = mas_alloc(&mas, entry, size, startp);
6330 if (mas_nomem(&mas, gfp))
6331 goto retry;
6332 
6333 mtree_unlock(mt);
6334 return ret;
6335 }
6336 EXPORT_SYMBOL(mtree_alloc_range);
6337 
6338 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6339 void *entry, unsigned long size, unsigned long min,
6340 unsigned long max, gfp_t gfp)
6341 {
6342 int ret = 0;
6343 
6344 MA_STATE(mas, mt, min, max - size);
6345 if (!mt_is_alloc(mt))
6346 return -EINVAL;
6347 
6348 if (WARN_ON_ONCE(mt_is_reserved(entry)))
6349 return -EINVAL;
6350 
6351 if (min >= max)
6352 return -EINVAL;
6353 
6354 if (max < size - 1)
6355 return -EINVAL;
6356 
6357 if (!size)
6358 return -EINVAL;
6359 
6360 mtree_lock(mt);
6361 retry:
6362 ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6363 if (mas_nomem(&mas, gfp))
6364 goto retry;
6365 
6366 mtree_unlock(mt);
6367 return ret;
6368 }
6369 EXPORT_SYMBOL(mtree_alloc_rrange);
6370 
6371 /**
6372 * mtree_erase() - Find an index and erase the entire range.
6373 * @mt: The maple tree
6374 * @index: The index to erase
6375 *
6376 * Erasing is the same as a walk to an entry then a store of a NULL to that
6377 * ENTIRE range. In fact, it is implemented as such using the advanced API.
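 *
 * For example (the values are illustrative), if @ptr is stored over
 * [5, 100], then:
 *
 *	entry = mtree_erase(mt, 42);
 *
 * returns @ptr and empties all of [5, 100], exactly as storing NULL over
 * that range would.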
6378 * 6379 * Return: The entry stored at the @index or %NULL 6380 */ 6381 void *mtree_erase(struct maple_tree *mt, unsigned long index) 6382 { 6383 void *entry = NULL; 6384 6385 MA_STATE(mas, mt, index, index); 6386 trace_ma_op(__func__, &mas); 6387 6388 mtree_lock(mt); 6389 entry = mas_erase(&mas); 6390 mtree_unlock(mt); 6391 6392 return entry; 6393 } 6394 EXPORT_SYMBOL(mtree_erase); 6395 6396 /** 6397 * __mt_destroy() - Walk and free all nodes of a locked maple tree. 6398 * @mt: The maple tree 6399 * 6400 * Note: Does not handle locking. 6401 */ 6402 void __mt_destroy(struct maple_tree *mt) 6403 { 6404 void *root = mt_root_locked(mt); 6405 6406 rcu_assign_pointer(mt->ma_root, NULL); 6407 if (xa_is_node(root)) 6408 mte_destroy_walk(root, mt); 6409 6410 mt->ma_flags = 0; 6411 } 6412 EXPORT_SYMBOL_GPL(__mt_destroy); 6413 6414 /** 6415 * mtree_destroy() - Destroy a maple tree 6416 * @mt: The maple tree 6417 * 6418 * Frees all resources used by the tree. Handles locking. 6419 */ 6420 void mtree_destroy(struct maple_tree *mt) 6421 { 6422 mtree_lock(mt); 6423 __mt_destroy(mt); 6424 mtree_unlock(mt); 6425 } 6426 EXPORT_SYMBOL(mtree_destroy); 6427 6428 /** 6429 * mt_find() - Search from the start up until an entry is found. 6430 * @mt: The maple tree 6431 * @index: Pointer which contains the start location of the search 6432 * @max: The maximum value to check 6433 * 6434 * Handles locking. @index will be incremented to one beyond the range. 6435 * 6436 * Return: The entry at or after the @index or %NULL 6437 */ 6438 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) 6439 { 6440 MA_STATE(mas, mt, *index, *index); 6441 void *entry; 6442 #ifdef CONFIG_DEBUG_MAPLE_TREE 6443 unsigned long copy = *index; 6444 #endif 6445 6446 trace_ma_read(__func__, &mas); 6447 6448 if ((*index) > max) 6449 return NULL; 6450 6451 rcu_read_lock(); 6452 retry: 6453 entry = mas_state_walk(&mas); 6454 if (mas_is_start(&mas)) 6455 goto retry; 6456 6457 if (unlikely(xa_is_zero(entry))) 6458 entry = NULL; 6459 6460 if (entry) 6461 goto unlock; 6462 6463 while (mas_searchable(&mas) && (mas.index < max)) { 6464 entry = mas_next_entry(&mas, max); 6465 if (likely(entry && !xa_is_zero(entry))) 6466 break; 6467 } 6468 6469 if (unlikely(xa_is_zero(entry))) 6470 entry = NULL; 6471 unlock: 6472 rcu_read_unlock(); 6473 if (likely(entry)) { 6474 *index = mas.last + 1; 6475 #ifdef CONFIG_DEBUG_MAPLE_TREE 6476 if ((*index) && (*index) <= copy) 6477 pr_err("index not increased! %lx <= %lx\n", 6478 *index, copy); 6479 MT_BUG_ON(mt, (*index) && ((*index) <= copy)); 6480 #endif 6481 } 6482 6483 return entry; 6484 } 6485 EXPORT_SYMBOL(mt_find); 6486 6487 /** 6488 * mt_find_after() - Search from the start up until an entry is found. 
6489 * @mt: The maple tree
6490 * @index: Pointer which contains the start location of the search
6491 * @max: The maximum value to check
6492 *
6493 * Handles locking, detects wrapping on index == 0.
6494 *
6495 * Return: The entry at or after the @index or %NULL
6496 */
6497 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6498 unsigned long max)
6499 {
6500 if (!(*index))
6501 return NULL;
6502 
6503 return mt_find(mt, index, max);
6504 }
6505 EXPORT_SYMBOL(mt_find_after);
6506 
6507 #ifdef CONFIG_DEBUG_MAPLE_TREE
6508 atomic_t maple_tree_tests_run;
6509 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6510 atomic_t maple_tree_tests_passed;
6511 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6512 
6513 #ifndef __KERNEL__
6514 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6515 void mt_set_non_kernel(unsigned int val)
6516 {
6517 kmem_cache_set_non_kernel(maple_node_cache, val);
6518 }
6519 
6520 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6521 unsigned long mt_get_alloc_size(void)
6522 {
6523 return kmem_cache_get_alloc(maple_node_cache);
6524 }
6525 
6526 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6527 void mt_zero_nr_tallocated(void)
6528 {
6529 kmem_cache_zero_nr_tallocated(maple_node_cache);
6530 }
6531 
6532 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6533 unsigned int mt_nr_tallocated(void)
6534 {
6535 return kmem_cache_nr_tallocated(maple_node_cache);
6536 }
6537 
6538 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6539 unsigned int mt_nr_allocated(void)
6540 {
6541 return kmem_cache_nr_allocated(maple_node_cache);
6542 }
6543 
6544 /*
6545 * mas_dead_node() - Check if the maple state is pointing to a dead node.
6546 * @mas: The maple state
6547 * @index: The index to restore in @mas.
6548 *
6549 * Used in test code.
6550 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6551 */
6552 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6553 {
6554 if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6555 return 0;
6556 
6557 if (likely(!mte_dead_node(mas->node)))
6558 return 0;
6559 
6560 mas_rewalk(mas, index);
6561 return 1;
6562 }
6563 
6564 void mt_cache_shrink(void)
6565 {
6566 }
6567 #else
6568 /*
6569 * mt_cache_shrink() - For testing, don't use this.
6570 *
6571 * Certain testcases can trigger an OOM when combined with other memory
6572 * debugging configuration options. This function is used to reduce the
6573 * possibility of an out of memory event due to kmem_cache objects remaining
6574 * around for longer than usual.
6575 */
6576 void mt_cache_shrink(void)
6577 {
6578 kmem_cache_shrink(maple_node_cache);
6579 
6580 }
6581 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6582 
6583 #endif /* not defined __KERNEL__ */
6584 /*
6585 * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6586 * @mas: The maple state
6587 * @offset: The offset into the slot array to fetch.
6588 *
6589 * Return: The entry stored at @offset.
6590 */
6591 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6592 unsigned char offset)
6593 {
6594 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6595 offset);
6596 }
6597 
6598 
6599 /*
6600 * mas_first_entry() - Go to the first leaf and find the first entry.
6601 * @mas: the maple state.
6602 * @mn: the maple node; must match @mas->node.
6603 * @limit: the maximum index to check.
6604 * @mt: the node type of @mn.
6605 * Sets mas->offset to the offset of the entry, mas->index to its range minimum.
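 *
 * Note: other than in an empty tree, a leaf may not hold two adjacent
 * NULL entries (mt_validate_nulls() below checks this invariant), so the
 * first entry must be in slot 0 or slot 1; both are tried here.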
6606 * 6607 * Return: The first entry or MAS_NONE. 6608 */ 6609 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, 6610 unsigned long limit, enum maple_type mt) 6611 6612 { 6613 unsigned long max; 6614 unsigned long *pivots; 6615 void __rcu **slots; 6616 void *entry = NULL; 6617 6618 mas->index = mas->min; 6619 if (mas->index > limit) 6620 goto none; 6621 6622 max = mas->max; 6623 mas->offset = 0; 6624 while (likely(!ma_is_leaf(mt))) { 6625 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6626 slots = ma_slots(mn, mt); 6627 pivots = ma_pivots(mn, mt); 6628 max = pivots[0]; 6629 entry = mas_slot(mas, slots, 0); 6630 if (unlikely(ma_dead_node(mn))) 6631 return NULL; 6632 mas->node = entry; 6633 mn = mas_mn(mas); 6634 mt = mte_node_type(mas->node); 6635 } 6636 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6637 6638 mas->max = max; 6639 slots = ma_slots(mn, mt); 6640 entry = mas_slot(mas, slots, 0); 6641 if (unlikely(ma_dead_node(mn))) 6642 return NULL; 6643 6644 /* Slot 0 or 1 must be set */ 6645 if (mas->index > limit) 6646 goto none; 6647 6648 if (likely(entry)) 6649 return entry; 6650 6651 pivots = ma_pivots(mn, mt); 6652 mas->index = pivots[0] + 1; 6653 mas->offset = 1; 6654 entry = mas_slot(mas, slots, 1); 6655 if (unlikely(ma_dead_node(mn))) 6656 return NULL; 6657 6658 if (mas->index > limit) 6659 goto none; 6660 6661 if (likely(entry)) 6662 return entry; 6663 6664 none: 6665 if (likely(!ma_dead_node(mn))) 6666 mas->node = MAS_NONE; 6667 return NULL; 6668 } 6669 6670 /* Depth first search, post-order */ 6671 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) 6672 { 6673 6674 struct maple_enode *p = MAS_NONE, *mn = mas->node; 6675 unsigned long p_min, p_max; 6676 6677 mas_next_node(mas, mas_mn(mas), max); 6678 if (!mas_is_none(mas)) 6679 return; 6680 6681 if (mte_is_root(mn)) 6682 return; 6683 6684 mas->node = mn; 6685 mas_ascend(mas); 6686 while (mas->node != MAS_NONE) { 6687 p = mas->node; 6688 p_min = mas->min; 6689 p_max = mas->max; 6690 mas_prev_node(mas, 0); 6691 } 6692 6693 if (p == MAS_NONE) 6694 return; 6695 6696 mas->node = p; 6697 mas->max = p_max; 6698 mas->min = p_min; 6699 } 6700 6701 /* Tree validations */ 6702 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6703 unsigned long min, unsigned long max, unsigned int depth); 6704 static void mt_dump_range(unsigned long min, unsigned long max, 6705 unsigned int depth) 6706 { 6707 static const char spaces[] = " "; 6708 6709 if (min == max) 6710 pr_info("%.*s%lu: ", depth * 2, spaces, min); 6711 else 6712 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 6713 } 6714 6715 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, 6716 unsigned int depth) 6717 { 6718 mt_dump_range(min, max, depth); 6719 6720 if (xa_is_value(entry)) 6721 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), 6722 xa_to_value(entry), entry); 6723 else if (xa_is_zero(entry)) 6724 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 6725 else if (mt_is_reserved(entry)) 6726 pr_cont("UNKNOWN ENTRY (%p)\n", entry); 6727 else 6728 pr_cont("%p\n", entry); 6729 } 6730 6731 static void mt_dump_range64(const struct maple_tree *mt, void *entry, 6732 unsigned long min, unsigned long max, unsigned int depth) 6733 { 6734 struct maple_range_64 *node = &mte_to_node(entry)->mr64; 6735 bool leaf = mte_is_leaf(entry); 6736 unsigned long first = min; 6737 int i; 6738 6739 pr_cont(" contents: "); 6740 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) 6741 pr_cont("%p %lu ", 
node->slot[i], node->pivot[i]); 6742 pr_cont("%p\n", node->slot[i]); 6743 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { 6744 unsigned long last = max; 6745 6746 if (i < (MAPLE_RANGE64_SLOTS - 1)) 6747 last = node->pivot[i]; 6748 else if (!node->slot[i] && max != mt_node_max(entry)) 6749 break; 6750 if (last == 0 && i > 0) 6751 break; 6752 if (leaf) 6753 mt_dump_entry(mt_slot(mt, node->slot, i), 6754 first, last, depth + 1); 6755 else if (node->slot[i]) 6756 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6757 first, last, depth + 1); 6758 6759 if (last == max) 6760 break; 6761 if (last > max) { 6762 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6763 node, last, max, i); 6764 break; 6765 } 6766 first = last + 1; 6767 } 6768 } 6769 6770 static void mt_dump_arange64(const struct maple_tree *mt, void *entry, 6771 unsigned long min, unsigned long max, unsigned int depth) 6772 { 6773 struct maple_arange_64 *node = &mte_to_node(entry)->ma64; 6774 bool leaf = mte_is_leaf(entry); 6775 unsigned long first = min; 6776 int i; 6777 6778 pr_cont(" contents: "); 6779 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) 6780 pr_cont("%lu ", node->gap[i]); 6781 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); 6782 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) 6783 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6784 pr_cont("%p\n", node->slot[i]); 6785 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { 6786 unsigned long last = max; 6787 6788 if (i < (MAPLE_ARANGE64_SLOTS - 1)) 6789 last = node->pivot[i]; 6790 else if (!node->slot[i]) 6791 break; 6792 if (last == 0 && i > 0) 6793 break; 6794 if (leaf) 6795 mt_dump_entry(mt_slot(mt, node->slot, i), 6796 first, last, depth + 1); 6797 else if (node->slot[i]) 6798 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6799 first, last, depth + 1); 6800 6801 if (last == max) 6802 break; 6803 if (last > max) { 6804 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6805 node, last, max, i); 6806 break; 6807 } 6808 first = last + 1; 6809 } 6810 } 6811 6812 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6813 unsigned long min, unsigned long max, unsigned int depth) 6814 { 6815 struct maple_node *node = mte_to_node(entry); 6816 unsigned int type = mte_node_type(entry); 6817 unsigned int i; 6818 6819 mt_dump_range(min, max, depth); 6820 6821 pr_cont("node %p depth %d type %d parent %p", node, depth, type, 6822 node ? node->parent : NULL); 6823 switch (type) { 6824 case maple_dense: 6825 pr_cont("\n"); 6826 for (i = 0; i < MAPLE_NODE_SLOTS; i++) { 6827 if (min + i > max) 6828 pr_cont("OUT OF RANGE: "); 6829 mt_dump_entry(mt_slot(mt, node->slot, i), 6830 min + i, min + i, depth); 6831 } 6832 break; 6833 case maple_leaf_64: 6834 case maple_range_64: 6835 mt_dump_range64(mt, entry, min, max, depth); 6836 break; 6837 case maple_arange_64: 6838 mt_dump_arange64(mt, entry, min, max, depth); 6839 break; 6840 6841 default: 6842 pr_cont(" UNKNOWN TYPE\n"); 6843 } 6844 } 6845 6846 void mt_dump(const struct maple_tree *mt) 6847 { 6848 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); 6849 6850 pr_info("maple_tree(%p) flags %X, height %u root %p\n", 6851 mt, mt->ma_flags, mt_height(mt), entry); 6852 if (!xa_is_node(entry)) 6853 mt_dump_entry(entry, 0, 0, 0); 6854 else if (entry) 6855 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); 6856 } 6857 EXPORT_SYMBOL_GPL(mt_dump); 6858 6859 /* 6860 * Calculate the maximum gap in a node and check if that's what is reported in 6861 * the parent (unless root). 
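 *
 * A NULL entry spanning [p_start, p_end] counts as a gap of
 * p_end - p_start + 1. Allocation (arange_64) nodes cache per-slot gaps
 * in a gap[] array; each cached value is checked against its slot's
 * range, and the node's largest gap is then compared with the value the
 * parent recorded for this node.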
6862 */ 6863 static void mas_validate_gaps(struct ma_state *mas) 6864 { 6865 struct maple_enode *mte = mas->node; 6866 struct maple_node *p_mn; 6867 unsigned long gap = 0, max_gap = 0; 6868 unsigned long p_end, p_start = mas->min; 6869 unsigned char p_slot; 6870 unsigned long *gaps = NULL; 6871 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); 6872 int i; 6873 6874 if (ma_is_dense(mte_node_type(mte))) { 6875 for (i = 0; i < mt_slot_count(mte); i++) { 6876 if (mas_get_slot(mas, i)) { 6877 if (gap > max_gap) 6878 max_gap = gap; 6879 gap = 0; 6880 continue; 6881 } 6882 gap++; 6883 } 6884 goto counted; 6885 } 6886 6887 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); 6888 for (i = 0; i < mt_slot_count(mte); i++) { 6889 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); 6890 6891 if (!gaps) { 6892 if (mas_get_slot(mas, i)) { 6893 gap = 0; 6894 goto not_empty; 6895 } 6896 6897 gap += p_end - p_start + 1; 6898 } else { 6899 void *entry = mas_get_slot(mas, i); 6900 6901 gap = gaps[i]; 6902 if (!entry) { 6903 if (gap != p_end - p_start + 1) { 6904 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", 6905 mas_mn(mas), i, 6906 mas_get_slot(mas, i), gap, 6907 p_end, p_start); 6908 mt_dump(mas->tree); 6909 6910 MT_BUG_ON(mas->tree, 6911 gap != p_end - p_start + 1); 6912 } 6913 } else { 6914 if (gap > p_end - p_start + 1) { 6915 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", 6916 mas_mn(mas), i, gap, p_end, p_start, 6917 p_end - p_start + 1); 6918 MT_BUG_ON(mas->tree, 6919 gap > p_end - p_start + 1); 6920 } 6921 } 6922 } 6923 6924 if (gap > max_gap) 6925 max_gap = gap; 6926 not_empty: 6927 p_start = p_end + 1; 6928 if (p_end >= mas->max) 6929 break; 6930 } 6931 6932 counted: 6933 if (mte_is_root(mte)) 6934 return; 6935 6936 p_slot = mte_parent_slot(mas->node); 6937 p_mn = mte_parent(mte); 6938 MT_BUG_ON(mas->tree, max_gap > mas->max); 6939 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) { 6940 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); 6941 mt_dump(mas->tree); 6942 } 6943 6944 MT_BUG_ON(mas->tree, 6945 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap); 6946 } 6947 6948 static void mas_validate_parent_slot(struct ma_state *mas) 6949 { 6950 struct maple_node *parent; 6951 struct maple_enode *node; 6952 enum maple_type p_type = mas_parent_enum(mas, mas->node); 6953 unsigned char p_slot = mte_parent_slot(mas->node); 6954 void __rcu **slots; 6955 int i; 6956 6957 if (mte_is_root(mas->node)) 6958 return; 6959 6960 parent = mte_parent(mas->node); 6961 slots = ma_slots(parent, p_type); 6962 MT_BUG_ON(mas->tree, mas_mn(mas) == parent); 6963 6964 /* Check prev/next parent slot for duplicate node entry */ 6965 6966 for (i = 0; i < mt_slots[p_type]; i++) { 6967 node = mas_slot(mas, slots, i); 6968 if (i == p_slot) { 6969 if (node != mas->node) 6970 pr_err("parent %p[%u] does not have %p\n", 6971 parent, i, mas_mn(mas)); 6972 MT_BUG_ON(mas->tree, node != mas->node); 6973 } else if (node == mas->node) { 6974 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", 6975 mas_mn(mas), parent, i, p_slot); 6976 MT_BUG_ON(mas->tree, node == mas->node); 6977 } 6978 } 6979 } 6980 6981 static void mas_validate_child_slot(struct ma_state *mas) 6982 { 6983 enum maple_type type = mte_node_type(mas->node); 6984 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 6985 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); 6986 struct maple_enode *child; 6987 unsigned char i; 6988 6989 if (mte_is_leaf(mas->node)) 6990 return; 6991 6992 for (i = 
0; i < mt_slots[type]; i++) { 6993 child = mas_slot(mas, slots, i); 6994 if (!pivots[i] || pivots[i] == mas->max) 6995 break; 6996 6997 if (!child) 6998 break; 6999 7000 if (mte_parent_slot(child) != i) { 7001 pr_err("Slot error at %p[%u]: child %p has pslot %u\n", 7002 mas_mn(mas), i, mte_to_node(child), 7003 mte_parent_slot(child)); 7004 MT_BUG_ON(mas->tree, 1); 7005 } 7006 7007 if (mte_parent(child) != mte_to_node(mas->node)) { 7008 pr_err("child %p has parent %p not %p\n", 7009 mte_to_node(child), mte_parent(child), 7010 mte_to_node(mas->node)); 7011 MT_BUG_ON(mas->tree, 1); 7012 } 7013 } 7014 } 7015 7016 /* 7017 * Validate all pivots are within mas->min and mas->max. 7018 */ 7019 static void mas_validate_limits(struct ma_state *mas) 7020 { 7021 int i; 7022 unsigned long prev_piv = 0; 7023 enum maple_type type = mte_node_type(mas->node); 7024 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7025 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 7026 7027 /* all limits are fine here. */ 7028 if (mte_is_root(mas->node)) 7029 return; 7030 7031 for (i = 0; i < mt_slots[type]; i++) { 7032 unsigned long piv; 7033 7034 piv = mas_safe_pivot(mas, pivots, i, type); 7035 7036 if (!piv && (i != 0)) 7037 break; 7038 7039 if (!mte_is_leaf(mas->node)) { 7040 void *entry = mas_slot(mas, slots, i); 7041 7042 if (!entry) 7043 pr_err("%p[%u] cannot be null\n", 7044 mas_mn(mas), i); 7045 7046 MT_BUG_ON(mas->tree, !entry); 7047 } 7048 7049 if (prev_piv > piv) { 7050 pr_err("%p[%u] piv %lu < prev_piv %lu\n", 7051 mas_mn(mas), i, piv, prev_piv); 7052 MT_BUG_ON(mas->tree, piv < prev_piv); 7053 } 7054 7055 if (piv < mas->min) { 7056 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, 7057 piv, mas->min); 7058 MT_BUG_ON(mas->tree, piv < mas->min); 7059 } 7060 if (piv > mas->max) { 7061 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, 7062 piv, mas->max); 7063 MT_BUG_ON(mas->tree, piv > mas->max); 7064 } 7065 prev_piv = piv; 7066 if (piv == mas->max) 7067 break; 7068 } 7069 for (i += 1; i < mt_slots[type]; i++) { 7070 void *entry = mas_slot(mas, slots, i); 7071 7072 if (entry && (i != mt_slots[type] - 1)) { 7073 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), 7074 i, entry); 7075 MT_BUG_ON(mas->tree, entry != NULL); 7076 } 7077 7078 if (i < mt_pivots[type]) { 7079 unsigned long piv = pivots[i]; 7080 7081 if (!piv) 7082 continue; 7083 7084 pr_err("%p[%u] should not have piv %lu\n", 7085 mas_mn(mas), i, piv); 7086 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1); 7087 } 7088 } 7089 } 7090 7091 static void mt_validate_nulls(struct maple_tree *mt) 7092 { 7093 void *entry, *last = (void *)1; 7094 unsigned char offset = 0; 7095 void __rcu **slots; 7096 MA_STATE(mas, mt, 0, 0); 7097 7098 mas_start(&mas); 7099 if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) 7100 return; 7101 7102 while (!mte_is_leaf(mas.node)) 7103 mas_descend(&mas); 7104 7105 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); 7106 do { 7107 entry = mas_slot(&mas, slots, offset); 7108 if (!last && !entry) { 7109 pr_err("Sequential nulls end at %p[%u]\n", 7110 mas_mn(&mas), offset); 7111 } 7112 MT_BUG_ON(mt, !last && !entry); 7113 last = entry; 7114 if (offset == mas_data_end(&mas)) { 7115 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); 7116 if (mas_is_none(&mas)) 7117 return; 7118 offset = 0; 7119 slots = ma_slots(mte_to_node(mas.node), 7120 mte_node_type(mas.node)); 7121 } else { 7122 offset++; 7123 } 7124 7125 } while (!mas_is_none(&mas)); 7126 } 7127 7128 /* 7129 * validate a maple tree by checking: 7130 * 1. 
The limits (pivots are within mas->min to mas->max)
7131 * 2. The gap is correctly set in the parents
 * 3. The parent and child slots reference each other consistently
 * 4. Leaves contain no two adjacent NULL entries (mt_validate_nulls())
7132 */
7133 void mt_validate(struct maple_tree *mt)
7134 {
7135 unsigned char end;
7136 
7137 MA_STATE(mas, mt, 0, 0);
7138 rcu_read_lock();
7139 mas_start(&mas);
7140 if (!mas_searchable(&mas))
7141 goto done;
7142 
7143 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node));
7144 while (!mas_is_none(&mas)) {
7145 MT_BUG_ON(mas.tree, mte_dead_node(mas.node));
7146 if (!mte_is_root(mas.node)) {
7147 end = mas_data_end(&mas);
7148 if ((end < mt_min_slot_count(mas.node)) &&
7149 (mas.max != ULONG_MAX)) {
7150 pr_err("Invalid size %u of %p\n", end,
7151 mas_mn(&mas));
7152 MT_BUG_ON(mas.tree, 1);
7153 }
7154 
7155 }
7156 mas_validate_parent_slot(&mas);
7157 mas_validate_child_slot(&mas);
7158 mas_validate_limits(&mas);
7159 if (mt_is_alloc(mt))
7160 mas_validate_gaps(&mas);
7161 mas_dfs_postorder(&mas, ULONG_MAX);
7162 }
7163 mt_validate_nulls(mt);
7164 done:
7165 rcu_read_unlock();
7166 
7167 }
7168 EXPORT_SYMBOL_GPL(mt_validate);
7169 
7170 #endif /* CONFIG_DEBUG_MAPLE_TREE */
7171