// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
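 *
 * As an illustrative sketch (not a dump of any real node), a leaf with a
 * minimum of 0 and a maximum of 20 holding the ranges 0-4 => NULL,
 * 5-10 => A and 11-20 => B would lay out as:
 *
 *  Slots  -> | NULL |  A |  B |
 *  Pivots -> |   4  | 10 | 20 |
 *
 * Slot 1 covers 5-10 because pivot 0 ends the previous range at 4 and
 * pivot 1 is inclusive of index 10.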
 *
 */

#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
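 *
 * A rough sketch of the replacement flow, inferred from the fields below
 * rather than any single caller:
 *
 * 1. Gather the data being replaced into a maple_big_node (@bn).
 * 2. Build the new subtree using the new left/middle/right maple states.
 * 3. Swap the subtree in and mark the old nodes dead; concurrent RCU
 *    readers that land on a dead node restart their walk.
 * 4. Queue the old nodes on the @free / @destroy topiary lists for release.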
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type is stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing a 32 or 64 bit value, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mte_parent_enum() - Return the maple_type of the parent from the stored
 * parent type.
 * @p_enode: The parent pointer cast as a maple_enode
 * @mt: The maple tree
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mte_parent_enum(struct maple_enode *p_enode,
				struct maple_tree *mt)
{
	unsigned long p_type;

	p_type = (unsigned long)p_enode;
	if (p_type & MAPLE_PARENT_ROOT)
		return 0; /* Validated in the caller. */

	p_type &= MAPLE_NODE_MASK;
	p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type));

	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mt))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}

static inline
enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode)
{
	return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree);
}

/*
 * mte_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on
 * the parent type.
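 *
 * Example with illustrative addresses: for a 256B-aligned parent node at
 * 0x...c00 of type maple_range_64 holding @enode in slot 2, the stored
 * value would be:
 *
 *	0x...c00 | (2 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	  == 0x...c00 | 0x10 | 0x06 == 0x...c16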
 */
static inline
void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent,
		    unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	BUG_ON(p_type == maple_dense);
	BUG_ON(p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by the shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent = (void *)((unsigned long)
					     node->parent & ~MAPLE_NODE_MASK);

	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0
 * is set, then the alloc contains the number of requested nodes.  If there
 * is an allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}

/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.
 * Set the request either in the node or do the necessary encoding to store
 * it in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv if it is within the limit of the @pivots array,
 * @mas->max otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the
 * list
 *
 * Add the @dead_enode to the linked list in @mat.
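 *
 * A usage sketch, assuming the MA_TOPIARY() initializer from the header:
 *
 *	MA_TOPIARY(mat, mas->tree);
 *
 *	mat_add(&mat, old_enode);	(marks old_enode dead)
 *	...
 *	mas_mat_free(mas, &mat);	(or mas_mat_destroy() for subtrees)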
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode;	/* parent enode. */
	struct maple_enode *a_enode;	/* ancestor enode. */
	struct maple_node *a_node;	/* ancestor node. */
	struct maple_node *p_node;	/* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		pivots = ma_pivots(a_node, a_type);
		a_enode = mt_mk_node(a_node, a_type);

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple
 * state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
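 *
 * Example (illustrative): with three nodes allocated and head->node_count
 * of 1, pushing @used parks it in head->slot[1] and bumps the total to
 * four.  Had the head's slots been full (MAPLE_ALLOC_SLOTS), @used would
 * instead become the new head with the old head in its slot[0].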
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->node_count) {
			unsigned int offset = node->node_count;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		node->node_count = 0;
		node->request_count = 0;
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;

		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* The single entry covers index 0 only. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
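 *
 * Example (illustrative, assuming the default 64-bit layout of 16 slots):
 * the last pivot, pivots[14], is checked first.  If it is 0 the node is
 * not full and the end comes from the metadata; if it equals mas->max the
 * data ends at offset 14; otherwise every slot is in use and the end is
 * the last slot, 15.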
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata end of the node to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
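 *
 * Example (illustrative): an arange_64 node with gaps {0, 50, 7, ...} and a
 * metadata gap offset of 1 returns 50 without scanning the array, since the
 * metadata always tracks (one of) the largest gap(s) in the node.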
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
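 *
 * A sketch of the effect: after a node is copied to its replacement, each
 * child still points at the old parent, so the walk below performs
 * mte_set_parent(child, @parent, slot) for every populated slot, from the
 * data end down to slot 0.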
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false)
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced)
		mas_free(mas, old_enode);
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 *
 * Return: true if a new child is found, false otherwise.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side
		 * will still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be
 * two splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum index of the data in @bn
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
				 struct maple_big_node *bn, unsigned char *mid_split,
				 unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of
	 * a store of a range that overwrites the end and beginning of two
	 * full nodes.  The result is a set of entries that cannot be stored
	 * in 2 nodes.
	 * Sometimes, these two nodes can also be located in different parent
	 * nodes which are also full.  This can carry upwards all the way to
	 * the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero
		 * based.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			      unsigned char mas_end, struct maple_big_node *b_node,
			      unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note: @end may be incremented within this function but is not modified at
 * the source.  This is fine since the metadata is the last thing to be
 * stored in a node during a write.
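 *
 * Example (illustrative): with mas->max of 100 and pivots[end] of 80, the
 * pivot at @end still bounds live data, so @end is advanced once before
 * checking whether a slot remains free for the metadata.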
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
				     struct maple_node *node, unsigned long *pivots,
				     enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update @mas->max to the last pivot copied when true.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children who do not have the correct
 * parents set.  Follow the parents which have the correct parents as they
 * are the new entries which need to be followed to find other incorrectly
 * set parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicate the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */
	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
 * @mas: The maple state
 * @end: The maple node end
 * @mt: The maple node type
 */
static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end,
				      enum maple_type mt)
{
	if (!(mas->mas_flags & MA_STATE_BULK))
		return;

	if (mte_is_root(mas->node))
		return;

	if (end > mt_min_slots[mt]) {
		mas->mas_flags &= ~MA_STATE_REBALANCE;
		return;
	}
}

/*
 * mas_store_b_node() - Store an @entry into the b_node while also copying the
 * data from a maple encoded node.
 * @wr_mas: the maple write state
 * @b_node: the maple_big_node to fill with data
 * @offset_end: the offset to end copying
 *
 * On return, @b_node->b_end is set to the actual end of the data stored in
 * @b_node.
 */
static inline void mas_store_b_node(struct ma_wr_state *wr_mas,
		struct maple_big_node *b_node, unsigned char offset_end)
{
	unsigned char slot;
	unsigned char b_end;
	/* Possible underflow of piv will wrap back to 0 before use. */
	unsigned long piv;
	struct ma_state *mas = wr_mas->mas;

	b_node->type = wr_mas->type;
	b_end = 0;
	slot = mas->offset;
	if (slot) {
		/* Copy start data up to insert. */
		mas_mab_cp(mas, 0, slot - 1, b_node, 0);
		b_end = b_node->b_end;
		piv = b_node->pivot[b_end - 1];
	} else
		piv = mas->min - 1;

	if (piv + 1 < mas->index) {
		/* Handle range starting after old range */
		b_node->slot[b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = mas->index - 1 - piv;
		b_node->pivot[b_end++] = mas->index - 1;
	}

	/* Store the new entry. */
	mas->offset = b_end;
	b_node->slot[b_end] = wr_mas->entry;
	b_node->pivot[b_end] = mas->last;

	/* Appended. */
	if (mas->last >= mas->max)
		goto b_end;

	/* Handle new range ending before old range ends */
	piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type);
	if (piv > mas->last) {
		if (piv == ULONG_MAX)
			mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type);

		if (offset_end != slot)
			wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
							  offset_end);

		b_node->slot[++b_end] = wr_mas->content;
		if (!wr_mas->content)
			b_node->gap[b_end] = piv - mas->last + 1;
		b_node->pivot[b_end] = piv;
	}

	slot = offset_end + 1;
	if (slot > wr_mas->node_end)
		goto b_end;

	/* Copy end data to the end of the node. */
	mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
	b_node->b_end--;
	return;

b_end:
	b_node->b_end = b_end;
}

/*
 * mas_prev_sibling() - Find the previous node with the same parent.
 * @mas: the maple state
 *
 * Return: True if there is a previous sibling, false otherwise.
 */
static inline bool mas_prev_sibling(struct ma_state *mas)
{
	unsigned int p_slot = mte_parent_slot(mas->node);

	if (mte_is_root(mas->node))
		return false;

	if (!p_slot)
		return false;

	mas_ascend(mas);
	mas->offset = p_slot - 1;
	mas_descend(mas);
	return true;
}

/*
 * mas_next_sibling() - Find the next node with the same parent.
 * @mas: the maple state
 *
 * Return: true if there is a next sibling, false otherwise.
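 *
 * A minimal usage sketch (hypothetical caller, assuming @mas points to a
 * non-root node and the tree lock is held):
 *
 *	if (mas_next_sibling(&mas))
 *		pr_debug("sibling covers %lu-%lu\n", mas.min, mas.max);
 *	else
 *		pr_debug("last child of this parent\n");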
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count;
	unsigned char offset;
	unsigned long index, min, max;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;
	min = mas_safe_min(mas, wr_mas->pivots, offset);
	if (unlikely(offset == count))
		goto max;

	max = wr_mas->pivots[offset];
	index = mas->index;
	if (unlikely(index <= max))
		goto done;

	if (unlikely(!max && offset))
		goto max;

	min = max + 1;
	while (++offset < count) {
		max = wr_mas->pivots[offset];
		if (index <= max)
			goto done;
		else if (unlikely(!max))
			break;

		min = max + 1;
	}

max:
	max = mas->max;
done:
	wr_mas->r_max = max;
	wr_mas->r_min = min;
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots (usually destroy)
 * @start: The starting slot (inclusive)
 * @end: The end slot (inclusive)
 */
static inline void mas_topiary_range(struct ma_state *mas,
		struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MT_BUG_ON(mas->tree, mte_is_leaf(mas->node));
	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
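 *
 * Worked example (illustrative): when both write states sit in the same
 * node, orig_l->offset == 1 and orig_r->offset == 3 give l_start = 2 and
 * r_end = 2 (before the dead-node adjustments), so only the
 * fully-overwritten child at slot 2 is added to @mast->destroy.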
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance nodes with the nearest neighbour,
 * favouring the node to the right. Check the nodes to the right, then to the
 * left, at each level upwards until the root is reached. Free and destroy as
 * needed. Data is copied into the @mast->bn.
 * @mast: The maple_subtree_state.
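 *
 * In outline (a sketch of the control flow, not the exact code):
 *
 *	ascend orig_l and orig_r until a sibling is available
 *	if a node exists to the right: descend and mast_rebalance_next()
 *	else if a node exists to the left: descend and mast_rebalance_prev()
 *	queue the consumed nodes on @mast->free and @mast->destroy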
2422 */ 2423 static inline 2424 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2425 { 2426 struct ma_state r_tmp = *mast->orig_r; 2427 struct ma_state l_tmp = *mast->orig_l; 2428 struct maple_enode *ancestor = NULL; 2429 unsigned char start, end; 2430 unsigned char depth = 0; 2431 2432 r_tmp = *mast->orig_r; 2433 l_tmp = *mast->orig_l; 2434 do { 2435 mas_ascend(mast->orig_r); 2436 mas_ascend(mast->orig_l); 2437 depth++; 2438 if (!ancestor && 2439 (mast->orig_r->node == mast->orig_l->node)) { 2440 ancestor = mast->orig_r->node; 2441 end = mast->orig_r->offset - 1; 2442 start = mast->orig_l->offset + 1; 2443 } 2444 2445 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2446 if (!ancestor) { 2447 ancestor = mast->orig_r->node; 2448 start = 0; 2449 } 2450 2451 mast->orig_r->offset++; 2452 do { 2453 mas_descend(mast->orig_r); 2454 mast->orig_r->offset = 0; 2455 depth--; 2456 } while (depth); 2457 2458 mast_rebalance_next(mast); 2459 do { 2460 unsigned char l_off = 0; 2461 struct maple_enode *child = r_tmp.node; 2462 2463 mas_ascend(&r_tmp); 2464 if (ancestor == r_tmp.node) 2465 l_off = start; 2466 2467 if (r_tmp.offset) 2468 r_tmp.offset--; 2469 2470 if (l_off < r_tmp.offset) 2471 mas_topiary_range(&r_tmp, mast->destroy, 2472 l_off, r_tmp.offset); 2473 2474 if (l_tmp.node != child) 2475 mat_add(mast->free, child); 2476 2477 } while (r_tmp.node != ancestor); 2478 2479 *mast->orig_l = l_tmp; 2480 return true; 2481 2482 } else if (mast->orig_l->offset != 0) { 2483 if (!ancestor) { 2484 ancestor = mast->orig_l->node; 2485 end = mas_data_end(mast->orig_l); 2486 } 2487 2488 mast->orig_l->offset--; 2489 do { 2490 mas_descend(mast->orig_l); 2491 mast->orig_l->offset = 2492 mas_data_end(mast->orig_l); 2493 depth--; 2494 } while (depth); 2495 2496 mast_rebalance_prev(mast); 2497 do { 2498 unsigned char r_off; 2499 struct maple_enode *child = l_tmp.node; 2500 2501 mas_ascend(&l_tmp); 2502 if (ancestor == l_tmp.node) 2503 r_off = end; 2504 else 2505 r_off = mas_data_end(&l_tmp); 2506 2507 if (l_tmp.offset < r_off) 2508 l_tmp.offset++; 2509 2510 if (l_tmp.offset < r_off) 2511 mas_topiary_range(&l_tmp, mast->destroy, 2512 l_tmp.offset, r_off); 2513 2514 if (r_tmp.node != child) 2515 mat_add(mast->free, child); 2516 2517 } while (l_tmp.node != ancestor); 2518 2519 *mast->orig_r = r_tmp; 2520 return true; 2521 } 2522 } while (!mte_is_root(mast->orig_r->node)); 2523 2524 *mast->orig_r = r_tmp; 2525 *mast->orig_l = l_tmp; 2526 return false; 2527 } 2528 2529 /* 2530 * mast_ascend_free() - Add current original maple state nodes to the free list 2531 * and ascend. 2532 * @mast: the maple subtree state. 2533 * 2534 * Ascend the original left and right sides and add the previous nodes to the 2535 * free list. Set the slots to point to the correct location in the new nodes. 
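 *
 * The resulting state, roughly (illustrative):
 *
 *	mast->free	gains the old orig_l node (and orig_r, if different)
 *	orig_l, orig_r	point one level up, re-walked with mas_wr_node_walk()
 *			so their offsets bound every slot to be freed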
2536 */ 2537 static inline void 2538 mast_ascend_free(struct maple_subtree_state *mast) 2539 { 2540 MA_WR_STATE(wr_mas, mast->orig_r, NULL); 2541 struct maple_enode *left = mast->orig_l->node; 2542 struct maple_enode *right = mast->orig_r->node; 2543 2544 mas_ascend(mast->orig_l); 2545 mas_ascend(mast->orig_r); 2546 mat_add(mast->free, left); 2547 2548 if (left != right) 2549 mat_add(mast->free, right); 2550 2551 mast->orig_r->offset = 0; 2552 mast->orig_r->index = mast->r->max; 2553 /* last should be larger than or equal to index */ 2554 if (mast->orig_r->last < mast->orig_r->index) 2555 mast->orig_r->last = mast->orig_r->index; 2556 /* 2557 * The node may not contain the value so set slot to ensure all 2558 * of the nodes contents are freed or destroyed. 2559 */ 2560 wr_mas.type = mte_node_type(mast->orig_r->node); 2561 mas_wr_node_walk(&wr_mas); 2562 /* Set up the left side of things */ 2563 mast->orig_l->offset = 0; 2564 mast->orig_l->index = mast->l->min; 2565 wr_mas.mas = mast->orig_l; 2566 wr_mas.type = mte_node_type(mast->orig_l->node); 2567 mas_wr_node_walk(&wr_mas); 2568 2569 mast->bn->type = wr_mas.type; 2570 } 2571 2572 /* 2573 * mas_new_ma_node() - Create and return a new maple node. Helper function. 2574 * @mas: the maple state with the allocations. 2575 * @b_node: the maple_big_node with the type encoding. 2576 * 2577 * Use the node type from the maple_big_node to allocate a new node from the 2578 * ma_state. This function exists mainly for code readability. 2579 * 2580 * Return: A new maple encoded node 2581 */ 2582 static inline struct maple_enode 2583 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) 2584 { 2585 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type); 2586 } 2587 2588 /* 2589 * mas_mab_to_node() - Set up right and middle nodes 2590 * 2591 * @mas: the maple state that contains the allocations. 2592 * @b_node: the node which contains the data. 2593 * @left: The pointer which will have the left node 2594 * @right: The pointer which may have the right node 2595 * @middle: the pointer which may have the middle node (rare) 2596 * @mid_split: the split location for the middle node 2597 * 2598 * Return: the split of left. 2599 */ 2600 static inline unsigned char mas_mab_to_node(struct ma_state *mas, 2601 struct maple_big_node *b_node, struct maple_enode **left, 2602 struct maple_enode **right, struct maple_enode **middle, 2603 unsigned char *mid_split, unsigned long min) 2604 { 2605 unsigned char split = 0; 2606 unsigned char slot_count = mt_slots[b_node->type]; 2607 2608 *left = mas_new_ma_node(mas, b_node); 2609 *right = NULL; 2610 *middle = NULL; 2611 *mid_split = 0; 2612 2613 if (b_node->b_end < slot_count) { 2614 split = b_node->b_end; 2615 } else { 2616 split = mab_calc_split(mas, b_node, mid_split, min); 2617 *right = mas_new_ma_node(mas, b_node); 2618 } 2619 2620 if (*mid_split) 2621 *middle = mas_new_ma_node(mas, b_node); 2622 2623 return split; 2624 2625 } 2626 2627 /* 2628 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end 2629 * pointer. 2630 * @b_node - the big node to add the entry 2631 * @mas - the maple state to get the pivot (mas->max) 2632 * @entry - the entry to add, if NULL nothing happens. 
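 *
 * For illustration, a call such as
 *
 *	mab_set_b_end(mast->bn, mast->l, left);
 *
 * (as used by the callers in this file) writes @left into the next free
 * slot, records the gap for allocation trees, stores mast->l->max as the
 * pivot, and advances b_end by one.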
2633 */ 2634 static inline void mab_set_b_end(struct maple_big_node *b_node, 2635 struct ma_state *mas, 2636 void *entry) 2637 { 2638 if (!entry) 2639 return; 2640 2641 b_node->slot[b_node->b_end] = entry; 2642 if (mt_is_alloc(mas->tree)) 2643 b_node->gap[b_node->b_end] = mas_max_gap(mas); 2644 b_node->pivot[b_node->b_end++] = mas->max; 2645 } 2646 2647 /* 2648 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent 2649 * of @mas->node to either @left or @right, depending on @slot and @split 2650 * 2651 * @mas - the maple state with the node that needs a parent 2652 * @left - possible parent 1 2653 * @right - possible parent 2 2654 * @slot - the slot the mas->node was placed 2655 * @split - the split location between @left and @right 2656 */ 2657 static inline void mas_set_split_parent(struct ma_state *mas, 2658 struct maple_enode *left, 2659 struct maple_enode *right, 2660 unsigned char *slot, unsigned char split) 2661 { 2662 if (mas_is_none(mas)) 2663 return; 2664 2665 if ((*slot) <= split) 2666 mte_set_parent(mas->node, left, *slot); 2667 else if (right) 2668 mte_set_parent(mas->node, right, (*slot) - split - 1); 2669 2670 (*slot)++; 2671 } 2672 2673 /* 2674 * mte_mid_split_check() - Check if the next node passes the mid-split 2675 * @**l: Pointer to left encoded maple node. 2676 * @**m: Pointer to middle encoded maple node. 2677 * @**r: Pointer to right encoded maple node. 2678 * @slot: The offset 2679 * @*split: The split location. 2680 * @mid_split: The middle split. 2681 */ 2682 static inline void mte_mid_split_check(struct maple_enode **l, 2683 struct maple_enode **r, 2684 struct maple_enode *right, 2685 unsigned char slot, 2686 unsigned char *split, 2687 unsigned char mid_split) 2688 { 2689 if (*r == right) 2690 return; 2691 2692 if (slot < mid_split) 2693 return; 2694 2695 *l = *r; 2696 *r = right; 2697 *split = mid_split; 2698 } 2699 2700 /* 2701 * mast_set_split_parents() - Helper function to set three nodes parents. Slot 2702 * is taken from @mast->l. 2703 * @mast - the maple subtree state 2704 * @left - the left node 2705 * @right - the right node 2706 * @split - the split location. 2707 */ 2708 static inline void mast_set_split_parents(struct maple_subtree_state *mast, 2709 struct maple_enode *left, 2710 struct maple_enode *middle, 2711 struct maple_enode *right, 2712 unsigned char split, 2713 unsigned char mid_split) 2714 { 2715 unsigned char slot; 2716 struct maple_enode *l = left; 2717 struct maple_enode *r = right; 2718 2719 if (mas_is_none(mast->l)) 2720 return; 2721 2722 if (middle) 2723 r = middle; 2724 2725 slot = mast->l->offset; 2726 2727 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2728 mas_set_split_parent(mast->l, l, r, &slot, split); 2729 2730 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2731 mas_set_split_parent(mast->m, l, r, &slot, split); 2732 2733 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2734 mas_set_split_parent(mast->r, l, r, &slot, split); 2735 } 2736 2737 /* 2738 * mas_wmb_replace() - Write memory barrier and replace 2739 * @mas: The maple state 2740 * @free: the maple topiary list of nodes to free 2741 * @destroy: The maple topiary list of nodes to destroy (walk and free) 2742 * 2743 * Updates gap as necessary. 
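 *
 * The ordering is the important part (a sketch of the contract):
 *
 *	smp_wmb();		- dead node marks become visible first
 *	mas_replace(mas, true);	- only then is the new data published
 *
 * so an RCU reader either hits a dead node and restarts, or sees new data.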
2744 */ 2745 static inline void mas_wmb_replace(struct ma_state *mas, 2746 struct ma_topiary *free, 2747 struct ma_topiary *destroy) 2748 { 2749 /* All nodes must see old data as dead prior to replacing that data */ 2750 smp_wmb(); /* Needed for RCU */ 2751 2752 /* Insert the new data in the tree */ 2753 mas_replace(mas, true); 2754 2755 if (!mte_is_leaf(mas->node)) 2756 mas_descend_adopt(mas); 2757 2758 mas_mat_free(mas, free); 2759 2760 if (destroy) 2761 mas_mat_destroy(mas, destroy); 2762 2763 if (mte_is_leaf(mas->node)) 2764 return; 2765 2766 mas_update_gap(mas); 2767 } 2768 2769 /* 2770 * mast_new_root() - Set a new tree root during subtree creation 2771 * @mast: The maple subtree state 2772 * @mas: The maple state 2773 */ 2774 static inline void mast_new_root(struct maple_subtree_state *mast, 2775 struct ma_state *mas) 2776 { 2777 mas_mn(mast->l)->parent = 2778 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2779 if (!mte_dead_node(mast->orig_l->node) && 2780 !mte_is_root(mast->orig_l->node)) { 2781 do { 2782 mast_ascend_free(mast); 2783 mast_topiary(mast); 2784 } while (!mte_is_root(mast->orig_l->node)); 2785 } 2786 if ((mast->orig_l->node != mas->node) && 2787 (mast->l->depth > mas_mt_height(mas))) { 2788 mat_add(mast->free, mas->node); 2789 } 2790 } 2791 2792 /* 2793 * mast_cp_to_nodes() - Copy data out to nodes. 2794 * @mast: The maple subtree state 2795 * @left: The left encoded maple node 2796 * @middle: The middle encoded maple node 2797 * @right: The right encoded maple node 2798 * @split: The location to split between left and (middle ? middle : right) 2799 * @mid_split: The location to split between middle and right. 2800 */ 2801 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2802 struct maple_enode *left, struct maple_enode *middle, 2803 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2804 { 2805 bool new_lmax = true; 2806 2807 mast->l->node = mte_node_or_none(left); 2808 mast->m->node = mte_node_or_none(middle); 2809 mast->r->node = mte_node_or_none(right); 2810 2811 mast->l->min = mast->orig_l->min; 2812 if (split == mast->bn->b_end) { 2813 mast->l->max = mast->orig_r->max; 2814 new_lmax = false; 2815 } 2816 2817 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2818 2819 if (middle) { 2820 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2821 mast->m->min = mast->bn->pivot[split] + 1; 2822 split = mid_split; 2823 } 2824 2825 mast->r->max = mast->orig_r->max; 2826 if (right) { 2827 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2828 mast->r->min = mast->bn->pivot[split] + 1; 2829 } 2830 } 2831 2832 /* 2833 * mast_combine_cp_left - Copy in the original left side of the tree into the 2834 * combined data set in the maple subtree state big node. 2835 * @mast: The maple subtree state 2836 */ 2837 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2838 { 2839 unsigned char l_slot = mast->orig_l->offset; 2840 2841 if (!l_slot) 2842 return; 2843 2844 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2845 } 2846 2847 /* 2848 * mast_combine_cp_right: Copy in the original right side of the tree into the 2849 * combined data set in the maple subtree state big node. 
 * @mast: The maple subtree state
 */
static inline void mast_combine_cp_right(struct maple_subtree_state *mast)
{
	if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max)
		return;

	mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1,
		   mt_slot_count(mast->orig_r->node), mast->bn,
		   mast->bn->b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_sufficient() - Check if the maple subtree state has enough data in the
 * big node to create at least one sufficient node.
 * @mast: the maple subtree state
 *
 * Return: True if there is enough data for a sufficient node, false otherwise.
 */
static inline bool mast_sufficient(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node))
		return true;

	return false;
}

/*
 * mast_overflow() - Check if there is too much data in the subtree state for a
 * single node.
 * @mast: The maple subtree state
 *
 * Return: True if the data will not fit in a single node, false otherwise.
 */
static inline bool mast_overflow(struct maple_subtree_state *mast)
{
	if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node))
		return true;

	return false;
}

static inline void *mtree_range_walk(struct ma_state *mas)
{
	unsigned long *pivots;
	unsigned char offset;
	struct maple_node *node;
	struct maple_enode *next, *last;
	enum maple_type type;
	void __rcu **slots;
	unsigned char end;
	unsigned long max, min;
	unsigned long prev_max, prev_min;

	next = mas->node;
	min = mas->min;
	max = mas->max;
	do {
		offset = 0;
		last = next;
		node = mte_to_node(next);
		type = mte_node_type(next);
		pivots = ma_pivots(node, type);
		end = ma_data_end(node, type, pivots, max);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;

		if (pivots[offset] >= mas->index) {
			prev_max = max;
			prev_min = min;
			max = pivots[offset];
			goto next;
		}

		do {
			offset++;
		} while ((offset < end) && (pivots[offset] < mas->index));

		prev_min = min;
		min = pivots[offset - 1] + 1;
		prev_max = max;
		if (likely(offset < end && pivots[offset]))
			max = pivots[offset];

next:
		slots = ma_slots(node, type);
		next = mt_slot(mas->tree, slots, offset);
		if (unlikely(ma_dead_node(node)))
			goto dead_node;
	} while (!ma_is_leaf(type));

	mas->offset = offset;
	mas->index = min;
	mas->last = max;
	mas->min = prev_min;
	mas->max = prev_max;
	mas->node = last;
	return (void *)next;

dead_node:
	mas_reset(mas);
	return NULL;
}

/*
 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers.
 * @mas: The starting maple state
 * @mast: The maple_subtree_state, keeps track of 4 maple states.
 * @count: The estimated count of iterations needed.
 *
 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root
 * is hit. First @b_node is split into two entries which are inserted into the
 * next iteration of the loop. @b_node is returned populated with the final
 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the
 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last
 * to account for what has been copied into the new sub-tree. The update of
 * orig_l_mas->last is used to find the slots that will need to be either
 * freed or destroyed.
 * orig_l_mas->depth keeps track of the height of the new sub-tree in case
 * the sub-tree becomes the full tree.
 *
 * Return: the number of elements in b_node during the last loop.
 */
static int mas_spanning_rebalance(struct ma_state *mas,
		struct maple_subtree_state *mast, unsigned char count)
{
	unsigned char split, mid_split;
	unsigned char slot = 0;
	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;

	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
	MA_TOPIARY(free, mas->tree);
	MA_TOPIARY(destroy, mas->tree);

	/*
	 * The tree needs to be rebalanced and leaves need to be kept at the
	 * same level. Rebalancing is done by use of the ``struct ma_topiary``.
	 */
	mast->l = &l_mas;
	mast->m = &m_mas;
	mast->r = &r_mas;
	mast->free = &free;
	mast->destroy = &destroy;
	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;

	/* Check if this is not root and has sufficient data. */
	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
		mast_spanning_rebalance(mast);

	mast->orig_l->depth = 0;

	/*
	 * Each level of the tree is examined and balanced: data is pushed to
	 * the left or right, or rebalanced against the left or right nodes,
	 * to avoid rippling up the tree and to limit the amount of churn.
	 * Once a new sub-section of the tree is created, there may be a mix
	 * of new and old nodes. The old nodes will have the incorrect parent
	 * pointers and currently be in two trees: the original tree and the
	 * partially new tree. To remedy the parent pointers in the old tree,
	 * the new data is swapped into the active tree and a walk down the
	 * tree is performed and the parent pointers are updated.
	 * See mas_descend_adopt() for more information.
	 */
	while (count--) {
		mast->bn->b_end--;
		mast->bn->type = mte_node_type(mast->orig_l->node);
		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
					&mid_split, mast->orig_l->min);
		mast_set_split_parents(mast, left, middle, right, split,
				       mid_split);
		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);

		/*
		 * Reset @mast->bn; data from the next level of the tree is
		 * copied into it for the next iteration.
		 */
		memset(mast->bn, 0, sizeof(struct maple_big_node));
		mast->bn->type = mte_node_type(left);
		mast->orig_l->depth++;

		/* Root already stored in l->node. */
		if (mas_is_root_limits(mast->l))
			goto new_root;

		mast_ascend_free(mast);
		mast_combine_cp_left(mast);
		l_mas.offset = mast->bn->b_end;
		mab_set_b_end(mast->bn, &l_mas, left);
		mab_set_b_end(mast->bn, &m_mas, middle);
		mab_set_b_end(mast->bn, &r_mas, right);

		/* Copy anything necessary out of the right node. */
		mast_combine_cp_right(mast);
		mast_topiary(mast);
		mast->orig_l->last = mast->orig_l->max;

		if (mast_sufficient(mast))
			continue;

		if (mast_overflow(mast))
			continue;

		/* May be a new root stored in mast->bn */
		if (mas_is_root_limits(mast->orig_l))
			break;

		mast_spanning_rebalance(mast);

		/* rebalancing from other nodes may require another loop.
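		 * When the final iteration pulled data in from a sibling, the
		 * loop count may already be zero, so bump it to run one more
		 * pass and write out the rebalanced data.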
*/ 3057 if (!count) 3058 count++; 3059 } 3060 3061 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3062 mte_node_type(mast->orig_l->node)); 3063 mast->orig_l->depth++; 3064 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3065 mte_set_parent(left, l_mas.node, slot); 3066 if (middle) 3067 mte_set_parent(middle, l_mas.node, ++slot); 3068 3069 if (right) 3070 mte_set_parent(right, l_mas.node, ++slot); 3071 3072 if (mas_is_root_limits(mast->l)) { 3073 new_root: 3074 mast_new_root(mast, mas); 3075 } else { 3076 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3077 } 3078 3079 if (!mte_dead_node(mast->orig_l->node)) 3080 mat_add(&free, mast->orig_l->node); 3081 3082 mas->depth = mast->orig_l->depth; 3083 *mast->orig_l = l_mas; 3084 mte_set_node_dead(mas->node); 3085 3086 /* Set up mas for insertion. */ 3087 mast->orig_l->depth = mas->depth; 3088 mast->orig_l->alloc = mas->alloc; 3089 *mas = *mast->orig_l; 3090 mas_wmb_replace(mas, &free, &destroy); 3091 mtree_range_walk(mas); 3092 return mast->bn->b_end; 3093 } 3094 3095 /* 3096 * mas_rebalance() - Rebalance a given node. 3097 * @mas: The maple state 3098 * @b_node: The big maple node. 3099 * 3100 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3101 * Continue upwards until tree is sufficient. 3102 * 3103 * Return: the number of elements in b_node during the last loop. 3104 */ 3105 static inline int mas_rebalance(struct ma_state *mas, 3106 struct maple_big_node *b_node) 3107 { 3108 char empty_count = mas_mt_height(mas); 3109 struct maple_subtree_state mast; 3110 unsigned char shift, b_end = ++b_node->b_end; 3111 3112 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3113 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3114 3115 trace_ma_op(__func__, mas); 3116 3117 /* 3118 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3119 * against the node to the right if it exists, otherwise the node to the 3120 * left of this node is rebalanced against this node. If rebalancing 3121 * causes just one node to be produced instead of two, then the parent 3122 * is also examined and rebalanced if it is insufficient. Every level 3123 * tries to combine the data in the same way. If one node contains the 3124 * entire range of the tree, then that node is used as a new root node. 3125 */ 3126 mas_node_count(mas, 1 + empty_count * 3); 3127 if (mas_is_err(mas)) 3128 return 0; 3129 3130 mast.orig_l = &l_mas; 3131 mast.orig_r = &r_mas; 3132 mast.bn = b_node; 3133 mast.bn->type = mte_node_type(mas->node); 3134 3135 l_mas = r_mas = *mas; 3136 3137 if (mas_next_sibling(&r_mas)) { 3138 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3139 r_mas.last = r_mas.index = r_mas.max; 3140 } else { 3141 mas_prev_sibling(&l_mas); 3142 shift = mas_data_end(&l_mas) + 1; 3143 mab_shift_right(b_node, shift); 3144 mas->offset += shift; 3145 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3146 b_node->b_end = shift + b_end; 3147 l_mas.index = l_mas.last = l_mas.min; 3148 } 3149 3150 return mas_spanning_rebalance(mas, &mast, empty_count); 3151 } 3152 3153 /* 3154 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3155 * state. 3156 * @mas: The maple state 3157 * @end: The end of the left-most node. 3158 * 3159 * During a mass-insert event (such as forking), it may be necessary to 3160 * rebalance the left-most node when it is not sufficient. 
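 *
 * A hypothetical sequence that ends up here (bulk mode is entered with
 * mas_expected_entries() and exited with mas_destroy()):
 *
 *	mas_expected_entries(&mas, count);
 *	... repeated mas_store_gfp(&mas, entry, GFP_KERNEL) calls ...
 *	mas_destroy(&mas);	- rebalances the left-most node if needed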
3161 */ 3162 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3163 { 3164 enum maple_type mt = mte_node_type(mas->node); 3165 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3166 struct maple_enode *eparent; 3167 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3168 void __rcu **l_slots, **slots; 3169 unsigned long *l_pivs, *pivs, gap; 3170 bool in_rcu = mt_in_rcu(mas->tree); 3171 3172 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3173 3174 l_mas = *mas; 3175 mas_prev_sibling(&l_mas); 3176 3177 /* set up node. */ 3178 if (in_rcu) { 3179 /* Allocate for both left and right as well as parent. */ 3180 mas_node_count(mas, 3); 3181 if (mas_is_err(mas)) 3182 return; 3183 3184 newnode = mas_pop_node(mas); 3185 } else { 3186 newnode = &reuse; 3187 } 3188 3189 node = mas_mn(mas); 3190 newnode->parent = node->parent; 3191 slots = ma_slots(newnode, mt); 3192 pivs = ma_pivots(newnode, mt); 3193 left = mas_mn(&l_mas); 3194 l_slots = ma_slots(left, mt); 3195 l_pivs = ma_pivots(left, mt); 3196 if (!l_slots[split]) 3197 split++; 3198 tmp = mas_data_end(&l_mas) - split; 3199 3200 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3201 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3202 pivs[tmp] = l_mas.max; 3203 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3204 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3205 3206 l_mas.max = l_pivs[split]; 3207 mas->min = l_mas.max + 1; 3208 eparent = mt_mk_node(mte_parent(l_mas.node), 3209 mas_parent_enum(&l_mas, l_mas.node)); 3210 tmp += end; 3211 if (!in_rcu) { 3212 unsigned char max_p = mt_pivots[mt]; 3213 unsigned char max_s = mt_slots[mt]; 3214 3215 if (tmp < max_p) 3216 memset(pivs + tmp, 0, 3217 sizeof(unsigned long *) * (max_p - tmp)); 3218 3219 if (tmp < mt_slots[mt]) 3220 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3221 3222 memcpy(node, newnode, sizeof(struct maple_node)); 3223 ma_set_meta(node, mt, 0, tmp - 1); 3224 mte_set_pivot(eparent, mte_parent_slot(l_mas.node), 3225 l_pivs[split]); 3226 3227 /* Remove data from l_pivs. */ 3228 tmp = split + 1; 3229 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3230 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3231 ma_set_meta(left, mt, 0, split); 3232 3233 goto done; 3234 } 3235 3236 /* RCU requires replacing both l_mas, mas, and parent. */ 3237 mas->node = mt_mk_node(newnode, mt); 3238 ma_set_meta(newnode, mt, 0, tmp); 3239 3240 new_left = mas_pop_node(mas); 3241 new_left->parent = left->parent; 3242 mt = mte_node_type(l_mas.node); 3243 slots = ma_slots(new_left, mt); 3244 pivs = ma_pivots(new_left, mt); 3245 memcpy(slots, l_slots, sizeof(void *) * split); 3246 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3247 ma_set_meta(new_left, mt, 0, split); 3248 l_mas.node = mt_mk_node(new_left, mt); 3249 3250 /* replace parent. 
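	 * The parent is also swapped for a new allocation so that an RCU
	 * reader holding the old parent still sees the old, consistent
	 * children.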
*/ 3251 offset = mte_parent_slot(mas->node); 3252 mt = mas_parent_enum(&l_mas, l_mas.node); 3253 parent = mas_pop_node(mas); 3254 slots = ma_slots(parent, mt); 3255 pivs = ma_pivots(parent, mt); 3256 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3257 rcu_assign_pointer(slots[offset], mas->node); 3258 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3259 pivs[offset - 1] = l_mas.max; 3260 eparent = mt_mk_node(parent, mt); 3261 done: 3262 gap = mas_leaf_max_gap(mas); 3263 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3264 gap = mas_leaf_max_gap(&l_mas); 3265 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3266 mas_ascend(mas); 3267 3268 if (in_rcu) 3269 mas_replace(mas, false); 3270 3271 mas_update_gap(mas); 3272 } 3273 3274 /* 3275 * mas_split_final_node() - Split the final node in a subtree operation. 3276 * @mast: the maple subtree state 3277 * @mas: The maple state 3278 * @height: The height of the tree in case it's a new root. 3279 */ 3280 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3281 struct ma_state *mas, int height) 3282 { 3283 struct maple_enode *ancestor; 3284 3285 if (mte_is_root(mas->node)) { 3286 if (mt_is_alloc(mas->tree)) 3287 mast->bn->type = maple_arange_64; 3288 else 3289 mast->bn->type = maple_range_64; 3290 mas->depth = height; 3291 } 3292 /* 3293 * Only a single node is used here, could be root. 3294 * The Big_node data should just fit in a single node. 3295 */ 3296 ancestor = mas_new_ma_node(mas, mast->bn); 3297 mte_set_parent(mast->l->node, ancestor, mast->l->offset); 3298 mte_set_parent(mast->r->node, ancestor, mast->r->offset); 3299 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3300 3301 mast->l->node = ancestor; 3302 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3303 mas->offset = mast->bn->b_end - 1; 3304 return true; 3305 } 3306 3307 /* 3308 * mast_fill_bnode() - Copy data into the big node in the subtree state 3309 * @mast: The maple subtree state 3310 * @mas: the maple state 3311 * @skip: The number of entries to skip for new nodes insertion. 3312 */ 3313 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3314 struct ma_state *mas, 3315 unsigned char skip) 3316 { 3317 bool cp = true; 3318 struct maple_enode *old = mas->node; 3319 unsigned char split; 3320 3321 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3322 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3323 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3324 mast->bn->b_end = 0; 3325 3326 if (mte_is_root(mas->node)) { 3327 cp = false; 3328 } else { 3329 mas_ascend(mas); 3330 mat_add(mast->free, old); 3331 mas->offset = mte_parent_slot(mas->node); 3332 } 3333 3334 if (cp && mast->l->offset) 3335 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3336 3337 split = mast->bn->b_end; 3338 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3339 mast->r->offset = mast->bn->b_end; 3340 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3341 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3342 cp = false; 3343 3344 if (cp) 3345 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3346 mast->bn, mast->bn->b_end); 3347 3348 mast->bn->b_end--; 3349 mast->bn->type = mte_node_type(mas->node); 3350 } 3351 3352 /* 3353 * mast_split_data() - Split the data in the subtree state big node into regular 3354 * nodes. 
3355 * @mast: The maple subtree state 3356 * @mas: The maple state 3357 * @split: The location to split the big node 3358 */ 3359 static inline void mast_split_data(struct maple_subtree_state *mast, 3360 struct ma_state *mas, unsigned char split) 3361 { 3362 unsigned char p_slot; 3363 3364 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3365 mte_set_pivot(mast->r->node, 0, mast->r->max); 3366 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3367 mast->l->offset = mte_parent_slot(mas->node); 3368 mast->l->max = mast->bn->pivot[split]; 3369 mast->r->min = mast->l->max + 1; 3370 if (mte_is_leaf(mas->node)) 3371 return; 3372 3373 p_slot = mast->orig_l->offset; 3374 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3375 &p_slot, split); 3376 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3377 &p_slot, split); 3378 } 3379 3380 /* 3381 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3382 * data to the right or left node if there is room. 3383 * @mas: The maple state 3384 * @height: The current height of the maple state 3385 * @mast: The maple subtree state 3386 * @left: Push left or not. 3387 * 3388 * Keeping the height of the tree low means faster lookups. 3389 * 3390 * Return: True if pushed, false otherwise. 3391 */ 3392 static inline bool mas_push_data(struct ma_state *mas, int height, 3393 struct maple_subtree_state *mast, bool left) 3394 { 3395 unsigned char slot_total = mast->bn->b_end; 3396 unsigned char end, space, split; 3397 3398 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3399 tmp_mas = *mas; 3400 tmp_mas.depth = mast->l->depth; 3401 3402 if (left && !mas_prev_sibling(&tmp_mas)) 3403 return false; 3404 else if (!left && !mas_next_sibling(&tmp_mas)) 3405 return false; 3406 3407 end = mas_data_end(&tmp_mas); 3408 slot_total += end; 3409 space = 2 * mt_slot_count(mas->node) - 2; 3410 /* -2 instead of -1 to ensure there isn't a triple split */ 3411 if (ma_is_leaf(mast->bn->type)) 3412 space--; 3413 3414 if (mas->max == ULONG_MAX) 3415 space--; 3416 3417 if (slot_total >= space) 3418 return false; 3419 3420 /* Get the data; Fill mast->bn */ 3421 mast->bn->b_end++; 3422 if (left) { 3423 mab_shift_right(mast->bn, end + 1); 3424 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3425 mast->bn->b_end = slot_total + 1; 3426 } else { 3427 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3428 } 3429 3430 /* Configure mast for splitting of mast->bn */ 3431 split = mt_slots[mast->bn->type] - 2; 3432 if (left) { 3433 /* Switch mas to prev node */ 3434 mat_add(mast->free, mas->node); 3435 *mas = tmp_mas; 3436 /* Start using mast->l for the left side. */ 3437 tmp_mas.node = mast->l->node; 3438 *mast->l = tmp_mas; 3439 } else { 3440 mat_add(mast->free, tmp_mas.node); 3441 tmp_mas.node = mast->r->node; 3442 *mast->r = tmp_mas; 3443 split = slot_total - split; 3444 } 3445 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3446 /* Update parent slot for split calculation. */ 3447 if (left) 3448 mast->orig_l->offset += end + 1; 3449 3450 mast_split_data(mast, mas, split); 3451 mast_fill_bnode(mast, mas, 2); 3452 mas_split_final_node(mast, mas, height + 1); 3453 return true; 3454 } 3455 3456 /* 3457 * mas_split() - Split data that is too big for one node into two. 3458 * @mas: The maple state 3459 * @b_node: The maple big node 3460 * Return: 1 on success, 0 on failure. 
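 *
 * Example (illustrative): storing a 17th distinct range into a full
 * 16-slot leaf lands here; if the previous or next sibling has spare room
 * the data is pushed instead, otherwise the leaf is split and the new
 * pivot is inserted into the parent, possibly rippling upwards.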
3461 */ 3462 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3463 { 3464 struct maple_subtree_state mast; 3465 int height = 0; 3466 unsigned char mid_split, split = 0; 3467 3468 /* 3469 * Splitting is handled differently from any other B-tree; the Maple 3470 * Tree splits upwards. Splitting up means that the split operation 3471 * occurs when the walk of the tree hits the leaves and not on the way 3472 * down. The reason for splitting up is that it is impossible to know 3473 * how much space will be needed until the leaf is (or leaves are) 3474 * reached. Since overwriting data is allowed and a range could 3475 * overwrite more than one range or result in changing one entry into 3 3476 * entries, it is impossible to know if a split is required until the 3477 * data is examined. 3478 * 3479 * Splitting is a balancing act between keeping allocations to a minimum 3480 * and avoiding a 'jitter' event where a tree is expanded to make room 3481 * for an entry followed by a contraction when the entry is removed. To 3482 * accomplish the balance, there are empty slots remaining in both left 3483 * and right nodes after a split. 3484 */ 3485 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3486 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3487 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3488 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3489 MA_TOPIARY(mat, mas->tree); 3490 3491 trace_ma_op(__func__, mas); 3492 mas->depth = mas_mt_height(mas); 3493 /* Allocation failures will happen early. */ 3494 mas_node_count(mas, 1 + mas->depth * 2); 3495 if (mas_is_err(mas)) 3496 return 0; 3497 3498 mast.l = &l_mas; 3499 mast.r = &r_mas; 3500 mast.orig_l = &prev_l_mas; 3501 mast.orig_r = &prev_r_mas; 3502 mast.free = &mat; 3503 mast.bn = b_node; 3504 3505 while (height++ <= mas->depth) { 3506 if (mt_slots[b_node->type] > b_node->b_end) { 3507 mas_split_final_node(&mast, mas, height); 3508 break; 3509 } 3510 3511 l_mas = r_mas = *mas; 3512 l_mas.node = mas_new_ma_node(mas, b_node); 3513 r_mas.node = mas_new_ma_node(mas, b_node); 3514 /* 3515 * Another way that 'jitter' is avoided is to terminate a split up early if the 3516 * left or right node has space to spare. This is referred to as "pushing left" 3517 * or "pushing right" and is similar to the B* tree, except the nodes left or 3518 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3519 * is a significant savings. 3520 */ 3521 /* Try to push left. */ 3522 if (mas_push_data(mas, height, &mast, true)) 3523 break; 3524 3525 /* Try to push right. */ 3526 if (mas_push_data(mas, height, &mast, false)) 3527 break; 3528 3529 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3530 mast_split_data(&mast, mas, split); 3531 /* 3532 * Usually correct, mab_mas_cp in the above call overwrites 3533 * r->max. 3534 */ 3535 mast.r->max = mas->max; 3536 mast_fill_bnode(&mast, mas, 1); 3537 prev_l_mas = *mast.l; 3538 prev_r_mas = *mast.r; 3539 } 3540 3541 /* Set the original node as dead */ 3542 mat_add(mast.free, mas->node); 3543 mas->node = l_mas.node; 3544 mas_wmb_replace(mas, mast.free, NULL); 3545 mtree_range_walk(mas); 3546 return 1; 3547 } 3548 3549 /* 3550 * mas_reuse_node() - Reuse the node to store the data. 3551 * @wr_mas: The maple write state 3552 * @bn: The maple big node 3553 * @end: The end of the data. 3554 * 3555 * Will always return false in RCU mode. 3556 * 3557 * Return: True if node was reused, false otherwise. 
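 *
 * Reuse is refused under RCU because concurrent readers may still be in
 * the node; in that mode the caller (mas_commit_b_node()) allocates a new
 * node and swaps it in with mas_replace() instead.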
3558 */ 3559 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3560 struct maple_big_node *bn, unsigned char end) 3561 { 3562 /* Need to be rcu safe. */ 3563 if (mt_in_rcu(wr_mas->mas->tree)) 3564 return false; 3565 3566 if (end > bn->b_end) { 3567 int clear = mt_slots[wr_mas->type] - bn->b_end; 3568 3569 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3570 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3571 } 3572 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3573 return true; 3574 } 3575 3576 /* 3577 * mas_commit_b_node() - Commit the big node into the tree. 3578 * @wr_mas: The maple write state 3579 * @b_node: The maple big node 3580 * @end: The end of the data. 3581 */ 3582 static inline int mas_commit_b_node(struct ma_wr_state *wr_mas, 3583 struct maple_big_node *b_node, unsigned char end) 3584 { 3585 struct maple_node *node; 3586 unsigned char b_end = b_node->b_end; 3587 enum maple_type b_type = b_node->type; 3588 3589 if ((b_end < mt_min_slots[b_type]) && 3590 (!mte_is_root(wr_mas->mas->node)) && 3591 (mas_mt_height(wr_mas->mas) > 1)) 3592 return mas_rebalance(wr_mas->mas, b_node); 3593 3594 if (b_end >= mt_slots[b_type]) 3595 return mas_split(wr_mas->mas, b_node); 3596 3597 if (mas_reuse_node(wr_mas, b_node, end)) 3598 goto reuse_node; 3599 3600 mas_node_count(wr_mas->mas, 1); 3601 if (mas_is_err(wr_mas->mas)) 3602 return 0; 3603 3604 node = mas_pop_node(wr_mas->mas); 3605 node->parent = mas_mn(wr_mas->mas)->parent; 3606 wr_mas->mas->node = mt_mk_node(node, b_type); 3607 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3608 mas_replace(wr_mas->mas, false); 3609 reuse_node: 3610 mas_update_gap(wr_mas->mas); 3611 return 1; 3612 } 3613 3614 /* 3615 * mas_root_expand() - Expand a root to a node 3616 * @mas: The maple state 3617 * @entry: The entry to store into the tree 3618 */ 3619 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3620 { 3621 void *contents = mas_root_locked(mas); 3622 enum maple_type type = maple_leaf_64; 3623 struct maple_node *node; 3624 void __rcu **slots; 3625 unsigned long *pivots; 3626 int slot = 0; 3627 3628 mas_node_count(mas, 1); 3629 if (unlikely(mas_is_err(mas))) 3630 return 0; 3631 3632 node = mas_pop_node(mas); 3633 pivots = ma_pivots(node, type); 3634 slots = ma_slots(node, type); 3635 node->parent = ma_parent_ptr( 3636 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3637 mas->node = mt_mk_node(node, type); 3638 3639 if (mas->index) { 3640 if (contents) { 3641 rcu_assign_pointer(slots[slot], contents); 3642 if (likely(mas->index > 1)) 3643 slot++; 3644 } 3645 pivots[slot++] = mas->index - 1; 3646 } 3647 3648 rcu_assign_pointer(slots[slot], entry); 3649 mas->offset = slot; 3650 pivots[slot] = mas->last; 3651 if (mas->last != ULONG_MAX) 3652 slot++; 3653 mas->depth = 1; 3654 mas_set_height(mas); 3655 3656 /* swap the new root into the tree */ 3657 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3658 ma_set_meta(node, maple_leaf_64, 0, slot); 3659 return slot; 3660 } 3661 3662 static inline void mas_store_root(struct ma_state *mas, void *entry) 3663 { 3664 if (likely((mas->last != 0) || (mas->index != 0))) 3665 mas_root_expand(mas, entry); 3666 else if (((unsigned long) (entry) & 3) == 2) 3667 mas_root_expand(mas, entry); 3668 else { 3669 rcu_assign_pointer(mas->tree->ma_root, entry); 3670 mas->node = MAS_START; 3671 } 3672 } 3673 3674 /* 3675 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3676 * spans the node. 
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes of a %NULL that will cause the node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max;
	unsigned long last = wr_mas->mas->last;
	unsigned long piv = wr_mas->r_max;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot */
	if (piv > last)
		return false;

	max = wr_mas->mas->max;
	if (unlikely(ma_is_leaf(type))) {
		/* Fits in the node, but may span slots. */
		if (last < max)
			return false;

		/* Writes to the end of the node but not null. */
		if ((last == max) && entry)
			return false;

		/*
		 * Writing ULONG_MAX is not a spanning write regardless of the
		 * value being written as long as the range fits in the node.
		 */
		if ((last == ULONG_MAX) && (last == max))
			return false;
	} else if (piv == last) {
		if (entry)
			return false;

		/* Detect spanning store wr walk */
		if (last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, piv, entry);

	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3789 * @l_wr_mas: The left maple write state 3790 * @r_wr_mas: The right maple write state 3791 */ 3792 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, 3793 struct ma_wr_state *r_wr_mas) 3794 { 3795 struct ma_state *r_mas = r_wr_mas->mas; 3796 struct ma_state *l_mas = l_wr_mas->mas; 3797 unsigned char l_slot; 3798 3799 l_slot = l_mas->offset; 3800 if (!l_wr_mas->content) 3801 l_mas->index = l_wr_mas->r_min; 3802 3803 if ((l_mas->index == l_wr_mas->r_min) && 3804 (l_slot && 3805 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) { 3806 if (l_slot > 1) 3807 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; 3808 else 3809 l_mas->index = l_mas->min; 3810 3811 l_mas->offset = l_slot - 1; 3812 } 3813 3814 if (!r_wr_mas->content) { 3815 if (r_mas->last < r_wr_mas->r_max) 3816 r_mas->last = r_wr_mas->r_max; 3817 r_mas->offset++; 3818 } else if ((r_mas->last == r_wr_mas->r_max) && 3819 (r_mas->last < r_mas->max) && 3820 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) { 3821 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots, 3822 r_wr_mas->type, r_mas->offset + 1); 3823 r_mas->offset++; 3824 } 3825 } 3826 3827 static inline void *mas_state_walk(struct ma_state *mas) 3828 { 3829 void *entry; 3830 3831 entry = mas_start(mas); 3832 if (mas_is_none(mas)) 3833 return NULL; 3834 3835 if (mas_is_ptr(mas)) 3836 return entry; 3837 3838 return mtree_range_walk(mas); 3839 } 3840 3841 /* 3842 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up 3843 * to date. 3844 * 3845 * @mas: The maple state. 3846 * 3847 * Note: Leaves mas in undesirable state. 3848 * Return: The entry for @mas->index or %NULL on dead node. 3849 */ 3850 static inline void *mtree_lookup_walk(struct ma_state *mas) 3851 { 3852 unsigned long *pivots; 3853 unsigned char offset; 3854 struct maple_node *node; 3855 struct maple_enode *next; 3856 enum maple_type type; 3857 void __rcu **slots; 3858 unsigned char end; 3859 unsigned long max; 3860 3861 next = mas->node; 3862 max = ULONG_MAX; 3863 do { 3864 offset = 0; 3865 node = mte_to_node(next); 3866 type = mte_node_type(next); 3867 pivots = ma_pivots(node, type); 3868 end = ma_data_end(node, type, pivots, max); 3869 if (unlikely(ma_dead_node(node))) 3870 goto dead_node; 3871 3872 if (pivots[offset] >= mas->index) 3873 goto next; 3874 3875 do { 3876 offset++; 3877 } while ((offset < end) && (pivots[offset] < mas->index)); 3878 3879 if (likely(offset > end)) 3880 max = pivots[offset]; 3881 3882 next: 3883 slots = ma_slots(node, type); 3884 next = mt_slot(mas->tree, slots, offset); 3885 if (unlikely(ma_dead_node(node))) 3886 goto dead_node; 3887 } while (!ma_is_leaf(type)); 3888 3889 return (void *)next; 3890 3891 dead_node: 3892 mas_reset(mas); 3893 return NULL; 3894 } 3895 3896 /* 3897 * mas_new_root() - Create a new root node that only contains the entry passed 3898 * in. 3899 * @mas: The maple state 3900 * @entry: The entry to store. 3901 * 3902 * Only valid when the index == 0 and the last == ULONG_MAX 3903 * 3904 * Return 0 on error, 1 on success. 
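 *
 * For illustration, a caller can empty the tree through this path:
 *
 *	mas_set_range(&mas, 0, ULONG_MAX);	- hypothetical caller
 *	mas_store_gfp(&mas, NULL, GFP_KERNEL);	- root becomes empty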
3905 */ 3906 static inline int mas_new_root(struct ma_state *mas, void *entry) 3907 { 3908 struct maple_enode *root = mas_root_locked(mas); 3909 enum maple_type type = maple_leaf_64; 3910 struct maple_node *node; 3911 void __rcu **slots; 3912 unsigned long *pivots; 3913 3914 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3915 mas->depth = 0; 3916 mas_set_height(mas); 3917 rcu_assign_pointer(mas->tree->ma_root, entry); 3918 mas->node = MAS_START; 3919 goto done; 3920 } 3921 3922 mas_node_count(mas, 1); 3923 if (mas_is_err(mas)) 3924 return 0; 3925 3926 node = mas_pop_node(mas); 3927 pivots = ma_pivots(node, type); 3928 slots = ma_slots(node, type); 3929 node->parent = ma_parent_ptr( 3930 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3931 mas->node = mt_mk_node(node, type); 3932 rcu_assign_pointer(slots[0], entry); 3933 pivots[0] = mas->last; 3934 mas->depth = 1; 3935 mas_set_height(mas); 3936 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3937 3938 done: 3939 if (xa_is_node(root)) 3940 mte_destroy_walk(root, mas->tree); 3941 3942 return 1; 3943 } 3944 /* 3945 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3946 * and new nodes where necessary, then place the sub-tree in the actual tree. 3947 * Note that mas is expected to point to the node which caused the store to 3948 * span. 3949 * @wr_mas: The maple write state 3950 * 3951 * Return: 0 on error, positive on success. 3952 */ 3953 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3954 { 3955 struct maple_subtree_state mast; 3956 struct maple_big_node b_node; 3957 struct ma_state *mas; 3958 unsigned char height; 3959 3960 /* Left and Right side of spanning store */ 3961 MA_STATE(l_mas, NULL, 0, 0); 3962 MA_STATE(r_mas, NULL, 0, 0); 3963 3964 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3965 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3966 3967 /* 3968 * A store operation that spans multiple nodes is called a spanning 3969 * store and is handled early in the store call stack by the function 3970 * mas_is_span_wr(). When a spanning store is identified, the maple 3971 * state is duplicated. The first maple state walks the left tree path 3972 * to ``index``, the duplicate walks the right tree path to ``last``. 3973 * The data in the two nodes are combined into a single node, two nodes, 3974 * or possibly three nodes (see the 3-way split above). A ``NULL`` 3975 * written to the last entry of a node is considered a spanning store as 3976 * a rebalance is required for the operation to complete and an overflow 3977 * of data may happen. 3978 */ 3979 mas = wr_mas->mas; 3980 trace_ma_op(__func__, mas); 3981 3982 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3983 return mas_new_root(mas, wr_mas->entry); 3984 /* 3985 * Node rebalancing may occur due to this store, so there may be three new 3986 * entries per level plus a new root. 3987 */ 3988 height = mas_mt_height(mas); 3989 mas_node_count(mas, 1 + height * 3); 3990 if (mas_is_err(mas)) 3991 return 0; 3992 3993 /* 3994 * Set up right side. Need to get to the next offset after the spanning 3995 * store to ensure it's not NULL and to combine both the next node and 3996 * the node with the start together. 3997 */ 3998 r_mas = *mas; 3999 /* Avoid overflow, walk to next slot in the tree. */ 4000 if (r_mas.last + 1) 4001 r_mas.last++; 4002 4003 r_mas.index = r_mas.last; 4004 mas_wr_walk_index(&r_wr_mas); 4005 r_mas.last = r_mas.index = mas->last; 4006 4007 /* Set up left side. 
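	 * The left state re-walks from the top down to the node containing
	 * mas->index, while r_mas above walked to last + 1; for a store of
	 * 5-10 over leaves holding 0-7 and 8-15 (illustrative), l_mas lands
	 * in the first leaf and r_mas in the second.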
*/ 4008 l_mas = *mas; 4009 mas_wr_walk_index(&l_wr_mas); 4010 4011 if (!wr_mas->entry) { 4012 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); 4013 mas->offset = l_mas.offset; 4014 mas->index = l_mas.index; 4015 mas->last = l_mas.last = r_mas.last; 4016 } 4017 4018 /* expanding NULLs may make this cover the entire range */ 4019 if (!l_mas.index && r_mas.last == ULONG_MAX) { 4020 mas_set_range(mas, 0, ULONG_MAX); 4021 return mas_new_root(mas, wr_mas->entry); 4022 } 4023 4024 memset(&b_node, 0, sizeof(struct maple_big_node)); 4025 /* Copy l_mas and store the value in b_node. */ 4026 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); 4027 /* Copy r_mas into b_node. */ 4028 if (r_mas.offset <= r_wr_mas.node_end) 4029 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, 4030 &b_node, b_node.b_end + 1); 4031 else 4032 b_node.b_end++; 4033 4034 /* Stop spanning searches by searching for just index. */ 4035 l_mas.index = l_mas.last = mas->index; 4036 4037 mast.bn = &b_node; 4038 mast.orig_l = &l_mas; 4039 mast.orig_r = &r_mas; 4040 /* Combine l_mas and r_mas and split them up evenly again. */ 4041 return mas_spanning_rebalance(mas, &mast, height + 1); 4042 } 4043 4044 /* 4045 * mas_wr_node_store() - Attempt to store the value in a node 4046 * @wr_mas: The maple write state 4047 * 4048 * Attempts to reuse the node, but may allocate. 4049 * 4050 * Return: True if stored, false otherwise 4051 */ 4052 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) 4053 { 4054 struct ma_state *mas = wr_mas->mas; 4055 void __rcu **dst_slots; 4056 unsigned long *dst_pivots; 4057 unsigned char dst_offset; 4058 unsigned char new_end = wr_mas->node_end; 4059 unsigned char offset; 4060 unsigned char node_slots = mt_slots[wr_mas->type]; 4061 struct maple_node reuse, *newnode; 4062 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; 4063 bool in_rcu = mt_in_rcu(mas->tree); 4064 4065 offset = mas->offset; 4066 if (mas->last == wr_mas->r_max) { 4067 /* runs right to the end of the node */ 4068 if (mas->last == mas->max) 4069 new_end = offset; 4070 /* don't copy this offset */ 4071 wr_mas->offset_end++; 4072 } else if (mas->last < wr_mas->r_max) { 4073 /* new range ends in this range */ 4074 if (unlikely(wr_mas->r_max == ULONG_MAX)) 4075 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); 4076 4077 new_end++; 4078 } else { 4079 if (wr_mas->end_piv == mas->last) 4080 wr_mas->offset_end++; 4081 4082 new_end -= wr_mas->offset_end - offset - 1; 4083 } 4084 4085 /* new range starts within a range */ 4086 if (wr_mas->r_min < mas->index) 4087 new_end++; 4088 4089 /* Not enough room */ 4090 if (new_end >= node_slots) 4091 return false; 4092 4093 /* Not enough data. */ 4094 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && 4095 !(mas->mas_flags & MA_STATE_BULK)) 4096 return false; 4097 4098 /* set up node. 
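         * In RCU mode the result must be built in a newly allocated node so
         * that concurrent readers keep seeing a consistent old copy;
         * otherwise the on-stack 'reuse' node is filled in and copied back
         * over the original at the end.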
*/ 4099 if (in_rcu) { 4100 mas_node_count(mas, 1); 4101 if (mas_is_err(mas)) 4102 return false; 4103 4104 newnode = mas_pop_node(mas); 4105 } else { 4106 memset(&reuse, 0, sizeof(struct maple_node)); 4107 newnode = &reuse; 4108 } 4109 4110 newnode->parent = mas_mn(mas)->parent; 4111 dst_pivots = ma_pivots(newnode, wr_mas->type); 4112 dst_slots = ma_slots(newnode, wr_mas->type); 4113 /* Copy from start to insert point */ 4114 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); 4115 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); 4116 dst_offset = offset; 4117 4118 /* Handle insert of new range starting after old range */ 4119 if (wr_mas->r_min < mas->index) { 4120 mas->offset++; 4121 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); 4122 dst_pivots[dst_offset++] = mas->index - 1; 4123 } 4124 4125 /* Store the new entry and range end. */ 4126 if (dst_offset < max_piv) 4127 dst_pivots[dst_offset] = mas->last; 4128 mas->offset = dst_offset; 4129 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry); 4130 4131 /* 4132 * this range wrote to the end of the node or it overwrote the rest of 4133 * the data 4134 */ 4135 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { 4136 new_end = dst_offset; 4137 goto done; 4138 } 4139 4140 dst_offset++; 4141 /* Copy to the end of node if necessary. */ 4142 copy_size = wr_mas->node_end - wr_mas->offset_end + 1; 4143 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, 4144 sizeof(void *) * copy_size); 4145 if (dst_offset < max_piv) { 4146 if (copy_size > max_piv - dst_offset) 4147 copy_size = max_piv - dst_offset; 4148 4149 memcpy(dst_pivots + dst_offset, 4150 wr_mas->pivots + wr_mas->offset_end, 4151 sizeof(unsigned long) * copy_size); 4152 } 4153 4154 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) 4155 dst_pivots[new_end] = mas->max; 4156 4157 done: 4158 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4159 if (in_rcu) { 4160 mas->node = mt_mk_node(newnode, wr_mas->type); 4161 mas_replace(mas, false); 4162 } else { 4163 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4164 } 4165 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4166 mas_update_gap(mas); 4167 return true; 4168 } 4169 4170 /* 4171 * mas_wr_slot_store: Attempt to store a value in a slot. 4172 * @wr_mas: the maple write state 4173 * 4174 * Return: True if stored, false otherwise 4175 */ 4176 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4177 { 4178 struct ma_state *mas = wr_mas->mas; 4179 unsigned long lmax; /* Logical max. */ 4180 unsigned char offset = mas->offset; 4181 4182 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || 4183 (offset != wr_mas->node_end))) 4184 return false; 4185 4186 if (offset == wr_mas->node_end - 1) 4187 lmax = mas->max; 4188 else 4189 lmax = wr_mas->pivots[offset + 1]; 4190 4191 /* going to overwrite too many slots. */ 4192 if (lmax < mas->last) 4193 return false; 4194 4195 if (wr_mas->r_min == mas->index) { 4196 /* overwriting two or more ranges with one. */ 4197 if (lmax == mas->last) 4198 return false; 4199 4200 /* Overwriting all of offset and a portion of offset + 1. */ 4201 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); 4202 wr_mas->pivots[offset] = mas->last; 4203 goto done; 4204 } 4205 4206 /* Doesn't end on the next range end. 
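         * Unless the write ends exactly at the logical max, more than the
         * two slots at offset and offset + 1 would change, which only the
         * node store path handles.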
*/ 4207 if (lmax != mas->last) 4208 return false; 4209 4210 /* Overwriting a portion of offset and all of offset + 1 */ 4211 if ((offset + 1 < mt_pivots[wr_mas->type]) && 4212 (wr_mas->entry || wr_mas->pivots[offset + 1])) 4213 wr_mas->pivots[offset + 1] = mas->last; 4214 4215 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); 4216 wr_mas->pivots[offset] = mas->index - 1; 4217 mas->offset++; /* Keep mas accurate. */ 4218 4219 done: 4220 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4221 mas_update_gap(mas); 4222 return true; 4223 } 4224 4225 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) 4226 { 4227 while ((wr_mas->mas->last > wr_mas->end_piv) && 4228 (wr_mas->offset_end < wr_mas->node_end)) 4229 wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; 4230 4231 if (wr_mas->mas->last > wr_mas->end_piv) 4232 wr_mas->end_piv = wr_mas->mas->max; 4233 } 4234 4235 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) 4236 { 4237 struct ma_state *mas = wr_mas->mas; 4238 4239 if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end]) 4240 mas->last = wr_mas->end_piv; 4241 4242 /* Check next slot(s) if we are overwriting the end */ 4243 if ((mas->last == wr_mas->end_piv) && 4244 (wr_mas->node_end != wr_mas->offset_end) && 4245 !wr_mas->slots[wr_mas->offset_end + 1]) { 4246 wr_mas->offset_end++; 4247 if (wr_mas->offset_end == wr_mas->node_end) 4248 mas->last = mas->max; 4249 else 4250 mas->last = wr_mas->pivots[wr_mas->offset_end]; 4251 wr_mas->end_piv = mas->last; 4252 } 4253 4254 if (!wr_mas->content) { 4255 /* If this one is null, the next and prev are not */ 4256 mas->index = wr_mas->r_min; 4257 } else { 4258 /* Check prev slot if we are overwriting the start */ 4259 if (mas->index == wr_mas->r_min && mas->offset && 4260 !wr_mas->slots[mas->offset - 1]) { 4261 mas->offset--; 4262 wr_mas->r_min = mas->index = 4263 mas_safe_min(mas, wr_mas->pivots, mas->offset); 4264 wr_mas->r_max = wr_mas->pivots[mas->offset]; 4265 } 4266 } 4267 } 4268 4269 static inline bool mas_wr_append(struct ma_wr_state *wr_mas) 4270 { 4271 unsigned char end = wr_mas->node_end; 4272 unsigned char new_end = end + 1; 4273 struct ma_state *mas = wr_mas->mas; 4274 unsigned char node_pivots = mt_pivots[wr_mas->type]; 4275 4276 if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { 4277 if (new_end < node_pivots) 4278 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4279 4280 if (new_end < node_pivots) 4281 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4282 4283 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); 4284 mas->offset = new_end; 4285 wr_mas->pivots[end] = mas->index - 1; 4286 4287 return true; 4288 } 4289 4290 if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) { 4291 if (new_end < node_pivots) 4292 wr_mas->pivots[new_end] = wr_mas->pivots[end]; 4293 4294 rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); 4295 if (new_end < node_pivots) 4296 ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); 4297 4298 wr_mas->pivots[end] = mas->last; 4299 rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); 4300 return true; 4301 } 4302 4303 return false; 4304 } 4305 4306 /* 4307 * mas_wr_bnode() - Slow path for a modification. 4308 * @wr_mas: The write maple state 4309 * 4310 * This is where split, rebalance end up. 
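 * The existing node contents and the new entry are flattened into a
 * maple_big_node and then committed back, splitting or rebalancing as
 * needed.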
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}

static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	unsigned char node_slots;
	unsigned char node_size;
	struct ma_state *mas = wr_mas->mas;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/* Attempt to append */
	node_slots = mt_slots[wr_mas->type];
	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
	if (mas->max == ULONG_MAX)
		node_size++;

	/* slot and node store will not fit, go to the slow path */
	if (unlikely(node_size >= node_slots))
		goto slow_path;

	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
		if (!wr_mas->content || !wr_mas->entry)
			mas_update_gap(mas);
		return;
	}

	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
		return;
	else if (mas_wr_node_store(wr_mas))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}

/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that were stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas->offset_end = mas->offset;
	wr_mas->end_piv = wr_mas->r_max;
	mas_wr_end_piv(wr_mas);

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);

	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}

/**
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL on success, otherwise the contents that already exist at the
 * requested index. The maple state needs to be checked for error conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree. If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only a
	 * single pivot needs to be inserted (as well as writing the entry). If
	 * the new range is within a gap but does not touch any other ranges,
	 * then two pivots need to be inserted: the start - 1, and the end. As
	 * usual, the entry must be written. Most operations require a new node
	 * to be allocated and replace an existing node to ensure RCU safety,
	 * when in RCU mode. The exception to requiring a newly allocated node
	 * is when inserting at the end of a node (appending). When done
	 * carefully, appending can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	if (!entry)
		return NULL;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;
}

/*
 * mas_prev_node() - Find the previous non-null entry at the same level in
 * the tree.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 if the node is dead, 0 otherwise.
 */
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	struct maple_enode *enode;
	unsigned long *pivots;

	if (mas_is_none(mas))
		return 0;

	level = 0;
	do {
		node = mas_mn(mas);
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up.
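		 * Keep ascending while this node has no previous sibling
		 * (offset 0); 'level' counts how many levels must be
		 * re-descended along the right-most edge of the previous
		 * subtree.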
*/ 4490 if (unlikely(mas_ascend(mas))) 4491 return 1; 4492 offset = mas->offset; 4493 level++; 4494 } while (!offset); 4495 4496 offset--; 4497 mt = mte_node_type(mas->node); 4498 node = mas_mn(mas); 4499 slots = ma_slots(node, mt); 4500 pivots = ma_pivots(node, mt); 4501 mas->max = pivots[offset]; 4502 if (offset) 4503 mas->min = pivots[offset - 1] + 1; 4504 if (unlikely(ma_dead_node(node))) 4505 return 1; 4506 4507 if (mas->max < min) 4508 goto no_entry_min; 4509 4510 while (level > 1) { 4511 level--; 4512 enode = mas_slot(mas, slots, offset); 4513 if (unlikely(ma_dead_node(node))) 4514 return 1; 4515 4516 mas->node = enode; 4517 mt = mte_node_type(mas->node); 4518 node = mas_mn(mas); 4519 slots = ma_slots(node, mt); 4520 pivots = ma_pivots(node, mt); 4521 offset = ma_data_end(node, mt, pivots, mas->max); 4522 if (offset) 4523 mas->min = pivots[offset - 1] + 1; 4524 4525 if (offset < mt_pivots[mt]) 4526 mas->max = pivots[offset]; 4527 4528 if (mas->max < min) 4529 goto no_entry; 4530 } 4531 4532 mas->node = mas_slot(mas, slots, offset); 4533 if (unlikely(ma_dead_node(node))) 4534 return 1; 4535 4536 mas->offset = mas_data_end(mas); 4537 if (unlikely(mte_dead_node(mas->node))) 4538 return 1; 4539 4540 return 0; 4541 4542 no_entry_min: 4543 mas->offset = offset; 4544 if (offset) 4545 mas->min = pivots[offset - 1] + 1; 4546 no_entry: 4547 if (unlikely(ma_dead_node(node))) 4548 return 1; 4549 4550 mas->node = MAS_NONE; 4551 return 0; 4552 } 4553 4554 /* 4555 * mas_next_node() - Get the next node at the same level in the tree. 4556 * @mas: The maple state 4557 * @max: The maximum pivot value to check. 4558 * 4559 * The next value will be mas->node[mas->offset] or MAS_NONE. 4560 * Return: 1 on dead node, 0 otherwise. 4561 */ 4562 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, 4563 unsigned long max) 4564 { 4565 unsigned long min, pivot; 4566 unsigned long *pivots; 4567 struct maple_enode *enode; 4568 int level = 0; 4569 unsigned char offset; 4570 enum maple_type mt; 4571 void __rcu **slots; 4572 4573 if (mas->max >= max) 4574 goto no_entry; 4575 4576 level = 0; 4577 do { 4578 if (ma_is_root(node)) 4579 goto no_entry; 4580 4581 min = mas->max + 1; 4582 if (min > max) 4583 goto no_entry; 4584 4585 if (unlikely(mas_ascend(mas))) 4586 return 1; 4587 4588 offset = mas->offset; 4589 level++; 4590 node = mas_mn(mas); 4591 mt = mte_node_type(mas->node); 4592 pivots = ma_pivots(node, mt); 4593 } while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max))); 4594 4595 slots = ma_slots(node, mt); 4596 pivot = mas_safe_pivot(mas, pivots, ++offset, mt); 4597 while (unlikely(level > 1)) { 4598 /* Descend, if necessary */ 4599 enode = mas_slot(mas, slots, offset); 4600 if (unlikely(ma_dead_node(node))) 4601 return 1; 4602 4603 mas->node = enode; 4604 level--; 4605 node = mas_mn(mas); 4606 mt = mte_node_type(mas->node); 4607 slots = ma_slots(node, mt); 4608 pivots = ma_pivots(node, mt); 4609 offset = 0; 4610 pivot = pivots[0]; 4611 } 4612 4613 enode = mas_slot(mas, slots, offset); 4614 if (unlikely(ma_dead_node(node))) 4615 return 1; 4616 4617 mas->node = enode; 4618 mas->min = min; 4619 mas->max = pivot; 4620 return 0; 4621 4622 no_entry: 4623 if (unlikely(ma_dead_node(node))) 4624 return 1; 4625 4626 mas->node = MAS_NONE; 4627 return 0; 4628 } 4629 4630 /* 4631 * mas_next_nentry() - Get the next node entry 4632 * @mas: The maple state 4633 * @max: The maximum value to check 4634 * @*range_start: Pointer to store the start of the range. 
4635 * 4636 * Sets @mas->offset to the offset of the next node entry, @mas->last to the 4637 * pivot of the entry. 4638 * 4639 * Return: The next entry, %NULL otherwise 4640 */ 4641 static inline void *mas_next_nentry(struct ma_state *mas, 4642 struct maple_node *node, unsigned long max, enum maple_type type) 4643 { 4644 unsigned char count; 4645 unsigned long pivot; 4646 unsigned long *pivots; 4647 void __rcu **slots; 4648 void *entry; 4649 4650 if (mas->last == mas->max) { 4651 mas->index = mas->max; 4652 return NULL; 4653 } 4654 4655 pivots = ma_pivots(node, type); 4656 slots = ma_slots(node, type); 4657 mas->index = mas_safe_min(mas, pivots, mas->offset); 4658 if (ma_dead_node(node)) 4659 return NULL; 4660 4661 if (mas->index > max) 4662 return NULL; 4663 4664 count = ma_data_end(node, type, pivots, mas->max); 4665 if (mas->offset > count) 4666 return NULL; 4667 4668 while (mas->offset < count) { 4669 pivot = pivots[mas->offset]; 4670 entry = mas_slot(mas, slots, mas->offset); 4671 if (ma_dead_node(node)) 4672 return NULL; 4673 4674 if (entry) 4675 goto found; 4676 4677 if (pivot >= max) 4678 return NULL; 4679 4680 mas->index = pivot + 1; 4681 mas->offset++; 4682 } 4683 4684 if (mas->index > mas->max) { 4685 mas->index = mas->last; 4686 return NULL; 4687 } 4688 4689 pivot = mas_safe_pivot(mas, pivots, mas->offset, type); 4690 entry = mas_slot(mas, slots, mas->offset); 4691 if (ma_dead_node(node)) 4692 return NULL; 4693 4694 if (!pivot) 4695 return NULL; 4696 4697 if (!entry) 4698 return NULL; 4699 4700 found: 4701 mas->last = pivot; 4702 return entry; 4703 } 4704 4705 static inline void mas_rewalk(struct ma_state *mas, unsigned long index) 4706 { 4707 retry: 4708 mas_set(mas, index); 4709 mas_state_walk(mas); 4710 if (mas_is_start(mas)) 4711 goto retry; 4712 } 4713 4714 /* 4715 * mas_next_entry() - Internal function to get the next entry. 4716 * @mas: The maple state 4717 * @limit: The maximum range start. 4718 * 4719 * Set the @mas->node to the next entry and the range_start to 4720 * the beginning value for the entry. Does not check beyond @limit. 4721 * Sets @mas->index and @mas->last to the limit if it is hit. 4722 * Restarts on dead nodes. 4723 * 4724 * Return: the next entry or %NULL. 4725 */ 4726 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4727 { 4728 void *entry = NULL; 4729 struct maple_enode *prev_node; 4730 struct maple_node *node; 4731 unsigned char offset; 4732 unsigned long last; 4733 enum maple_type mt; 4734 4735 last = mas->last; 4736 retry: 4737 offset = mas->offset; 4738 prev_node = mas->node; 4739 node = mas_mn(mas); 4740 mt = mte_node_type(mas->node); 4741 mas->offset++; 4742 if (unlikely(mas->offset >= mt_slots[mt])) { 4743 mas->offset = mt_slots[mt] - 1; 4744 goto next_node; 4745 } 4746 4747 while (!mas_is_none(mas)) { 4748 entry = mas_next_nentry(mas, node, limit, mt); 4749 if (unlikely(ma_dead_node(node))) { 4750 mas_rewalk(mas, last); 4751 goto retry; 4752 } 4753 4754 if (likely(entry)) 4755 return entry; 4756 4757 if (unlikely((mas->index > limit))) 4758 break; 4759 4760 next_node: 4761 prev_node = mas->node; 4762 offset = mas->offset; 4763 if (unlikely(mas_next_node(mas, node, limit))) { 4764 mas_rewalk(mas, last); 4765 goto retry; 4766 } 4767 mas->offset = 0; 4768 node = mas_mn(mas); 4769 mt = mte_node_type(mas->node); 4770 } 4771 4772 mas->index = mas->last = limit; 4773 mas->offset = offset; 4774 mas->node = prev_node; 4775 return NULL; 4776 } 4777 4778 /* 4779 * mas_prev_nentry() - Get the previous node entry. 
4780 * @mas: The maple state. 4781 * @limit: The lower limit to check for a value. 4782 * 4783 * Return: the entry, %NULL otherwise. 4784 */ 4785 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, 4786 unsigned long index) 4787 { 4788 unsigned long pivot, min; 4789 unsigned char offset; 4790 struct maple_node *mn; 4791 enum maple_type mt; 4792 unsigned long *pivots; 4793 void __rcu **slots; 4794 void *entry; 4795 4796 retry: 4797 if (!mas->offset) 4798 return NULL; 4799 4800 mn = mas_mn(mas); 4801 mt = mte_node_type(mas->node); 4802 offset = mas->offset - 1; 4803 if (offset >= mt_slots[mt]) 4804 offset = mt_slots[mt] - 1; 4805 4806 slots = ma_slots(mn, mt); 4807 pivots = ma_pivots(mn, mt); 4808 if (offset == mt_pivots[mt]) 4809 pivot = mas->max; 4810 else 4811 pivot = pivots[offset]; 4812 4813 if (unlikely(ma_dead_node(mn))) { 4814 mas_rewalk(mas, index); 4815 goto retry; 4816 } 4817 4818 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || 4819 !pivot)) 4820 pivot = pivots[--offset]; 4821 4822 min = mas_safe_min(mas, pivots, offset); 4823 entry = mas_slot(mas, slots, offset); 4824 if (unlikely(ma_dead_node(mn))) { 4825 mas_rewalk(mas, index); 4826 goto retry; 4827 } 4828 4829 if (likely(entry)) { 4830 mas->offset = offset; 4831 mas->last = pivot; 4832 mas->index = min; 4833 } 4834 return entry; 4835 } 4836 4837 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) 4838 { 4839 void *entry; 4840 4841 retry: 4842 while (likely(!mas_is_none(mas))) { 4843 entry = mas_prev_nentry(mas, min, mas->index); 4844 if (unlikely(mas->last < min)) 4845 goto not_found; 4846 4847 if (likely(entry)) 4848 return entry; 4849 4850 if (unlikely(mas_prev_node(mas, min))) { 4851 mas_rewalk(mas, mas->index); 4852 goto retry; 4853 } 4854 4855 mas->offset++; 4856 } 4857 4858 mas->offset--; 4859 not_found: 4860 mas->index = mas->last = min; 4861 return NULL; 4862 } 4863 4864 /* 4865 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the 4866 * highest gap address of a given size in a given node and descend. 4867 * @mas: The maple state 4868 * @size: The needed size. 4869 * 4870 * Return: True if found in a leaf, false otherwise. 4871 * 4872 */ 4873 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) 4874 { 4875 enum maple_type type = mte_node_type(mas->node); 4876 struct maple_node *node = mas_mn(mas); 4877 unsigned long *pivots, *gaps; 4878 void __rcu **slots; 4879 unsigned long gap = 0; 4880 unsigned long max, min; 4881 unsigned char offset; 4882 4883 if (unlikely(mas_is_err(mas))) 4884 return true; 4885 4886 if (ma_is_dense(type)) { 4887 /* dense nodes. */ 4888 mas->offset = (unsigned char)(mas->index - mas->min); 4889 return true; 4890 } 4891 4892 pivots = ma_pivots(node, type); 4893 slots = ma_slots(node, type); 4894 gaps = ma_gaps(node, type); 4895 offset = mas->offset; 4896 min = mas_safe_min(mas, pivots, offset); 4897 /* Skip out of bounds. */ 4898 while (mas->last < min) 4899 min = mas_safe_min(mas, pivots, --offset); 4900 4901 max = mas_safe_pivot(mas, pivots, offset, type); 4902 while (mas->index <= max) { 4903 gap = 0; 4904 if (gaps) 4905 gap = gaps[offset]; 4906 else if (!mas_slot(mas, slots, offset)) 4907 gap = max - min + 1; 4908 4909 if (gap) { 4910 if ((size <= gap) && (size <= mas->last - min + 1)) 4911 break; 4912 4913 if (!gaps) { 4914 /* Skip the next slot, it cannot be a gap. 
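				 * Without a gap array, two adjacent slots
				 * cannot both be empty (NULL ranges are
				 * coalesced in leaves), so step back past the
				 * occupied neighbour in one move.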
*/ 4915 if (offset < 2) 4916 goto ascend; 4917 4918 offset -= 2; 4919 max = pivots[offset]; 4920 min = mas_safe_min(mas, pivots, offset); 4921 continue; 4922 } 4923 } 4924 4925 if (!offset) 4926 goto ascend; 4927 4928 offset--; 4929 max = min - 1; 4930 min = mas_safe_min(mas, pivots, offset); 4931 } 4932 4933 if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) 4934 goto no_space; 4935 4936 if (unlikely(ma_is_leaf(type))) { 4937 mas->offset = offset; 4938 mas->min = min; 4939 mas->max = min + gap - 1; 4940 return true; 4941 } 4942 4943 /* descend, only happens under lock. */ 4944 mas->node = mas_slot(mas, slots, offset); 4945 mas->min = min; 4946 mas->max = max; 4947 mas->offset = mas_data_end(mas); 4948 return false; 4949 4950 ascend: 4951 if (!mte_is_root(mas->node)) 4952 return false; 4953 4954 no_space: 4955 mas_set_err(mas, -EBUSY); 4956 return false; 4957 } 4958 4959 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4960 { 4961 enum maple_type type = mte_node_type(mas->node); 4962 unsigned long pivot, min, gap = 0; 4963 unsigned char offset; 4964 unsigned long *gaps; 4965 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 4966 void __rcu **slots = ma_slots(mas_mn(mas), type); 4967 bool found = false; 4968 4969 if (ma_is_dense(type)) { 4970 mas->offset = (unsigned char)(mas->index - mas->min); 4971 return true; 4972 } 4973 4974 gaps = ma_gaps(mte_to_node(mas->node), type); 4975 offset = mas->offset; 4976 min = mas_safe_min(mas, pivots, offset); 4977 for (; offset < mt_slots[type]; offset++) { 4978 pivot = mas_safe_pivot(mas, pivots, offset, type); 4979 if (offset && !pivot) 4980 break; 4981 4982 /* Not within lower bounds */ 4983 if (mas->index > pivot) 4984 goto next_slot; 4985 4986 if (gaps) 4987 gap = gaps[offset]; 4988 else if (!mas_slot(mas, slots, offset)) 4989 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 4990 else 4991 goto next_slot; 4992 4993 if (gap >= size) { 4994 if (ma_is_leaf(type)) { 4995 found = true; 4996 goto done; 4997 } 4998 if (mas->index <= pivot) { 4999 mas->node = mas_slot(mas, slots, offset); 5000 mas->min = min; 5001 mas->max = pivot; 5002 offset = 0; 5003 break; 5004 } 5005 } 5006 next_slot: 5007 min = pivot + 1; 5008 if (mas->last <= pivot) { 5009 mas_set_err(mas, -EBUSY); 5010 return true; 5011 } 5012 } 5013 5014 if (mte_is_root(mas->node)) 5015 found = true; 5016 done: 5017 mas->offset = offset; 5018 return found; 5019 } 5020 5021 /** 5022 * mas_walk() - Search for @mas->index in the tree. 5023 * @mas: The maple state. 5024 * 5025 * mas->index and mas->last will be set to the range if there is a value. If 5026 * mas->node is MAS_NONE, reset to MAS_START. 5027 * 5028 * Return: the entry at the location or %NULL. 
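 *
 * A minimal usage sketch (illustrative only; the caller holds the RCU read
 * lock or the write lock, and 'tree' is a placeholder):
 *
 *	MA_STATE(mas, &tree, 12, 12);
 *	entry = mas_walk(&mas);
 *
 * On return, mas.index and mas.last span the range containing index 12.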
5029 */ 5030 void *mas_walk(struct ma_state *mas) 5031 { 5032 void *entry; 5033 5034 retry: 5035 entry = mas_state_walk(mas); 5036 if (mas_is_start(mas)) 5037 goto retry; 5038 5039 if (mas_is_ptr(mas)) { 5040 if (!mas->index) { 5041 mas->last = 0; 5042 } else { 5043 mas->index = 1; 5044 mas->last = ULONG_MAX; 5045 } 5046 return entry; 5047 } 5048 5049 if (mas_is_none(mas)) { 5050 mas->index = 0; 5051 mas->last = ULONG_MAX; 5052 } 5053 5054 return entry; 5055 } 5056 EXPORT_SYMBOL_GPL(mas_walk); 5057 5058 static inline bool mas_rewind_node(struct ma_state *mas) 5059 { 5060 unsigned char slot; 5061 5062 do { 5063 if (mte_is_root(mas->node)) { 5064 slot = mas->offset; 5065 if (!slot) 5066 return false; 5067 } else { 5068 mas_ascend(mas); 5069 slot = mas->offset; 5070 } 5071 } while (!slot); 5072 5073 mas->offset = --slot; 5074 return true; 5075 } 5076 5077 /* 5078 * mas_skip_node() - Internal function. Skip over a node. 5079 * @mas: The maple state. 5080 * 5081 * Return: true if there is another node, false otherwise. 5082 */ 5083 static inline bool mas_skip_node(struct ma_state *mas) 5084 { 5085 unsigned char slot, slot_count; 5086 unsigned long *pivots; 5087 enum maple_type mt; 5088 5089 mt = mte_node_type(mas->node); 5090 slot_count = mt_slots[mt] - 1; 5091 do { 5092 if (mte_is_root(mas->node)) { 5093 slot = mas->offset; 5094 if (slot > slot_count) { 5095 mas_set_err(mas, -EBUSY); 5096 return false; 5097 } 5098 } else { 5099 mas_ascend(mas); 5100 slot = mas->offset; 5101 mt = mte_node_type(mas->node); 5102 slot_count = mt_slots[mt] - 1; 5103 } 5104 } while (slot > slot_count); 5105 5106 mas->offset = ++slot; 5107 pivots = ma_pivots(mas_mn(mas), mt); 5108 if (slot > 0) 5109 mas->min = pivots[slot - 1] + 1; 5110 5111 if (slot <= slot_count) 5112 mas->max = pivots[slot]; 5113 5114 return true; 5115 } 5116 5117 /* 5118 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5119 * @size 5120 * @mas: The maple state 5121 * @size: The size of the gap required 5122 * 5123 * Search between @mas->index and @mas->last for a gap of @size. 5124 */ 5125 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5126 { 5127 struct maple_enode *last = NULL; 5128 5129 /* 5130 * There are 4 options: 5131 * go to child (descend) 5132 * go back to parent (ascend) 5133 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5134 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5135 */ 5136 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5137 if (last == mas->node) 5138 mas_skip_node(mas); 5139 else 5140 last = mas->node; 5141 } 5142 } 5143 5144 /* 5145 * mas_fill_gap() - Fill a located gap with @entry. 5146 * @mas: The maple state 5147 * @entry: The value to store 5148 * @slot: The offset into the node to store the @entry 5149 * @size: The size of the entry 5150 * @index: The start location 5151 */ 5152 static inline void mas_fill_gap(struct ma_state *mas, void *entry, 5153 unsigned char slot, unsigned long size, unsigned long *index) 5154 { 5155 MA_WR_STATE(wr_mas, mas, entry); 5156 unsigned char pslot = mte_parent_slot(mas->node); 5157 struct maple_enode *mn = mas->node; 5158 unsigned long *pivots; 5159 enum maple_type ptype; 5160 /* 5161 * mas->index is the start address for the search 5162 * which may no longer be needed. 
5163 * mas->last is the end address for the search 5164 */ 5165 5166 *index = mas->index; 5167 mas->last = mas->index + size - 1; 5168 5169 /* 5170 * It is possible that using mas->max and mas->min to correctly 5171 * calculate the index and last will cause an issue in the gap 5172 * calculation, so fix the ma_state here 5173 */ 5174 mas_ascend(mas); 5175 ptype = mte_node_type(mas->node); 5176 pivots = ma_pivots(mas_mn(mas), ptype); 5177 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); 5178 mas->min = mas_safe_min(mas, pivots, pslot); 5179 mas->node = mn; 5180 mas->offset = slot; 5181 mas_wr_store_entry(&wr_mas); 5182 } 5183 5184 /* 5185 * mas_sparse_area() - Internal function. Return upper or lower limit when 5186 * searching for a gap in an empty tree. 5187 * @mas: The maple state 5188 * @min: the minimum range 5189 * @max: The maximum range 5190 * @size: The size of the gap 5191 * @fwd: Searching forward or back 5192 */ 5193 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, 5194 unsigned long max, unsigned long size, bool fwd) 5195 { 5196 unsigned long start = 0; 5197 5198 if (!unlikely(mas_is_none(mas))) 5199 start++; 5200 /* mas_is_ptr */ 5201 5202 if (start < min) 5203 start = min; 5204 5205 if (fwd) { 5206 mas->index = start; 5207 mas->last = start + size - 1; 5208 return; 5209 } 5210 5211 mas->index = max; 5212 } 5213 5214 /* 5215 * mas_empty_area() - Get the lowest address within the range that is 5216 * sufficient for the size requested. 5217 * @mas: The maple state 5218 * @min: The lowest value of the range 5219 * @max: The highest value of the range 5220 * @size: The size needed 5221 */ 5222 int mas_empty_area(struct ma_state *mas, unsigned long min, 5223 unsigned long max, unsigned long size) 5224 { 5225 unsigned char offset; 5226 unsigned long *pivots; 5227 enum maple_type mt; 5228 5229 if (mas_is_start(mas)) 5230 mas_start(mas); 5231 else if (mas->offset >= 2) 5232 mas->offset -= 2; 5233 else if (!mas_skip_node(mas)) 5234 return -EBUSY; 5235 5236 /* Empty set */ 5237 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5238 mas_sparse_area(mas, min, max, size, true); 5239 return 0; 5240 } 5241 5242 /* The start of the window can only be within these values */ 5243 mas->index = min; 5244 mas->last = max; 5245 mas_awalk(mas, size); 5246 5247 if (unlikely(mas_is_err(mas))) 5248 return xa_err(mas->node); 5249 5250 offset = mas->offset; 5251 if (unlikely(offset == MAPLE_NODE_SLOTS)) 5252 return -EBUSY; 5253 5254 mt = mte_node_type(mas->node); 5255 pivots = ma_pivots(mas_mn(mas), mt); 5256 if (offset) 5257 mas->min = pivots[offset - 1] + 1; 5258 5259 if (offset < mt_pivots[mt]) 5260 mas->max = pivots[offset]; 5261 5262 if (mas->index < mas->min) 5263 mas->index = mas->min; 5264 5265 mas->last = mas->index + size - 1; 5266 return 0; 5267 } 5268 EXPORT_SYMBOL_GPL(mas_empty_area); 5269 5270 /* 5271 * mas_empty_area_rev() - Get the highest address within the range that is 5272 * sufficient for the size requested. 5273 * @mas: The maple state 5274 * @min: The lowest value of the range 5275 * @max: The highest value of the range 5276 * @size: The size needed 5277 */ 5278 int mas_empty_area_rev(struct ma_state *mas, unsigned long min, 5279 unsigned long max, unsigned long size) 5280 { 5281 struct maple_enode *last = mas->node; 5282 5283 if (mas_is_start(mas)) { 5284 mas_start(mas); 5285 mas->offset = mas_data_end(mas); 5286 } else if (mas->offset >= 2) { 5287 mas->offset -= 2; 5288 } else if (!mas_rewind_node(mas)) { 5289 return -EBUSY; 5290 } 5291 5292 /* Empty set. 
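	 * An empty tree or a single root pointer has no nodes to walk, so
	 * the highest fitting range is derived straight from the limits.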
*/ 5293 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5294 mas_sparse_area(mas, min, max, size, false); 5295 return 0; 5296 } 5297 5298 /* The start of the window can only be within these values. */ 5299 mas->index = min; 5300 mas->last = max; 5301 5302 while (!mas_rev_awalk(mas, size)) { 5303 if (last == mas->node) { 5304 if (!mas_rewind_node(mas)) 5305 return -EBUSY; 5306 } else { 5307 last = mas->node; 5308 } 5309 } 5310 5311 if (mas_is_err(mas)) 5312 return xa_err(mas->node); 5313 5314 if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) 5315 return -EBUSY; 5316 5317 /* 5318 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If 5319 * the maximum is outside the window we are searching, then use the last 5320 * location in the search. 5321 * mas->max and mas->min is the range of the gap. 5322 * mas->index and mas->last are currently set to the search range. 5323 */ 5324 5325 /* Trim the upper limit to the max. */ 5326 if (mas->max <= mas->last) 5327 mas->last = mas->max; 5328 5329 mas->index = mas->last - size + 1; 5330 return 0; 5331 } 5332 EXPORT_SYMBOL_GPL(mas_empty_area_rev); 5333 5334 static inline int mas_alloc(struct ma_state *mas, void *entry, 5335 unsigned long size, unsigned long *index) 5336 { 5337 unsigned long min; 5338 5339 mas_start(mas); 5340 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5341 mas_root_expand(mas, entry); 5342 if (mas_is_err(mas)) 5343 return xa_err(mas->node); 5344 5345 if (!mas->index) 5346 return mte_pivot(mas->node, 0); 5347 return mte_pivot(mas->node, 1); 5348 } 5349 5350 /* Must be walking a tree. */ 5351 mas_awalk(mas, size); 5352 if (mas_is_err(mas)) 5353 return xa_err(mas->node); 5354 5355 if (mas->offset == MAPLE_NODE_SLOTS) 5356 goto no_gap; 5357 5358 /* 5359 * At this point, mas->node points to the right node and we have an 5360 * offset that has a sufficient gap. 5361 */ 5362 min = mas->min; 5363 if (mas->offset) 5364 min = mte_pivot(mas->node, mas->offset - 1) + 1; 5365 5366 if (mas->index < min) 5367 mas->index = min; 5368 5369 mas_fill_gap(mas, entry, mas->offset, size, index); 5370 return 0; 5371 5372 no_gap: 5373 return -EBUSY; 5374 } 5375 5376 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, 5377 unsigned long max, void *entry, 5378 unsigned long size, unsigned long *index) 5379 { 5380 int ret = 0; 5381 5382 ret = mas_empty_area_rev(mas, min, max, size); 5383 if (ret) 5384 return ret; 5385 5386 if (mas_is_err(mas)) 5387 return xa_err(mas->node); 5388 5389 if (mas->offset == MAPLE_NODE_SLOTS) 5390 goto no_gap; 5391 5392 mas_fill_gap(mas, entry, mas->offset, size, index); 5393 return 0; 5394 5395 no_gap: 5396 return -EBUSY; 5397 } 5398 5399 /* 5400 * mas_dead_leaves() - Mark all leaves of a node as dead. 5401 * @mas: The maple state 5402 * @slots: Pointer to the slot array 5403 * 5404 * Must hold the write lock. 5405 * 5406 * Return: The number of leaves marked as dead. 
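 *
 * Each dead slot is republished as a plain struct maple_node pointer with
 * the type saved in node->type, which is the layout the deferred free
 * walkers below expect to find.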
5407 */ 5408 static inline 5409 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots) 5410 { 5411 struct maple_node *node; 5412 enum maple_type type; 5413 void *entry; 5414 int offset; 5415 5416 for (offset = 0; offset < mt_slot_count(mas->node); offset++) { 5417 entry = mas_slot_locked(mas, slots, offset); 5418 type = mte_node_type(entry); 5419 node = mte_to_node(entry); 5420 /* Use both node and type to catch LE & BE metadata */ 5421 if (!node || !type) 5422 break; 5423 5424 mte_set_node_dead(entry); 5425 smp_wmb(); /* Needed for RCU */ 5426 node->type = type; 5427 rcu_assign_pointer(slots[offset], node); 5428 } 5429 5430 return offset; 5431 } 5432 5433 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset) 5434 { 5435 struct maple_node *node, *next; 5436 void __rcu **slots = NULL; 5437 5438 next = mas_mn(mas); 5439 do { 5440 mas->node = ma_enode_ptr(next); 5441 node = mas_mn(mas); 5442 slots = ma_slots(node, node->type); 5443 next = mas_slot_locked(mas, slots, offset); 5444 offset = 0; 5445 } while (!ma_is_leaf(next->type)); 5446 5447 return slots; 5448 } 5449 5450 static void mt_free_walk(struct rcu_head *head) 5451 { 5452 void __rcu **slots; 5453 struct maple_node *node, *start; 5454 struct maple_tree mt; 5455 unsigned char offset; 5456 enum maple_type type; 5457 MA_STATE(mas, &mt, 0, 0); 5458 5459 node = container_of(head, struct maple_node, rcu); 5460 5461 if (ma_is_leaf(node->type)) 5462 goto free_leaf; 5463 5464 mt_init_flags(&mt, node->ma_flags); 5465 mas_lock(&mas); 5466 start = node; 5467 mas.node = mt_mk_node(node, node->type); 5468 slots = mas_dead_walk(&mas, 0); 5469 node = mas_mn(&mas); 5470 do { 5471 mt_free_bulk(node->slot_len, slots); 5472 offset = node->parent_slot + 1; 5473 mas.node = node->piv_parent; 5474 if (mas_mn(&mas) == node) 5475 goto start_slots_free; 5476 5477 type = mte_node_type(mas.node); 5478 slots = ma_slots(mte_to_node(mas.node), type); 5479 if ((offset < mt_slots[type]) && (slots[offset])) 5480 slots = mas_dead_walk(&mas, offset); 5481 5482 node = mas_mn(&mas); 5483 } while ((node != start) || (node->slot_len < offset)); 5484 5485 slots = ma_slots(node, node->type); 5486 mt_free_bulk(node->slot_len, slots); 5487 5488 start_slots_free: 5489 mas_unlock(&mas); 5490 free_leaf: 5491 mt_free_rcu(&node->rcu); 5492 } 5493 5494 static inline void __rcu **mas_destroy_descend(struct ma_state *mas, 5495 struct maple_enode *prev, unsigned char offset) 5496 { 5497 struct maple_node *node; 5498 struct maple_enode *next = mas->node; 5499 void __rcu **slots = NULL; 5500 5501 do { 5502 mas->node = next; 5503 node = mas_mn(mas); 5504 slots = ma_slots(node, mte_node_type(mas->node)); 5505 next = mas_slot_locked(mas, slots, 0); 5506 if ((mte_dead_node(next))) 5507 next = mas_slot_locked(mas, slots, 1); 5508 5509 mte_set_node_dead(mas->node); 5510 node->type = mte_node_type(mas->node); 5511 node->piv_parent = prev; 5512 node->parent_slot = offset; 5513 offset = 0; 5514 prev = mas->node; 5515 } while (!mte_is_leaf(next)); 5516 5517 return slots; 5518 } 5519 5520 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags, 5521 bool free) 5522 { 5523 void __rcu **slots; 5524 struct maple_node *node = mte_to_node(enode); 5525 struct maple_enode *start; 5526 struct maple_tree mt; 5527 5528 MA_STATE(mas, &mt, 0, 0); 5529 5530 if (mte_is_leaf(enode)) 5531 goto free_leaf; 5532 5533 mt_init_flags(&mt, ma_flags); 5534 mas_lock(&mas); 5535 5536 mas.node = start = enode; 5537 slots = mas_destroy_descend(&mas, start, 0); 5538 
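	/*
	 * Walk back up from the deepest left-most node: mark and free each
	 * node's children, then either descend into the next sibling subtree
	 * or return to the parent, until the walk arrives back at the start
	 * node.
	 */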
node = mas_mn(&mas); 5539 do { 5540 enum maple_type type; 5541 unsigned char offset; 5542 struct maple_enode *parent, *tmp; 5543 5544 node->slot_len = mas_dead_leaves(&mas, slots); 5545 if (free) 5546 mt_free_bulk(node->slot_len, slots); 5547 offset = node->parent_slot + 1; 5548 mas.node = node->piv_parent; 5549 if (mas_mn(&mas) == node) 5550 goto start_slots_free; 5551 5552 type = mte_node_type(mas.node); 5553 slots = ma_slots(mte_to_node(mas.node), type); 5554 if (offset >= mt_slots[type]) 5555 goto next; 5556 5557 tmp = mas_slot_locked(&mas, slots, offset); 5558 if (mte_node_type(tmp) && mte_to_node(tmp)) { 5559 parent = mas.node; 5560 mas.node = tmp; 5561 slots = mas_destroy_descend(&mas, parent, offset); 5562 } 5563 next: 5564 node = mas_mn(&mas); 5565 } while (start != mas.node); 5566 5567 node = mas_mn(&mas); 5568 node->slot_len = mas_dead_leaves(&mas, slots); 5569 if (free) 5570 mt_free_bulk(node->slot_len, slots); 5571 5572 start_slots_free: 5573 mas_unlock(&mas); 5574 5575 free_leaf: 5576 if (free) 5577 mt_free_rcu(&node->rcu); 5578 } 5579 5580 /* 5581 * mte_destroy_walk() - Free a tree or sub-tree. 5582 * @enode: the encoded maple node (maple_enode) to start 5583 * @mt: the tree to free - needed for node types. 5584 * 5585 * Must hold the write lock. 5586 */ 5587 static inline void mte_destroy_walk(struct maple_enode *enode, 5588 struct maple_tree *mt) 5589 { 5590 struct maple_node *node = mte_to_node(enode); 5591 5592 if (mt_in_rcu(mt)) { 5593 mt_destroy_walk(enode, mt->ma_flags, false); 5594 call_rcu(&node->rcu, mt_free_walk); 5595 } else { 5596 mt_destroy_walk(enode, mt->ma_flags, true); 5597 } 5598 } 5599 5600 static void mas_wr_store_setup(struct ma_wr_state *wr_mas) 5601 { 5602 if (!mas_is_start(wr_mas->mas)) { 5603 if (mas_is_none(wr_mas->mas)) { 5604 mas_reset(wr_mas->mas); 5605 } else { 5606 wr_mas->r_max = wr_mas->mas->max; 5607 wr_mas->type = mte_node_type(wr_mas->mas->node); 5608 if (mas_is_span_wr(wr_mas)) 5609 mas_reset(wr_mas->mas); 5610 } 5611 } 5612 } 5613 5614 /* Interface */ 5615 5616 /** 5617 * mas_store() - Store an @entry. 5618 * @mas: The maple state. 5619 * @entry: The entry to store. 5620 * 5621 * The @mas->index and @mas->last is used to set the range for the @entry. 5622 * Note: The @mas should have pre-allocated entries to ensure there is memory to 5623 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details. 5624 * 5625 * Return: the first entry between mas->index and mas->last or %NULL. 5626 */ 5627 void *mas_store(struct ma_state *mas, void *entry) 5628 { 5629 MA_WR_STATE(wr_mas, mas, entry); 5630 5631 trace_ma_write(__func__, mas, 0, entry); 5632 #ifdef CONFIG_DEBUG_MAPLE_TREE 5633 if (mas->index > mas->last) 5634 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); 5635 MT_BUG_ON(mas->tree, mas->index > mas->last); 5636 if (mas->index > mas->last) { 5637 mas_set_err(mas, -EINVAL); 5638 return NULL; 5639 } 5640 5641 #endif 5642 5643 /* 5644 * Storing is the same operation as insert with the added caveat that it 5645 * can overwrite entries. Although this seems simple enough, one may 5646 * want to examine what happens if a single store operation was to 5647 * overwrite multiple entries within a self-balancing B-Tree. 5648 */ 5649 mas_wr_store_setup(&wr_mas); 5650 mas_wr_store_entry(&wr_mas); 5651 return wr_mas.content; 5652 } 5653 EXPORT_SYMBOL_GPL(mas_store); 5654 5655 /** 5656 * mas_store_gfp() - Store a value into the tree. 
5657 * @mas: The maple state 5658 * @entry: The entry to store 5659 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5660 * 5661 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5662 * be allocated. 5663 */ 5664 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5665 { 5666 MA_WR_STATE(wr_mas, mas, entry); 5667 5668 mas_wr_store_setup(&wr_mas); 5669 trace_ma_write(__func__, mas, 0, entry); 5670 retry: 5671 mas_wr_store_entry(&wr_mas); 5672 if (unlikely(mas_nomem(mas, gfp))) 5673 goto retry; 5674 5675 if (unlikely(mas_is_err(mas))) 5676 return xa_err(mas->node); 5677 5678 return 0; 5679 } 5680 EXPORT_SYMBOL_GPL(mas_store_gfp); 5681 5682 /** 5683 * mas_store_prealloc() - Store a value into the tree using memory 5684 * preallocated in the maple state. 5685 * @mas: The maple state 5686 * @entry: The entry to store. 5687 */ 5688 void mas_store_prealloc(struct ma_state *mas, void *entry) 5689 { 5690 MA_WR_STATE(wr_mas, mas, entry); 5691 5692 mas_wr_store_setup(&wr_mas); 5693 trace_ma_write(__func__, mas, 0, entry); 5694 mas_wr_store_entry(&wr_mas); 5695 BUG_ON(mas_is_err(mas)); 5696 mas_destroy(mas); 5697 } 5698 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5699 5700 /** 5701 * mas_preallocate() - Preallocate enough nodes for a store operation 5702 * @mas: The maple state 5703 * @gfp: The GFP_FLAGS to use for allocations. 5704 * 5705 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5706 */ 5707 int mas_preallocate(struct ma_state *mas, gfp_t gfp) 5708 { 5709 int ret; 5710 5711 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5712 mas->mas_flags |= MA_STATE_PREALLOC; 5713 if (likely(!mas_is_err(mas))) 5714 return 0; 5715 5716 mas_set_alloc_req(mas, 0); 5717 ret = xa_err(mas->node); 5718 mas_reset(mas); 5719 mas_destroy(mas); 5720 mas_reset(mas); 5721 return ret; 5722 } 5723 5724 /* 5725 * mas_destroy() - destroy a maple state. 5726 * @mas: The maple state 5727 * 5728 * Upon completion, check the left-most node and rebalance against the node to 5729 * the right if necessary. Frees any allocated nodes associated with this maple 5730 * state. 5731 */ 5732 void mas_destroy(struct ma_state *mas) 5733 { 5734 struct maple_alloc *node; 5735 unsigned long total; 5736 5737 /* 5738 * When using mas_for_each() to insert an expected number of elements, 5739 * it is possible that the number inserted is less than the expected 5740 * number. To fix an invalid final node, a check is performed here to 5741 * rebalance the previous node with the final node. 5742 */ 5743 if (mas->mas_flags & MA_STATE_REBALANCE) { 5744 unsigned char end; 5745 5746 if (mas_is_start(mas)) 5747 mas_start(mas); 5748 5749 mtree_range_walk(mas); 5750 end = mas_data_end(mas) + 1; 5751 if (end < mt_min_slot_count(mas->node) - 1) 5752 mas_destroy_rebalance(mas, end); 5753 5754 mas->mas_flags &= ~MA_STATE_REBALANCE; 5755 } 5756 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5757 5758 total = mas_allocated(mas); 5759 while (total) { 5760 node = mas->alloc; 5761 mas->alloc = node->slot[0]; 5762 if (node->node_count > 1) { 5763 size_t count = node->node_count - 1; 5764 5765 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5766 total -= count; 5767 } 5768 kmem_cache_free(maple_node_cache, node); 5769 total--; 5770 } 5771 5772 mas->alloc = NULL; 5773 } 5774 EXPORT_SYMBOL_GPL(mas_destroy); 5775 5776 /* 5777 * mas_expected_entries() - Set the expected number of entries that will be inserted. 
5778 * @mas: The maple state 5779 * @nr_entries: The number of expected entries. 5780 * 5781 * This will attempt to pre-allocate enough nodes to store the expected number 5782 * of entries. The allocations will occur using the bulk allocator interface 5783 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5784 * to ensure any unused nodes are freed. 5785 * 5786 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5787 */ 5788 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5789 { 5790 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5791 struct maple_enode *enode = mas->node; 5792 int nr_nodes; 5793 int ret; 5794 5795 /* 5796 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5797 * forking a process and duplicating the VMAs from one tree to a new 5798 * tree. When such a situation arises, it is known that the new tree is 5799 * not going to be used until the entire tree is populated. For 5800 * performance reasons, it is best to use a bulk load with RCU disabled. 5801 * This allows for optimistic splitting that favours the left and reuse 5802 * of nodes during the operation. 5803 */ 5804 5805 /* Optimize splitting for bulk insert in-order */ 5806 mas->mas_flags |= MA_STATE_BULK; 5807 5808 /* 5809 * Avoid overflow, assume a gap between each entry and a trailing null. 5810 * If this is wrong, it just means allocation can happen during 5811 * insertion of entries. 5812 */ 5813 nr_nodes = max(nr_entries, nr_entries * 2 + 1); 5814 if (!mt_is_alloc(mas->tree)) 5815 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; 5816 5817 /* Leaves; reduce slots to keep space for expansion */ 5818 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); 5819 /* Internal nodes */ 5820 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); 5821 /* Add working room for split (2 nodes) + new parents */ 5822 mas_node_count(mas, nr_nodes + 3); 5823 5824 /* Detect if allocations run out */ 5825 mas->mas_flags |= MA_STATE_PREALLOC; 5826 5827 if (!mas_is_err(mas)) 5828 return 0; 5829 5830 ret = xa_err(mas->node); 5831 mas->node = enode; 5832 mas_destroy(mas); 5833 return ret; 5834 5835 } 5836 EXPORT_SYMBOL_GPL(mas_expected_entries); 5837 5838 /** 5839 * mas_next() - Get the next entry. 5840 * @mas: The maple state 5841 * @max: The maximum index to check. 5842 * 5843 * Returns the next entry after @mas->index. 5844 * Must hold rcu_read_lock or the write lock. 5845 * Can return the zero entry. 5846 * 5847 * Return: The next entry or %NULL 5848 */ 5849 void *mas_next(struct ma_state *mas, unsigned long max) 5850 { 5851 if (mas_is_none(mas) || mas_is_paused(mas)) 5852 mas->node = MAS_START; 5853 5854 if (mas_is_start(mas)) 5855 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ 5856 5857 if (mas_is_ptr(mas)) { 5858 if (!mas->index) { 5859 mas->index = 1; 5860 mas->last = ULONG_MAX; 5861 } 5862 return NULL; 5863 } 5864 5865 if (mas->last == ULONG_MAX) 5866 return NULL; 5867 5868 /* Retries on dead nodes handled by mas_next_entry */ 5869 return mas_next_entry(mas, max); 5870 } 5871 EXPORT_SYMBOL_GPL(mas_next); 5872 5873 /** 5874 * mt_next() - get the next value in the maple tree 5875 * @mt: The maple tree 5876 * @index: The start index 5877 * @max: The maximum index to check 5878 * 5879 * Return: The entry at @index or higher, or %NULL if nothing is found. 
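 *
 * An illustrative call (hypothetical values; 'tree' is a placeholder):
 *
 *	entry = mt_next(&tree, 5, ULONG_MAX);
 *
 * which takes the RCU read lock internally and returns the next entry
 * stored above index 5, if any.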
5880 */ 5881 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) 5882 { 5883 void *entry = NULL; 5884 MA_STATE(mas, mt, index, index); 5885 5886 rcu_read_lock(); 5887 entry = mas_next(&mas, max); 5888 rcu_read_unlock(); 5889 return entry; 5890 } 5891 EXPORT_SYMBOL_GPL(mt_next); 5892 5893 /** 5894 * mas_prev() - Get the previous entry 5895 * @mas: The maple state 5896 * @min: The minimum value to check. 5897 * 5898 * Must hold rcu_read_lock or the write lock. 5899 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not 5900 * searchable nodes. 5901 * 5902 * Return: the previous value or %NULL. 5903 */ 5904 void *mas_prev(struct ma_state *mas, unsigned long min) 5905 { 5906 if (!mas->index) { 5907 /* Nothing comes before 0 */ 5908 mas->last = 0; 5909 return NULL; 5910 } 5911 5912 if (unlikely(mas_is_ptr(mas))) 5913 return NULL; 5914 5915 if (mas_is_none(mas) || mas_is_paused(mas)) 5916 mas->node = MAS_START; 5917 5918 if (mas_is_start(mas)) { 5919 mas_walk(mas); 5920 if (!mas->index) 5921 return NULL; 5922 } 5923 5924 if (mas_is_ptr(mas)) { 5925 if (!mas->index) { 5926 mas->last = 0; 5927 return NULL; 5928 } 5929 5930 mas->index = mas->last = 0; 5931 return mas_root_locked(mas); 5932 } 5933 return mas_prev_entry(mas, min); 5934 } 5935 EXPORT_SYMBOL_GPL(mas_prev); 5936 5937 /** 5938 * mt_prev() - get the previous value in the maple tree 5939 * @mt: The maple tree 5940 * @index: The start index 5941 * @min: The minimum index to check 5942 * 5943 * Return: The entry at @index or lower, or %NULL if nothing is found. 5944 */ 5945 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) 5946 { 5947 void *entry = NULL; 5948 MA_STATE(mas, mt, index, index); 5949 5950 rcu_read_lock(); 5951 entry = mas_prev(&mas, min); 5952 rcu_read_unlock(); 5953 return entry; 5954 } 5955 EXPORT_SYMBOL_GPL(mt_prev); 5956 5957 /** 5958 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. 5959 * @mas: The maple state to pause 5960 * 5961 * Some users need to pause a walk and drop the lock they're holding in 5962 * order to yield to a higher priority thread or carry out an operation 5963 * on an entry. Those users should call this function before they drop 5964 * the lock. It resets the @mas to be suitable for the next iteration 5965 * of the loop after the user has reacquired the lock. If most entries 5966 * found during a walk require you to call mas_pause(), the mt_for_each() 5967 * iterator may be more appropriate. 5968 * 5969 */ 5970 void mas_pause(struct ma_state *mas) 5971 { 5972 mas->node = MAS_PAUSE; 5973 } 5974 EXPORT_SYMBOL_GPL(mas_pause); 5975 5976 /** 5977 * mas_find() - On the first call, find the entry at or after mas->index up to 5978 * %max. Otherwise, find the entry after mas->index. 5979 * @mas: The maple state 5980 * @max: The maximum value to check. 5981 * 5982 * Must hold rcu_read_lock or the write lock. 5983 * If an entry exists, last and index are updated accordingly. 5984 * May set @mas->node to MAS_NONE. 5985 * 5986 * Return: The entry or %NULL. 
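 *
 * Typical iteration sketch (illustrative only; process_entry() is a
 * placeholder for the caller's handler):
 *
 *	MA_STATE(mas, &tree, 0, 0);
 *	rcu_read_lock();
 *	while ((entry = mas_find(&mas, ULONG_MAX)) != NULL)
 *		process_entry(entry);
 *	rcu_read_unlock();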
5987 */ 5988 void *mas_find(struct ma_state *mas, unsigned long max) 5989 { 5990 if (unlikely(mas_is_paused(mas))) { 5991 if (unlikely(mas->last == ULONG_MAX)) { 5992 mas->node = MAS_NONE; 5993 return NULL; 5994 } 5995 mas->node = MAS_START; 5996 mas->index = ++mas->last; 5997 } 5998 5999 if (unlikely(mas_is_start(mas))) { 6000 /* First run or continue */ 6001 void *entry; 6002 6003 if (mas->index > max) 6004 return NULL; 6005 6006 entry = mas_walk(mas); 6007 if (entry) 6008 return entry; 6009 } 6010 6011 if (unlikely(!mas_searchable(mas))) 6012 return NULL; 6013 6014 /* Retries on dead nodes handled by mas_next_entry */ 6015 return mas_next_entry(mas, max); 6016 } 6017 EXPORT_SYMBOL_GPL(mas_find); 6018 6019 /** 6020 * mas_find_rev: On the first call, find the first non-null entry at or below 6021 * mas->index down to %min. Otherwise find the first non-null entry below 6022 * mas->index down to %min. 6023 * @mas: The maple state 6024 * @min: The minimum value to check. 6025 * 6026 * Must hold rcu_read_lock or the write lock. 6027 * If an entry exists, last and index are updated accordingly. 6028 * May set @mas->node to MAS_NONE. 6029 * 6030 * Return: The entry or %NULL. 6031 */ 6032 void *mas_find_rev(struct ma_state *mas, unsigned long min) 6033 { 6034 if (unlikely(mas_is_paused(mas))) { 6035 if (unlikely(mas->last == ULONG_MAX)) { 6036 mas->node = MAS_NONE; 6037 return NULL; 6038 } 6039 mas->node = MAS_START; 6040 mas->last = --mas->index; 6041 } 6042 6043 if (unlikely(mas_is_start(mas))) { 6044 /* First run or continue */ 6045 void *entry; 6046 6047 if (mas->index < min) 6048 return NULL; 6049 6050 entry = mas_walk(mas); 6051 if (entry) 6052 return entry; 6053 } 6054 6055 if (unlikely(!mas_searchable(mas))) 6056 return NULL; 6057 6058 if (mas->index < min) 6059 return NULL; 6060 6061 /* Retries on dead nodes handled by mas_prev_entry */ 6062 return mas_prev_entry(mas, min); 6063 } 6064 EXPORT_SYMBOL_GPL(mas_find_rev); 6065 6066 /** 6067 * mas_erase() - Find the range in which index resides and erase the entire 6068 * range. 6069 * @mas: The maple state 6070 * 6071 * Must hold the write lock. 6072 * Searches for @mas->index, sets @mas->index and @mas->last to the range and 6073 * erases that range. 6074 * 6075 * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated. 6076 */ 6077 void *mas_erase(struct ma_state *mas) 6078 { 6079 void *entry; 6080 MA_WR_STATE(wr_mas, mas, NULL); 6081 6082 if (mas_is_none(mas) || mas_is_paused(mas)) 6083 mas->node = MAS_START; 6084 6085 /* Retry unnecessary when holding the write lock. */ 6086 entry = mas_state_walk(mas); 6087 if (!entry) 6088 return NULL; 6089 6090 write_retry: 6091 /* Must reset to ensure spanning writes of last slot are detected */ 6092 mas_reset(mas); 6093 mas_wr_store_setup(&wr_mas); 6094 mas_wr_store_entry(&wr_mas); 6095 if (mas_nomem(mas, GFP_KERNEL)) 6096 goto write_retry; 6097 6098 return entry; 6099 } 6100 EXPORT_SYMBOL_GPL(mas_erase); 6101 6102 /** 6103 * mas_nomem() - Check if there was an error allocating and do the allocation 6104 * if necessary If there are allocations, then free them. 6105 * @mas: The maple state 6106 * @gfp: The GFP_FLAGS to use for allocations 6107 * Return: true on allocation, false otherwise. 
6108 */ 6109 bool mas_nomem(struct ma_state *mas, gfp_t gfp) 6110 __must_hold(mas->tree->lock) 6111 { 6112 if (likely(mas->node != MA_ERROR(-ENOMEM))) { 6113 mas_destroy(mas); 6114 return false; 6115 } 6116 6117 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) { 6118 mtree_unlock(mas->tree); 6119 mas_alloc_nodes(mas, gfp); 6120 mtree_lock(mas->tree); 6121 } else { 6122 mas_alloc_nodes(mas, gfp); 6123 } 6124 6125 if (!mas_allocated(mas)) 6126 return false; 6127 6128 mas->node = MAS_START; 6129 return true; 6130 } 6131 6132 void __init maple_tree_init(void) 6133 { 6134 maple_node_cache = kmem_cache_create("maple_node", 6135 sizeof(struct maple_node), sizeof(struct maple_node), 6136 SLAB_PANIC, NULL); 6137 } 6138 6139 /** 6140 * mtree_load() - Load a value stored in a maple tree 6141 * @mt: The maple tree 6142 * @index: The index to load 6143 * 6144 * Return: the entry or %NULL 6145 */ 6146 void *mtree_load(struct maple_tree *mt, unsigned long index) 6147 { 6148 MA_STATE(mas, mt, index, index); 6149 void *entry; 6150 6151 trace_ma_read(__func__, &mas); 6152 rcu_read_lock(); 6153 retry: 6154 entry = mas_start(&mas); 6155 if (unlikely(mas_is_none(&mas))) 6156 goto unlock; 6157 6158 if (unlikely(mas_is_ptr(&mas))) { 6159 if (index) 6160 entry = NULL; 6161 6162 goto unlock; 6163 } 6164 6165 entry = mtree_lookup_walk(&mas); 6166 if (!entry && unlikely(mas_is_start(&mas))) 6167 goto retry; 6168 unlock: 6169 rcu_read_unlock(); 6170 if (xa_is_zero(entry)) 6171 return NULL; 6172 6173 return entry; 6174 } 6175 EXPORT_SYMBOL(mtree_load); 6176 6177 /** 6178 * mtree_store_range() - Store an entry at a given range. 6179 * @mt: The maple tree 6180 * @index: The start of the range 6181 * @last: The end of the range 6182 * @entry: The entry to store 6183 * @gfp: The GFP_FLAGS to use for allocations 6184 * 6185 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 6186 * be allocated. 6187 */ 6188 int mtree_store_range(struct maple_tree *mt, unsigned long index, 6189 unsigned long last, void *entry, gfp_t gfp) 6190 { 6191 MA_STATE(mas, mt, index, last); 6192 MA_WR_STATE(wr_mas, &mas, entry); 6193 6194 trace_ma_write(__func__, &mas, 0, entry); 6195 if (WARN_ON_ONCE(xa_is_advanced(entry))) 6196 return -EINVAL; 6197 6198 if (index > last) 6199 return -EINVAL; 6200 6201 mtree_lock(mt); 6202 retry: 6203 mas_wr_store_entry(&wr_mas); 6204 if (mas_nomem(&mas, gfp)) 6205 goto retry; 6206 6207 mtree_unlock(mt); 6208 if (mas_is_err(&mas)) 6209 return xa_err(mas.node); 6210 6211 return 0; 6212 } 6213 EXPORT_SYMBOL(mtree_store_range); 6214 6215 /** 6216 * mtree_store() - Store an entry at a given index. 6217 * @mt: The maple tree 6218 * @index: The index to store the value 6219 * @entry: The entry to store 6220 * @gfp: The GFP_FLAGS to use for allocations 6221 * 6222 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 6223 * be allocated. 6224 */ 6225 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, 6226 gfp_t gfp) 6227 { 6228 return mtree_store_range(mt, index, index, entry, gfp); 6229 } 6230 EXPORT_SYMBOL(mtree_store); 6231 6232 /** 6233 * mtree_insert_range() - Insert an entry at a given range if there is no value. 6234 * @mt: The maple tree 6235 * @first: The start of the range 6236 * @last: The end of the range 6237 * @entry: The entry to store 6238 * @gfp: The GFP_FLAGS to use for allocations.
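 *
 * Unlike mtree_store_range(), this fails instead of overwriting when any
 * part of the range already contains an entry.  Sketch (the values are
 * illustrative):
 *
 *	err = mtree_insert_range(mt, 10, 20, ptr, GFP_KERNEL);
 *
 * err is -EEXIST here if some index in 10-20 was already in use.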
6239 * 6240 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid 6241 * request, -ENOMEM if memory could not be allocated. 6242 */ 6243 int mtree_insert_range(struct maple_tree *mt, unsigned long first, 6244 unsigned long last, void *entry, gfp_t gfp) 6245 { 6246 MA_STATE(ms, mt, first, last); 6247 6248 if (WARN_ON_ONCE(xa_is_advanced(entry))) 6249 return -EINVAL; 6250 6251 if (first > last) 6252 return -EINVAL; 6253 6254 mtree_lock(mt); 6255 retry: 6256 mas_insert(&ms, entry); 6257 if (mas_nomem(&ms, gfp)) 6258 goto retry; 6259 6260 mtree_unlock(mt); 6261 if (mas_is_err(&ms)) 6262 return xa_err(ms.node); 6263 6264 return 0; 6265 } 6266 EXPORT_SYMBOL(mtree_insert_range); 6267 6268 /** 6269 * mtree_insert() - Insert an entry at a given index if there is no value. 6270 * @mt: The maple tree 6271 * @index: The index to store the value 6272 * @entry: The entry to store 6273 * @gfp: The GFP_FLAGS to use for allocations. 6274 * 6275 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid 6276 * request, -ENOMEM if memory could not be allocated. 6277 */ 6278 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, 6279 gfp_t gfp) 6280 { 6281 return mtree_insert_range(mt, index, index, entry, gfp); 6282 } 6283 EXPORT_SYMBOL(mtree_insert); 6284 6285 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, 6286 void *entry, unsigned long size, unsigned long min, 6287 unsigned long max, gfp_t gfp) 6288 { 6289 int ret = 0; 6290 6291 MA_STATE(mas, mt, min, max - size); 6292 if (!mt_is_alloc(mt)) 6293 return -EINVAL; 6294 6295 if (WARN_ON_ONCE(mt_is_reserved(entry))) 6296 return -EINVAL; 6297 6298 if (min > max) 6299 return -EINVAL; 6300 6301 if (max < size) 6302 return -EINVAL; 6303 6304 if (!size) 6305 return -EINVAL; 6306 6307 mtree_lock(mt); 6308 retry: 6309 mas.offset = 0; 6310 mas.index = min; 6311 mas.last = max - size; 6312 ret = mas_alloc(&mas, entry, size, startp); 6313 if (mas_nomem(&mas, gfp)) 6314 goto retry; 6315 6316 mtree_unlock(mt); 6317 return ret; 6318 } 6319 EXPORT_SYMBOL(mtree_alloc_range); 6320 6321 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, 6322 void *entry, unsigned long size, unsigned long min, 6323 unsigned long max, gfp_t gfp) 6324 { 6325 int ret = 0; 6326 6327 MA_STATE(mas, mt, min, max - size); 6328 if (!mt_is_alloc(mt)) 6329 return -EINVAL; 6330 6331 if (WARN_ON_ONCE(mt_is_reserved(entry))) 6332 return -EINVAL; 6333 6334 if (min >= max) 6335 return -EINVAL; 6336 6337 if (max < size - 1) 6338 return -EINVAL; 6339 6340 if (!size) 6341 return -EINVAL; 6342 6343 mtree_lock(mt); 6344 retry: 6345 ret = mas_rev_alloc(&mas, min, max, entry, size, startp); 6346 if (mas_nomem(&mas, gfp)) 6347 goto retry; 6348 6349 mtree_unlock(mt); 6350 return ret; 6351 } 6352 EXPORT_SYMBOL(mtree_alloc_rrange); 6353 6354 /** 6355 * mtree_erase() - Find an index and erase the entire range. 6356 * @mt: The maple tree 6357 * @index: The index to erase 6358 * 6359 * Erasing is the same as a walk to an entry then a store of a NULL to that 6360 * ENTIRE range. In fact, it is implemented as such using the advanced API.
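 *
 * Sketch (values illustrative): after
 *
 *	mtree_store_range(mt, 5, 100, ptr, GFP_KERNEL);
 *	entry = mtree_erase(mt, 50);
 *
 * entry is ptr and every index in 5-100 reads back as NULL.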
6361 * 6362 * Return: The entry stored at the @index or %NULL 6363 */ 6364 void *mtree_erase(struct maple_tree *mt, unsigned long index) 6365 { 6366 void *entry = NULL; 6367 6368 MA_STATE(mas, mt, index, index); 6369 trace_ma_op(__func__, &mas); 6370 6371 mtree_lock(mt); 6372 entry = mas_erase(&mas); 6373 mtree_unlock(mt); 6374 6375 return entry; 6376 } 6377 EXPORT_SYMBOL(mtree_erase); 6378 6379 /** 6380 * __mt_destroy() - Walk and free all nodes of a locked maple tree. 6381 * @mt: The maple tree 6382 * 6383 * Note: Does not handle locking. 6384 */ 6385 void __mt_destroy(struct maple_tree *mt) 6386 { 6387 void *root = mt_root_locked(mt); 6388 6389 rcu_assign_pointer(mt->ma_root, NULL); 6390 if (xa_is_node(root)) 6391 mte_destroy_walk(root, mt); 6392 6393 mt->ma_flags = 0; 6394 } 6395 EXPORT_SYMBOL_GPL(__mt_destroy); 6396 6397 /** 6398 * mtree_destroy() - Destroy a maple tree 6399 * @mt: The maple tree 6400 * 6401 * Frees all resources used by the tree. Handles locking. 6402 */ 6403 void mtree_destroy(struct maple_tree *mt) 6404 { 6405 mtree_lock(mt); 6406 __mt_destroy(mt); 6407 mtree_unlock(mt); 6408 } 6409 EXPORT_SYMBOL(mtree_destroy); 6410 6411 /** 6412 * mt_find() - Search from the start up until an entry is found. 6413 * @mt: The maple tree 6414 * @index: Pointer which contains the start location of the search 6415 * @max: The maximum value to check 6416 * 6417 * Handles locking. @index will be incremented to one beyond the range. 6418 * 6419 * Return: The entry at or after the @index or %NULL 6420 */ 6421 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) 6422 { 6423 MA_STATE(mas, mt, *index, *index); 6424 void *entry; 6425 #ifdef CONFIG_DEBUG_MAPLE_TREE 6426 unsigned long copy = *index; 6427 #endif 6428 6429 trace_ma_read(__func__, &mas); 6430 6431 if ((*index) > max) 6432 return NULL; 6433 6434 rcu_read_lock(); 6435 retry: 6436 entry = mas_state_walk(&mas); 6437 if (mas_is_start(&mas)) 6438 goto retry; 6439 6440 if (unlikely(xa_is_zero(entry))) 6441 entry = NULL; 6442 6443 if (entry) 6444 goto unlock; 6445 6446 while (mas_searchable(&mas) && (mas.index < max)) { 6447 entry = mas_next_entry(&mas, max); 6448 if (likely(entry && !xa_is_zero(entry))) 6449 break; 6450 } 6451 6452 if (unlikely(xa_is_zero(entry))) 6453 entry = NULL; 6454 unlock: 6455 rcu_read_unlock(); 6456 if (likely(entry)) { 6457 *index = mas.last + 1; 6458 #ifdef CONFIG_DEBUG_MAPLE_TREE 6459 if ((*index) && (*index) <= copy) 6460 pr_err("index not increased! %lx <= %lx\n", 6461 *index, copy); 6462 MT_BUG_ON(mt, (*index) && ((*index) <= copy)); 6463 #endif 6464 } 6465 6466 return entry; 6467 } 6468 EXPORT_SYMBOL(mt_find); 6469 6470 /** 6471 * mt_find_after() - Search from the start up until an entry is found. 
6472 * @mt: The maple tree 6473 * @index: Pointer which contains the start location of the search 6474 * @max: The maximum value to check 6475 * 6476 * Handles locking; detects wrapping on index == 0. 6477 * 6478 * Return: The entry at or after the @index or %NULL 6479 */ 6480 void *mt_find_after(struct maple_tree *mt, unsigned long *index, 6481 unsigned long max) 6482 { 6483 if (!(*index)) 6484 return NULL; 6485 6486 return mt_find(mt, index, max); 6487 } 6488 EXPORT_SYMBOL(mt_find_after); 6489 6490 #ifdef CONFIG_DEBUG_MAPLE_TREE 6491 atomic_t maple_tree_tests_run; 6492 EXPORT_SYMBOL_GPL(maple_tree_tests_run); 6493 atomic_t maple_tree_tests_passed; 6494 EXPORT_SYMBOL_GPL(maple_tree_tests_passed); 6495 6496 #ifndef __KERNEL__ 6497 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); 6498 void mt_set_non_kernel(unsigned int val) 6499 { 6500 kmem_cache_set_non_kernel(maple_node_cache, val); 6501 } 6502 6503 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); 6504 unsigned long mt_get_alloc_size(void) 6505 { 6506 return kmem_cache_get_alloc(maple_node_cache); 6507 } 6508 6509 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); 6510 void mt_zero_nr_tallocated(void) 6511 { 6512 kmem_cache_zero_nr_tallocated(maple_node_cache); 6513 } 6514 6515 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); 6516 unsigned int mt_nr_tallocated(void) 6517 { 6518 return kmem_cache_nr_tallocated(maple_node_cache); 6519 } 6520 6521 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); 6522 unsigned int mt_nr_allocated(void) 6523 { 6524 return kmem_cache_nr_allocated(maple_node_cache); 6525 } 6526 6527 /* 6528 * mas_dead_node() - Check if the maple state is pointing to a dead node. 6529 * @mas: The maple state 6530 * @index: The index to restore in @mas. 6531 * 6532 * Used in test code. 6533 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. 6534 */ 6535 static inline int mas_dead_node(struct ma_state *mas, unsigned long index) 6536 { 6537 if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) 6538 return 0; 6539 6540 if (likely(!mte_dead_node(mas->node))) 6541 return 0; 6542 6543 mas_rewalk(mas, index); 6544 return 1; 6545 } 6546 6547 void mt_cache_shrink(void) 6548 { 6549 } 6550 #else 6551 /* 6552 * mt_cache_shrink() - For testing, don't use this. 6553 * 6554 * Certain testcases can trigger an OOM when combined with other memory 6555 * debugging configuration options. This function is used to reduce the 6556 * possibility of an out of memory event due to kmem_cache objects remaining 6557 * around for longer than usual. 6558 */ 6559 void mt_cache_shrink(void) 6560 { 6561 kmem_cache_shrink(maple_node_cache); 6562 6563 } 6564 EXPORT_SYMBOL_GPL(mt_cache_shrink); 6565 6566 #endif /* not defined __KERNEL__ */ 6567 /* 6568 * mas_get_slot() - Get the entry in the maple state node stored at @offset. 6569 * @mas: The maple state 6570 * @offset: The offset into the slot array to fetch. 6571 * 6572 * Return: The entry stored at @offset. 6573 */ 6574 static inline struct maple_enode *mas_get_slot(struct ma_state *mas, 6575 unsigned char offset) 6576 { 6577 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)), 6578 offset); 6579 } 6580 6581 6582 /* 6583 * mas_first_entry() - Go to the first leaf and find the first entry. 6584 * @mas: the maple state. 6585 * @mn: the node backing @mas (mas_mn(@mas)). @limit: the maximum index to check. 6586 * @mt: the node type of @mn. 6587 * 6588 * Sets mas->offset to the offset of the entry, mas->index to the range minimum.
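 *
 * Caller sketch (mt_validate() below is the in-tree user):
 *
 *	mas_start(&mas);
 *	entry = mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX,
 *				mte_node_type(mas.node));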
6589 * 6590 * Return: The first entry or MAS_NONE. 6591 */ 6592 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, 6593 unsigned long limit, enum maple_type mt) 6594 6595 { 6596 unsigned long max; 6597 unsigned long *pivots; 6598 void __rcu **slots; 6599 void *entry = NULL; 6600 6601 mas->index = mas->min; 6602 if (mas->index > limit) 6603 goto none; 6604 6605 max = mas->max; 6606 mas->offset = 0; 6607 while (likely(!ma_is_leaf(mt))) { 6608 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6609 slots = ma_slots(mn, mt); 6610 pivots = ma_pivots(mn, mt); 6611 max = pivots[0]; 6612 entry = mas_slot(mas, slots, 0); 6613 if (unlikely(ma_dead_node(mn))) 6614 return NULL; 6615 mas->node = entry; 6616 mn = mas_mn(mas); 6617 mt = mte_node_type(mas->node); 6618 } 6619 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6620 6621 mas->max = max; 6622 slots = ma_slots(mn, mt); 6623 entry = mas_slot(mas, slots, 0); 6624 if (unlikely(ma_dead_node(mn))) 6625 return NULL; 6626 6627 /* Slot 0 or 1 must be set */ 6628 if (mas->index > limit) 6629 goto none; 6630 6631 if (likely(entry)) 6632 return entry; 6633 6634 pivots = ma_pivots(mn, mt); 6635 mas->index = pivots[0] + 1; 6636 mas->offset = 1; 6637 entry = mas_slot(mas, slots, 1); 6638 if (unlikely(ma_dead_node(mn))) 6639 return NULL; 6640 6641 if (mas->index > limit) 6642 goto none; 6643 6644 if (likely(entry)) 6645 return entry; 6646 6647 none: 6648 if (likely(!ma_dead_node(mn))) 6649 mas->node = MAS_NONE; 6650 return NULL; 6651 } 6652 6653 /* Depth first search, post-order */ 6654 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) 6655 { 6656 6657 struct maple_enode *p = MAS_NONE, *mn = mas->node; 6658 unsigned long p_min, p_max; 6659 6660 mas_next_node(mas, mas_mn(mas), max); 6661 if (!mas_is_none(mas)) 6662 return; 6663 6664 if (mte_is_root(mn)) 6665 return; 6666 6667 mas->node = mn; 6668 mas_ascend(mas); 6669 while (mas->node != MAS_NONE) { 6670 p = mas->node; 6671 p_min = mas->min; 6672 p_max = mas->max; 6673 mas_prev_node(mas, 0); 6674 } 6675 6676 if (p == MAS_NONE) 6677 return; 6678 6679 mas->node = p; 6680 mas->max = p_max; 6681 mas->min = p_min; 6682 } 6683 6684 /* Tree validations */ 6685 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6686 unsigned long min, unsigned long max, unsigned int depth); 6687 static void mt_dump_range(unsigned long min, unsigned long max, 6688 unsigned int depth) 6689 { 6690 static const char spaces[] = " "; 6691 6692 if (min == max) 6693 pr_info("%.*s%lu: ", depth * 2, spaces, min); 6694 else 6695 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 6696 } 6697 6698 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, 6699 unsigned int depth) 6700 { 6701 mt_dump_range(min, max, depth); 6702 6703 if (xa_is_value(entry)) 6704 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), 6705 xa_to_value(entry), entry); 6706 else if (xa_is_zero(entry)) 6707 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 6708 else if (mt_is_reserved(entry)) 6709 pr_cont("UNKNOWN ENTRY (%p)\n", entry); 6710 else 6711 pr_cont("%p\n", entry); 6712 } 6713 6714 static void mt_dump_range64(const struct maple_tree *mt, void *entry, 6715 unsigned long min, unsigned long max, unsigned int depth) 6716 { 6717 struct maple_range_64 *node = &mte_to_node(entry)->mr64; 6718 bool leaf = mte_is_leaf(entry); 6719 unsigned long first = min; 6720 int i; 6721 6722 pr_cont(" contents: "); 6723 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) 6724 pr_cont("%p %lu ", 
node->slot[i], node->pivot[i]); 6725 pr_cont("%p\n", node->slot[i]); 6726 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { 6727 unsigned long last = max; 6728 6729 if (i < (MAPLE_RANGE64_SLOTS - 1)) 6730 last = node->pivot[i]; 6731 else if (!node->slot[i] && max != mt_node_max(entry)) 6732 break; 6733 if (last == 0 && i > 0) 6734 break; 6735 if (leaf) 6736 mt_dump_entry(mt_slot(mt, node->slot, i), 6737 first, last, depth + 1); 6738 else if (node->slot[i]) 6739 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6740 first, last, depth + 1); 6741 6742 if (last == max) 6743 break; 6744 if (last > max) { 6745 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6746 node, last, max, i); 6747 break; 6748 } 6749 first = last + 1; 6750 } 6751 } 6752 6753 static void mt_dump_arange64(const struct maple_tree *mt, void *entry, 6754 unsigned long min, unsigned long max, unsigned int depth) 6755 { 6756 struct maple_arange_64 *node = &mte_to_node(entry)->ma64; 6757 bool leaf = mte_is_leaf(entry); 6758 unsigned long first = min; 6759 int i; 6760 6761 pr_cont(" contents: "); 6762 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) 6763 pr_cont("%lu ", node->gap[i]); 6764 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); 6765 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) 6766 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6767 pr_cont("%p\n", node->slot[i]); 6768 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { 6769 unsigned long last = max; 6770 6771 if (i < (MAPLE_ARANGE64_SLOTS - 1)) 6772 last = node->pivot[i]; 6773 else if (!node->slot[i]) 6774 break; 6775 if (last == 0 && i > 0) 6776 break; 6777 if (leaf) 6778 mt_dump_entry(mt_slot(mt, node->slot, i), 6779 first, last, depth + 1); 6780 else if (node->slot[i]) 6781 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6782 first, last, depth + 1); 6783 6784 if (last == max) 6785 break; 6786 if (last > max) { 6787 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6788 node, last, max, i); 6789 break; 6790 } 6791 first = last + 1; 6792 } 6793 } 6794 6795 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6796 unsigned long min, unsigned long max, unsigned int depth) 6797 { 6798 struct maple_node *node = mte_to_node(entry); 6799 unsigned int type = mte_node_type(entry); 6800 unsigned int i; 6801 6802 mt_dump_range(min, max, depth); 6803 6804 pr_cont("node %p depth %d type %d parent %p", node, depth, type, 6805 node ? node->parent : NULL); 6806 switch (type) { 6807 case maple_dense: 6808 pr_cont("\n"); 6809 for (i = 0; i < MAPLE_NODE_SLOTS; i++) { 6810 if (min + i > max) 6811 pr_cont("OUT OF RANGE: "); 6812 mt_dump_entry(mt_slot(mt, node->slot, i), 6813 min + i, min + i, depth); 6814 } 6815 break; 6816 case maple_leaf_64: 6817 case maple_range_64: 6818 mt_dump_range64(mt, entry, min, max, depth); 6819 break; 6820 case maple_arange_64: 6821 mt_dump_arange64(mt, entry, min, max, depth); 6822 break; 6823 6824 default: 6825 pr_cont(" UNKNOWN TYPE\n"); 6826 } 6827 } 6828 6829 void mt_dump(const struct maple_tree *mt) 6830 { 6831 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); 6832 6833 pr_info("maple_tree(%p) flags %X, height %u root %p\n", 6834 mt, mt->ma_flags, mt_height(mt), entry); 6835 if (!xa_is_node(entry)) 6836 mt_dump_entry(entry, 0, 0, 0); 6837 else if (entry) 6838 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); 6839 } 6840 EXPORT_SYMBOL_GPL(mt_dump); 6841 6842 /* 6843 * Calculate the maximum gap in a node and check if that's what is reported in 6844 * the parent (unless root). 
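 *
 * A gap is a run of empty slots.  Allocation tree (arange_64) parents cache
 * the largest gap of each child so searches can skip subtrees; this
 * validator recomputes that value from the child and compares it against
 * what the parent recorded.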
6845 */ 6846 static void mas_validate_gaps(struct ma_state *mas) 6847 { 6848 struct maple_enode *mte = mas->node; 6849 struct maple_node *p_mn; 6850 unsigned long gap = 0, max_gap = 0; 6851 unsigned long p_end, p_start = mas->min; 6852 unsigned char p_slot; 6853 unsigned long *gaps = NULL; 6854 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); 6855 int i; 6856 6857 if (ma_is_dense(mte_node_type(mte))) { 6858 for (i = 0; i < mt_slot_count(mte); i++) { 6859 if (mas_get_slot(mas, i)) { 6860 if (gap > max_gap) 6861 max_gap = gap; 6862 gap = 0; 6863 continue; 6864 } 6865 gap++; 6866 } 6867 goto counted; 6868 } 6869 6870 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); 6871 for (i = 0; i < mt_slot_count(mte); i++) { 6872 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); 6873 6874 if (!gaps) { 6875 if (mas_get_slot(mas, i)) { 6876 gap = 0; 6877 goto not_empty; 6878 } 6879 6880 gap += p_end - p_start + 1; 6881 } else { 6882 void *entry = mas_get_slot(mas, i); 6883 6884 gap = gaps[i]; 6885 if (!entry) { 6886 if (gap != p_end - p_start + 1) { 6887 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", 6888 mas_mn(mas), i, 6889 mas_get_slot(mas, i), gap, 6890 p_end, p_start); 6891 mt_dump(mas->tree); 6892 6893 MT_BUG_ON(mas->tree, 6894 gap != p_end - p_start + 1); 6895 } 6896 } else { 6897 if (gap > p_end - p_start + 1) { 6898 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", 6899 mas_mn(mas), i, gap, p_end, p_start, 6900 p_end - p_start + 1); 6901 MT_BUG_ON(mas->tree, 6902 gap > p_end - p_start + 1); 6903 } 6904 } 6905 } 6906 6907 if (gap > max_gap) 6908 max_gap = gap; 6909 not_empty: 6910 p_start = p_end + 1; 6911 if (p_end >= mas->max) 6912 break; 6913 } 6914 6915 counted: 6916 if (mte_is_root(mte)) 6917 return; 6918 6919 p_slot = mte_parent_slot(mas->node); 6920 p_mn = mte_parent(mte); 6921 MT_BUG_ON(mas->tree, max_gap > mas->max); 6922 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) { 6923 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); 6924 mt_dump(mas->tree); 6925 } 6926 6927 MT_BUG_ON(mas->tree, 6928 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap); 6929 } 6930 6931 static void mas_validate_parent_slot(struct ma_state *mas) 6932 { 6933 struct maple_node *parent; 6934 struct maple_enode *node; 6935 enum maple_type p_type = mas_parent_enum(mas, mas->node); 6936 unsigned char p_slot = mte_parent_slot(mas->node); 6937 void __rcu **slots; 6938 int i; 6939 6940 if (mte_is_root(mas->node)) 6941 return; 6942 6943 parent = mte_parent(mas->node); 6944 slots = ma_slots(parent, p_type); 6945 MT_BUG_ON(mas->tree, mas_mn(mas) == parent); 6946 6947 /* Check prev/next parent slot for duplicate node entry */ 6948 6949 for (i = 0; i < mt_slots[p_type]; i++) { 6950 node = mas_slot(mas, slots, i); 6951 if (i == p_slot) { 6952 if (node != mas->node) 6953 pr_err("parent %p[%u] does not have %p\n", 6954 parent, i, mas_mn(mas)); 6955 MT_BUG_ON(mas->tree, node != mas->node); 6956 } else if (node == mas->node) { 6957 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", 6958 mas_mn(mas), parent, i, p_slot); 6959 MT_BUG_ON(mas->tree, node == mas->node); 6960 } 6961 } 6962 } 6963 6964 static void mas_validate_child_slot(struct ma_state *mas) 6965 { 6966 enum maple_type type = mte_node_type(mas->node); 6967 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 6968 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); 6969 struct maple_enode *child; 6970 unsigned char i; 6971 6972 if (mte_is_leaf(mas->node)) 6973 return; 6974 6975 for (i = 
0; i < mt_slots[type]; i++) { 6976 child = mas_slot(mas, slots, i); 6977 if (!pivots[i] || pivots[i] == mas->max) 6978 break; 6979 6980 if (!child) 6981 break; 6982 6983 if (mte_parent_slot(child) != i) { 6984 pr_err("Slot error at %p[%u]: child %p has pslot %u\n", 6985 mas_mn(mas), i, mte_to_node(child), 6986 mte_parent_slot(child)); 6987 MT_BUG_ON(mas->tree, 1); 6988 } 6989 6990 if (mte_parent(child) != mte_to_node(mas->node)) { 6991 pr_err("child %p has parent %p not %p\n", 6992 mte_to_node(child), mte_parent(child), 6993 mte_to_node(mas->node)); 6994 MT_BUG_ON(mas->tree, 1); 6995 } 6996 } 6997 } 6998 6999 /* 7000 * Validate all pivots are within mas->min and mas->max. 7001 */ 7002 static void mas_validate_limits(struct ma_state *mas) 7003 { 7004 int i; 7005 unsigned long prev_piv = 0; 7006 enum maple_type type = mte_node_type(mas->node); 7007 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7008 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 7009 7010 /* all limits are fine here. */ 7011 if (mte_is_root(mas->node)) 7012 return; 7013 7014 for (i = 0; i < mt_slots[type]; i++) { 7015 unsigned long piv; 7016 7017 piv = mas_safe_pivot(mas, pivots, i, type); 7018 7019 if (!piv && (i != 0)) 7020 break; 7021 7022 if (!mte_is_leaf(mas->node)) { 7023 void *entry = mas_slot(mas, slots, i); 7024 7025 if (!entry) 7026 pr_err("%p[%u] cannot be null\n", 7027 mas_mn(mas), i); 7028 7029 MT_BUG_ON(mas->tree, !entry); 7030 } 7031 7032 if (prev_piv > piv) { 7033 pr_err("%p[%u] piv %lu < prev_piv %lu\n", 7034 mas_mn(mas), i, piv, prev_piv); 7035 MT_BUG_ON(mas->tree, piv < prev_piv); 7036 } 7037 7038 if (piv < mas->min) { 7039 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, 7040 piv, mas->min); 7041 MT_BUG_ON(mas->tree, piv < mas->min); 7042 } 7043 if (piv > mas->max) { 7044 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, 7045 piv, mas->max); 7046 MT_BUG_ON(mas->tree, piv > mas->max); 7047 } 7048 prev_piv = piv; 7049 if (piv == mas->max) 7050 break; 7051 } 7052 for (i += 1; i < mt_slots[type]; i++) { 7053 void *entry = mas_slot(mas, slots, i); 7054 7055 if (entry && (i != mt_slots[type] - 1)) { 7056 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), 7057 i, entry); 7058 MT_BUG_ON(mas->tree, entry != NULL); 7059 } 7060 7061 if (i < mt_pivots[type]) { 7062 unsigned long piv = pivots[i]; 7063 7064 if (!piv) 7065 continue; 7066 7067 pr_err("%p[%u] should not have piv %lu\n", 7068 mas_mn(mas), i, piv); 7069 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1); 7070 } 7071 } 7072 } 7073 7074 static void mt_validate_nulls(struct maple_tree *mt) 7075 { 7076 void *entry, *last = (void *)1; 7077 unsigned char offset = 0; 7078 void __rcu **slots; 7079 MA_STATE(mas, mt, 0, 0); 7080 7081 mas_start(&mas); 7082 if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) 7083 return; 7084 7085 while (!mte_is_leaf(mas.node)) 7086 mas_descend(&mas); 7087 7088 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); 7089 do { 7090 entry = mas_slot(&mas, slots, offset); 7091 if (!last && !entry) { 7092 pr_err("Sequential nulls end at %p[%u]\n", 7093 mas_mn(&mas), offset); 7094 } 7095 MT_BUG_ON(mt, !last && !entry); 7096 last = entry; 7097 if (offset == mas_data_end(&mas)) { 7098 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); 7099 if (mas_is_none(&mas)) 7100 return; 7101 offset = 0; 7102 slots = ma_slots(mte_to_node(mas.node), 7103 mte_node_type(mas.node)); 7104 } else { 7105 offset++; 7106 } 7107 7108 } while (!mas_is_none(&mas)); 7109 } 7110 7111 /* 7112 * validate a maple tree by checking: 7113 * 1. 
The limits (pivots are within mas->min to mas->max) 7114 * 2. The gap is correctly set in the parents 7115 */ 7116 void mt_validate(struct maple_tree *mt) 7117 { 7118 unsigned char end; 7119 7120 MA_STATE(mas, mt, 0, 0); 7121 rcu_read_lock(); 7122 mas_start(&mas); 7123 if (!mas_searchable(&mas)) 7124 goto done; 7125 7126 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); 7127 while (!mas_is_none(&mas)) { 7128 MT_BUG_ON(mas.tree, mte_dead_node(mas.node)); 7129 if (!mte_is_root(mas.node)) { 7130 end = mas_data_end(&mas); 7131 if ((end < mt_min_slot_count(mas.node)) && 7132 (mas.max != ULONG_MAX)) { 7133 pr_err("Invalid size %u of %p\n", end, 7134 mas_mn(&mas)); 7135 MT_BUG_ON(mas.tree, 1); 7136 } 7137 7138 } 7139 mas_validate_parent_slot(&mas); 7140 mas_validate_child_slot(&mas); 7141 mas_validate_limits(&mas); 7142 if (mt_is_alloc(mt)) 7143 mas_validate_gaps(&mas); 7144 mas_dfs_postorder(&mas, ULONG_MAX); 7145 } 7146 mt_validate_nulls(mt); 7147 done: 7148 rcu_read_unlock(); 7149 7150 } 7151 EXPORT_SYMBOL_GPL(mt_validate); 7152 7153 #endif /* CONFIG_DEBUG_MAPLE_TREE */ 7154
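
/*
 * Basic usage sketch of the simple API above (illustrative only; ptr stands
 * in for any caller-provided pointer):
 *
 *	DEFINE_MTREE(tree);
 *
 *	mtree_store_range(&tree, 10, 19, ptr, GFP_KERNEL);
 *	WARN_ON(mtree_load(&tree, 15) != ptr);
 *	mtree_erase(&tree, 12);
 *	mtree_destroy(&tree);
 *
 * Erasing index 12 clears the whole 10-19 range, so any subsequent
 * mtree_load() in that range returns NULL.
 */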