// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─  Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 */
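/*
 * Illustrative usage (not part of the original documentation): a single
 * store can cover a whole range of indices, e.g.
 *
 *	DEFINE_MTREE(mt);
 *
 *	mtree_store_range(&mt, 5, 9, ptr, GFP_KERNEL);
 *
 * after which mtree_load(&mt, 7) returns ptr, since index 7 lies inside the
 * stored range [5, 9].  Overwriting a larger range afterwards replaces every
 * overlapping entry in one operation, which is what distinguishes the
 * range-based pivots described above from classic B-tree keys.
 */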
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE	- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will hit
 * a dead node and restart on updates.
 */
struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	WARN_ON(node->parent != ma_parent_ptr(node));
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(const struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(const struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}
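/*
 * Illustrative example (not from the original source): an encoded node
 * (maple_enode) packs the node type into the low bits of a 256B-aligned node
 * pointer.  Assuming the header values MAPLE_NODE_TYPE_SHIFT == 3 and
 * MAPLE_ENODE_NULL == 0x04 (see mt_mk_node() below), a maple_range_64 node
 * at address 0x...f00 would be encoded as:
 *
 *	enode = 0x...f00 | (maple_range_64 << 3) | 0x04 = 0x...f14
 *
 * and mte_node_type() recovers maple_range_64 by shifting and masking those
 * same low bits.
 */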
static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored in bits 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing a 32 or 64 bit value, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT	0x03
#define MAPLE_PARENT_SLOT_MASK	0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

/*
 * mte_parent_shift() - Get the parent shift for the slot storage.
 * @parent: The parent pointer cast as an unsigned long
 * Return: The shift into that pointer to the start of the slot
 */
static inline unsigned long mte_parent_shift(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_SHIFT;

	return MAPLE_PARENT_16B_SLOT_SHIFT;
}

/*
 * mte_parent_slot_mask() - Get the slot mask for the parent.
 * @parent: The parent pointer cast as an unsigned long.
 * Return: The slot mask for that parent.
 */
static inline unsigned long mte_parent_slot_mask(unsigned long parent)
{
	/* Note bit 1 == 0 means 16B */
	if (likely(parent & MAPLE_PARENT_NOT_RANGE16))
		return MAPLE_PARENT_SLOT_MASK;

	return MAPLE_PARENT_16B_SLOT_MASK;
}

/*
 * mas_parent_type() - Return the maple_type of the parent from the stored
 * parent type.
 * @mas: The maple state
 * @enode: The maple_enode to extract the parent's enum
 * Return: The node->parent maple_type
 */
static inline
enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode)
{
	unsigned long p_type;

	p_type = (unsigned long)mte_to_node(enode)->parent;
	if (WARN_ON(p_type & MAPLE_PARENT_ROOT))
		return 0;

	p_type &= MAPLE_NODE_MASK;
	p_type &= ~mte_parent_slot_mask(p_type);
	switch (p_type) {
	case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */
		if (mt_is_alloc(mas->tree))
			return maple_arange_64;
		return maple_range_64;
	}

	return 0;
}
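/*
 * Illustrative example (not from the original source): for a child stored in
 * slot 5 of a maple_range_64 parent at address 0x...e00, mas_set_parent()
 * below builds:
 *
 *	parent = 0x...e00 | (5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64
 *	       = 0x...e00 | 0x28 | 0x06 = 0x...e2e
 *
 * mte_parent_slot() then recovers slot 5, and mte_parent() masks off the low
 * byte to recover the parent node address 0x...e00.
 */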
/*
 * mas_set_parent() - Set the parent node and encode the slot
 * @enode: The encoded maple node.
 * @parent: The encoded maple node that is the parent of @enode.
 * @slot: The slot that @enode resides in @parent.
 *
 * Slot number is encoded in the enode->parent bits 3-6 or 2-6, depending on
 * the parent type.
 */
static inline
void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
		    const struct maple_enode *parent, unsigned char slot)
{
	unsigned long val = (unsigned long)parent;
	unsigned long shift;
	unsigned long type;
	enum maple_type p_type = mte_node_type(parent);

	MAS_BUG_ON(mas, p_type == maple_dense);
	MAS_BUG_ON(mas, p_type == maple_leaf_64);

	switch (p_type) {
	case maple_range_64:
	case maple_arange_64:
		shift = MAPLE_PARENT_SLOT_SHIFT;
		type = MAPLE_PARENT_RANGE64;
		break;
	default:
	case maple_dense:
	case maple_leaf_64:
		shift = type = 0;
		break;
	}

	val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */
	val |= (slot << shift) | type;
	mte_to_node(enode)->parent = ma_parent_ptr(val);
}

/*
 * mte_parent_slot() - get the parent slot of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The slot in the parent node where @enode resides.
 */
static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
{
	unsigned long val = (unsigned long)mte_to_node(enode)->parent;

	if (val & MA_ROOT_PARENT)
		return 0;

	/*
	 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost
	 * by the shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT
	 */
	return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val);
}

/*
 * mte_parent() - Get the parent of @enode.
 * @enode: The encoded maple node.
 *
 * Return: The parent maple node.
 */
static inline struct maple_node *mte_parent(const struct maple_enode *enode)
{
	return (void *)((unsigned long)
			(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
}

/*
 * ma_dead_node() - check if the @node is dead.
 * @node: The maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool ma_dead_node(const struct maple_node *node)
{
	struct maple_node *parent;

	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = (void *)((unsigned long) node->parent & ~MAPLE_NODE_MASK);
	return (parent == node);
}

/*
 * mte_dead_node() - check if the @enode is dead.
 * @enode: The encoded maple node
 *
 * Return: true if dead, false otherwise.
 */
static inline bool mte_dead_node(const struct maple_enode *enode)
{
	struct maple_node *parent, *node;

	node = mte_to_node(enode);
	/* Do not reorder reads from the node prior to the parent check */
	smp_rmb();
	parent = mte_parent(enode);
	return (parent == node);
}

/*
 * mas_allocated() - Get the number of nodes allocated in a maple state.
 * @mas: The maple state
 *
 * The ma_state alloc member is overloaded to hold a pointer to the first
 * allocated node or to the number of requested nodes to allocate.  If bit 0
 * is set, then the alloc contains the number of requested nodes.  If there
 * is an allocated node, then the total allocated nodes is in that node.
 *
 * Return: The total number of nodes allocated
 */
static inline unsigned long mas_allocated(const struct ma_state *mas)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1))
		return 0;

	return mas->alloc->total;
}
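/*
 * Illustrative example (not from the original source): mas->alloc is a
 * tagged pointer.  Requesting 3 nodes before anything is allocated stores
 * (3 << 1) | 1 == 0x7 in mas->alloc; mas_alloc_req() below decodes
 * 0x7 >> 1 == 3, while mas_allocated() sees bit 0 set and reports zero
 * allocated nodes.
 */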
/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.  Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node - the maple node
 * @type - the node type
 *
 * In the event of a dead node, this array may be %NULL
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node - the maple node
 * @type - the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mas_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mas: The maple state.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of the node in @mas.
 */
static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
{
	struct maple_node *node = mas_mn(mas);
	enum maple_type type = mte_node_type(mas->node);

	if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
		mas_set_err(mas, -EIO);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}
/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
		void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
				   unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mt));
}
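/*
 * Illustrative example (not from the original source): in a leaf covering
 * [0, 100] with pivots = {10, 20, 0, ...} and two used slots, the ranges are
 * [0, 10] at offset 0 and [11, 20] at offset 1:
 *
 *	mas_safe_min(mas, pivots, 1) == pivots[0] + 1 == 11
 *	mas_logical_pivot(mas, pivots, 2, type) == mas->max == 100
 *
 * since the zero pivot at offset 2 is beyond the end of the data.
 */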
/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return mt_slot_locked(mas->tree, slots, offset);
}

/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}
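/*
 * Illustrative example (not from the original source): a maple_arange_64
 * node whose last occupied slot is 7 and whose largest sub-gap sits below
 * slot 2 stores ma_set_meta(mn, maple_arange_64, 2, 7), so later readers can
 * use ma_meta_end()/ma_meta_gap() instead of rescanning the pivots.
 */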
/*
 * mt_clear_meta() - clear the metadata information of a node, if it exists
 * @mt: The maple tree
 * @mn: The maple node
 * @type: The maple node type
 */
static inline void mt_clear_meta(struct maple_tree *mt, struct maple_node *mn,
				 enum maple_type type)
{
	struct maple_metadata *meta;
	unsigned long *pivots;
	void __rcu **slots;
	void *next;

	switch (type) {
	case maple_range_64:
		pivots = mn->mr64.pivot;
		if (unlikely(pivots[MAPLE_RANGE64_SLOTS - 2])) {
			slots = mn->mr64.slot;
			next = mt_slot_locked(mt, slots,
					      MAPLE_RANGE64_SLOTS - 1);
			if (unlikely((mte_to_node(next) &&
				      mte_node_type(next))))
				return; /* no metadata, could be node */
		}
		fallthrough;
	case maple_arange_64:
		meta = ma_meta(mn, type);
		break;
	default:
		return;
	}

	meta->gap = 0;
	meta->end = 0;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat - the ma_topiary, a linked list of dead nodes.
 * @dead_enode - the node to be marked as dead and added to the tail of the
 * list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}
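/*
 * Illustrative example (not from the original source): after
 * mat_add(&mat, a) followed by mat_add(&mat, b), the topiary list is
 * mat.head == a, mte_to_mat(a)->next == b, mat.tail == b, and both nodes
 * have been marked dead so concurrent readers restart their walks.  The list
 * is later drained with mas_mat_free() above or mas_mat_destroy() below.
 */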
/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas - the maple state
 * @mat - the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas - the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode;	/* parent enode. */
	struct maple_enode *a_enode;	/* ancestor enode. */
	struct maple_node *a_node;	/* ancestor node. */
	struct maple_node *p_node;	/* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;

	a_type = mas_parent_type(mas, mas->node);
	mas->offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	if (!mas->min)
		set_min = true;

	if (mas->max == ULONG_MAX)
		set_max = true;

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_type(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		a_enode = mt_mk_node(a_node, a_type);
		pivots = ma_pivots(a_node, a_type);

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple
 * state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}
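/*
 * Illustrative example (not from the original source): spare nodes are kept
 * as a stack of maple_alloc nodes.  With three nodes allocated, mas->alloc
 * points at the head (total == 3) and the head's slot[] holds the other two
 * spares (node_count == 2).  mas_pop_node() hands out slot[node_count - 1]
 * first, and only consumes the head itself once its slots are empty.
 */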
/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS - node->node_count;
		slots = (void **)&node->slot[node->node_count];
		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		if (node->node_count == 0) {
			node->slot[0]->node_count = 0;
			node->slot[0]->request_count = 0;
		}

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}
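/*
 * Illustrative example (not from the original source): callers of the
 * advanced API typically pair the allocation request made by
 * mas_node_count() below with a retry loop such as
 *
 *	mas_lock(&mas);
 * retry:
 *	mas_store_gfp(&mas, entry, gfp);
 *	if (mas_nomem(&mas, gfp))
 *		goto retry;
 *	mas_unlock(&mas);
 *
 * where mas_nomem() (defined elsewhere in this file) clears the -ENOMEM
 * state and may temporarily drop the tree lock to allocate with the given
 * gfp flags.
 */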
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;

retry:
		mas->depth = 0;
		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			if (mte_dead_node(mas->node))
				goto retry;

			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (!pivots)
		return 0;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}
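/*
 * Illustrative example (not from the original source): for a maple_range_64
 * node (15 pivots, 16 slots), ma_data_end() distinguishes three cases by
 * looking at the last pivot:
 *
 *	pivots[14] == 0	   -> node not full, the end is in the metadata
 *	pivots[14] == max  -> 15 entries, offset 14 is the last
 *	pivots[14] <  max  -> all 16 slots used, the end is offset 15
 */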
/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	if (unlikely(ma_dead_node(node)))
		return 0;

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas - the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the metadata data end to scan backwards across set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}
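/*
 * Illustrative example (not from the original source): in a leaf covering
 * [0, 100] with pivots = {10, 20, 50, ...} and slots = {A, NULL, B, ...},
 * the empty range is [11, 20] and the loop in mas_leaf_max_gap() computes
 * gap = pivots[1] - pivots[0] = 10 at offset 1.
 */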
/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	MAS_BUG_ON(mas, mt != maple_arange_64);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the metadata
 * of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_type(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	MAS_BUG_ON(mas, pmt != maple_arange_64);
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_type(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas - the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_type(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}
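/*
 * Illustrative example (not from the original source): if a leaf's largest
 * gap shrinks from 100 to 10, mas_update_gap() writes 10 into the parent's
 * gap[] entry for that child.  mas_parent_gap() then only keeps ascending
 * when the changed entry was (or becomes) the parent's largest gap, since
 * the grandparent tracks only that maximum.
 */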
/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas - the maple state (for the tree)
 * @parent - the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mas_set_parent(mas, child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas - the ma_state to use for operations.
 * @advanced - boolean to adopt the child nodes and free the old node (false)
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->ma_lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_type(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced) {
		mte_set_node_dead(old_enode);
		mas_free(mas, old_enode);
	}
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->ma_lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}
/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split, unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side
		 * will still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be
 * two splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum of the range covered by @bn.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
	 struct maple_big_node *bn, unsigned char *mid_split, unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the left-most
	 * leaf.  The limitation means that the split of a node must be checked
	 * for this condition and be able to put more data in one direction or
	 * the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of
	 * a store of a range that overwrites the end and beginning of two
	 * full nodes.  The result is a set of entries that cannot be stored
	 * in 2 nodes.
	 * Sometimes, these two nodes can also be located in different parent
	 * nodes which are also full.  This can carry upwards all the way to
	 * the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero.
		 */
		while ((split < slot_count - 1) &&
		       ((bn->pivot[split] - min) < slot_count - 1) &&
		       (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a maple_big_node
 * and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			      unsigned char mas_end, struct maple_big_node *b_node,
			      unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}
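/*
 * Illustrative example (not from the original source): splitting a full
 * maple_range_64 big node with b_end == 16 starts from split == 8 and then
 * nudges the split right past narrow ranges or a NULL entry, while a 3-way
 * split (b_end close to 2 * slot_count) instead returns split == b_end / 3
 * with *mid_split == 2 * (b_end / 3).
 */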
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is already no room for metadata */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusive)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update the maple state maximum from the copied data if true.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
			      unsigned char mab_start, unsigned char mab_end,
			      struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children that do not have the correct
 * parents set.  The nodes whose parents are already correct are the new
 * nodes; follow those downwards to find the remaining nodes with incorrectly
 * set parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers, which
	 * indicate the new nodes that need to be walked to find any new nodes
	 * at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

/*
 * mas_bulk_rebalance() - Rebalance the end of a tree after a bulk insert.
2148 * @mas: The maple state 2149 * @end: The maple node end 2150 * @mt: The maple node type 2151 */ 2152 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, 2153 enum maple_type mt) 2154 { 2155 if (!(mas->mas_flags & MA_STATE_BULK)) 2156 return; 2157 2158 if (mte_is_root(mas->node)) 2159 return; 2160 2161 if (end > mt_min_slots[mt]) { 2162 mas->mas_flags &= ~MA_STATE_REBALANCE; 2163 return; 2164 } 2165 } 2166 2167 /* 2168 * mas_store_b_node() - Store an @entry into the b_node while also copying the 2169 * data from a maple encoded node. 2170 * @wr_mas: the maple write state 2171 * @b_node: the maple_big_node to fill with data 2172 * @offset_end: the offset to end copying 2173 * 2174 * Return: The actual end of the data stored in @b_node 2175 */ 2176 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, 2177 struct maple_big_node *b_node, unsigned char offset_end) 2178 { 2179 unsigned char slot; 2180 unsigned char b_end; 2181 /* Possible underflow of piv will wrap back to 0 before use. */ 2182 unsigned long piv; 2183 struct ma_state *mas = wr_mas->mas; 2184 2185 b_node->type = wr_mas->type; 2186 b_end = 0; 2187 slot = mas->offset; 2188 if (slot) { 2189 /* Copy start data up to insert. */ 2190 mas_mab_cp(mas, 0, slot - 1, b_node, 0); 2191 b_end = b_node->b_end; 2192 piv = b_node->pivot[b_end - 1]; 2193 } else 2194 piv = mas->min - 1; 2195 2196 if (piv + 1 < mas->index) { 2197 /* Handle range starting after old range */ 2198 b_node->slot[b_end] = wr_mas->content; 2199 if (!wr_mas->content) 2200 b_node->gap[b_end] = mas->index - 1 - piv; 2201 b_node->pivot[b_end++] = mas->index - 1; 2202 } 2203 2204 /* Store the new entry. */ 2205 mas->offset = b_end; 2206 b_node->slot[b_end] = wr_mas->entry; 2207 b_node->pivot[b_end] = mas->last; 2208 2209 /* Appended. */ 2210 if (mas->last >= mas->max) 2211 goto b_end; 2212 2213 /* Handle new range ending before old range ends */ 2214 piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); 2215 if (piv > mas->last) { 2216 if (piv == ULONG_MAX) 2217 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); 2218 2219 if (offset_end != slot) 2220 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, 2221 offset_end); 2222 2223 b_node->slot[++b_end] = wr_mas->content; 2224 if (!wr_mas->content) 2225 b_node->gap[b_end] = piv - mas->last + 1; 2226 b_node->pivot[b_end] = piv; 2227 } 2228 2229 slot = offset_end + 1; 2230 if (slot > wr_mas->node_end) 2231 goto b_end; 2232 2233 /* Copy end data to the end of the node. */ 2234 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); 2235 b_node->b_end--; 2236 return; 2237 2238 b_end: 2239 b_node->b_end = b_end; 2240 } 2241 2242 /* 2243 * mas_prev_sibling() - Find the previous node with the same parent. 2244 * @mas: the maple state 2245 * 2246 * Return: True if there is a previous sibling, false otherwise. 2247 */ 2248 static inline bool mas_prev_sibling(struct ma_state *mas) 2249 { 2250 unsigned int p_slot = mte_parent_slot(mas->node); 2251 2252 if (mte_is_root(mas->node)) 2253 return false; 2254 2255 if (!p_slot) 2256 return false; 2257 2258 mas_ascend(mas); 2259 mas->offset = p_slot - 1; 2260 mas_descend(mas); 2261 return true; 2262 } 2263 2264 /* 2265 * mas_next_sibling() - Find the next node with the same parent. 2266 * @mas: the maple state 2267 * 2268 * Return: true if there is a next sibling, false otherwise. 
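 *
 * A sketch of the intended pairing, as used by mas_rebalance() below:
 * try the sibling to the right first and fall back to the left one.
 *
 *	if (mas_next_sibling(&r_mas))
 *		... rebalance against the right sibling ...
 *	else
 *		mas_prev_sibling(&l_mas);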
 */
static inline bool mas_next_sibling(struct ma_state *mas)
{
	MA_STATE(parent, mas->tree, mas->index, mas->last);

	if (mte_is_root(mas->node))
		return false;

	parent = *mas;
	mas_ascend(&parent);
	parent.offset = mte_parent_slot(mas->node) + 1;
	if (parent.offset > mas_data_end(&parent))
		return false;

	*mas = parent;
	mas_descend(mas);
	return true;
}

/*
 * mte_node_or_none() - Return the encoded node or MAS_NONE.
 * @enode: The encoded maple node.
 *
 * Shorthand to avoid storing %NULL in the tree or the maple_subtree_state.
 *
 * Return: @enode or MAS_NONE
 */
static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
{
	if (enode)
		return enode;

	return ma_enode_ptr(MAS_NONE);
}

/*
 * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 */
static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char count, offset;

	if (unlikely(ma_is_dense(wr_mas->type))) {
		wr_mas->r_max = wr_mas->r_min = mas->index;
		mas->offset = mas->index = mas->min;
		return;
	}

	wr_mas->node = mas_mn(wr_mas->mas);
	wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
	count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
					       wr_mas->pivots, mas->max);
	offset = mas->offset;

	while (offset < count && mas->index > wr_mas->pivots[offset])
		offset++;

	wr_mas->r_max = offset < count ? wr_mas->pivots[offset] : mas->max;
	wr_mas->r_min = mas_safe_min(mas, wr_mas->pivots, offset);
	wr_mas->offset_end = mas->offset = offset;
}

/*
 * mas_topiary_range() - Add a range of slots to the topiary.
 * @mas: The maple state
 * @destroy: The topiary to add the slots to (usually the destroy list)
 * @start: The starting slot (inclusive)
 * @end: The end slot (inclusive)
 */
static inline void mas_topiary_range(struct ma_state *mas,
	struct ma_topiary *destroy, unsigned char start, unsigned char end)
{
	void __rcu **slots;
	unsigned char offset;

	MAS_BUG_ON(mas, mte_is_leaf(mas->node));

	slots = ma_slots(mas_mn(mas), mte_node_type(mas->node));
	for (offset = start; offset <= end; offset++) {
		struct maple_enode *enode = mas_slot_locked(mas, slots, offset);

		if (mte_dead_node(enode))
			continue;

		mat_add(destroy, enode);
	}
}

/*
 * mast_topiary() - Add the portions of the tree to the removal list; either to
 * be freed or discarded (destroy walk).
 * @mast: The maple_subtree_state.
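 *
 * A minimal sketch of the collect-then-release pattern this feeds,
 * assuming a write-locked tree and hypothetical slot bounds start/end:
 *
 *	MA_TOPIARY(destroy, mas->tree);
 *
 *	mas_topiary_range(mas, &destroy, start, end);
 *	mas_mat_destroy(mas, &destroy);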
 */
static inline void mast_topiary(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_l, NULL);
	unsigned char r_start, r_end;
	unsigned char l_start, l_end;
	void __rcu **l_slots, **r_slots;

	wr_mas.type = mte_node_type(mast->orig_l->node);
	mast->orig_l->index = mast->orig_l->last;
	mas_wr_node_walk(&wr_mas);
	l_start = mast->orig_l->offset + 1;
	l_end = mas_data_end(mast->orig_l);
	r_start = 0;
	r_end = mast->orig_r->offset;

	if (r_end)
		r_end--;

	l_slots = ma_slots(mas_mn(mast->orig_l),
			   mte_node_type(mast->orig_l->node));

	r_slots = ma_slots(mas_mn(mast->orig_r),
			   mte_node_type(mast->orig_r->node));

	if ((l_start < l_end) &&
	    mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) {
		l_start++;
	}

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) {
		if (r_end)
			r_end--;
	}

	if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node))
		return;

	/* At the node where left and right sides meet, add the parts between */
	if (mast->orig_l->node == mast->orig_r->node) {
		return mas_topiary_range(mast->orig_l, mast->destroy,
					 l_start, r_end);
	}

	/* mast->orig_r is different and consumed. */
	if (mte_is_leaf(mast->orig_r->node))
		return;

	if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end)))
		l_end--;

	if (l_start <= l_end)
		mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end);

	if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start)))
		r_start++;

	if (r_start <= r_end)
		mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end);
}

/*
 * mast_rebalance_next() - Rebalance against the next node.
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_next(struct maple_subtree_state *mast)
{
	unsigned char b_end = mast->bn->b_end;

	mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node),
		   mast->bn, b_end);
	mast->orig_r->last = mast->orig_r->max;
}

/*
 * mast_rebalance_prev() - Rebalance against the previous node.
 * @mast: The maple subtree state
 */
static inline void mast_rebalance_prev(struct maple_subtree_state *mast)
{
	unsigned char end = mas_data_end(mast->orig_l) + 1;
	unsigned char b_end = mast->bn->b_end;

	mab_shift_right(mast->bn, end);
	mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0);
	mast->l->min = mast->orig_l->min;
	mast->orig_l->index = mast->orig_l->min;
	mast->bn->b_end = end + b_end;
	mast->l->offset += end;
}

/*
 * mast_spanning_rebalance() - Rebalance against the nearest neighbour,
 * favouring the node to the right.  Check the node to the right, then the
 * left, at each level upwards until the root is reached.  Free and destroy
 * nodes as needed.  Data is copied into @mast->bn.
 * @mast: The maple_subtree_state.
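 *
 * For example (hypothetical shape): if @mast->orig_r has a neighbour to
 * the right with data to spare, the walk ascends until a parent with a
 * next slot is found, descends back down along the left-most edge, and
 * mast_rebalance_next() appends that neighbour's entries to @mast->bn.
 * Only when no such right neighbour exists is the left side tried via
 * mast_rebalance_prev().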
2465 */ 2466 static inline 2467 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2468 { 2469 struct ma_state r_tmp = *mast->orig_r; 2470 struct ma_state l_tmp = *mast->orig_l; 2471 struct maple_enode *ancestor = NULL; 2472 unsigned char start, end; 2473 unsigned char depth = 0; 2474 2475 r_tmp = *mast->orig_r; 2476 l_tmp = *mast->orig_l; 2477 do { 2478 mas_ascend(mast->orig_r); 2479 mas_ascend(mast->orig_l); 2480 depth++; 2481 if (!ancestor && 2482 (mast->orig_r->node == mast->orig_l->node)) { 2483 ancestor = mast->orig_r->node; 2484 end = mast->orig_r->offset - 1; 2485 start = mast->orig_l->offset + 1; 2486 } 2487 2488 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2489 if (!ancestor) { 2490 ancestor = mast->orig_r->node; 2491 start = 0; 2492 } 2493 2494 mast->orig_r->offset++; 2495 do { 2496 mas_descend(mast->orig_r); 2497 mast->orig_r->offset = 0; 2498 depth--; 2499 } while (depth); 2500 2501 mast_rebalance_next(mast); 2502 do { 2503 unsigned char l_off = 0; 2504 struct maple_enode *child = r_tmp.node; 2505 2506 mas_ascend(&r_tmp); 2507 if (ancestor == r_tmp.node) 2508 l_off = start; 2509 2510 if (r_tmp.offset) 2511 r_tmp.offset--; 2512 2513 if (l_off < r_tmp.offset) 2514 mas_topiary_range(&r_tmp, mast->destroy, 2515 l_off, r_tmp.offset); 2516 2517 if (l_tmp.node != child) 2518 mat_add(mast->free, child); 2519 2520 } while (r_tmp.node != ancestor); 2521 2522 *mast->orig_l = l_tmp; 2523 return true; 2524 2525 } else if (mast->orig_l->offset != 0) { 2526 if (!ancestor) { 2527 ancestor = mast->orig_l->node; 2528 end = mas_data_end(mast->orig_l); 2529 } 2530 2531 mast->orig_l->offset--; 2532 do { 2533 mas_descend(mast->orig_l); 2534 mast->orig_l->offset = 2535 mas_data_end(mast->orig_l); 2536 depth--; 2537 } while (depth); 2538 2539 mast_rebalance_prev(mast); 2540 do { 2541 unsigned char r_off; 2542 struct maple_enode *child = l_tmp.node; 2543 2544 mas_ascend(&l_tmp); 2545 if (ancestor == l_tmp.node) 2546 r_off = end; 2547 else 2548 r_off = mas_data_end(&l_tmp); 2549 2550 if (l_tmp.offset < r_off) 2551 l_tmp.offset++; 2552 2553 if (l_tmp.offset < r_off) 2554 mas_topiary_range(&l_tmp, mast->destroy, 2555 l_tmp.offset, r_off); 2556 2557 if (r_tmp.node != child) 2558 mat_add(mast->free, child); 2559 2560 } while (l_tmp.node != ancestor); 2561 2562 *mast->orig_r = r_tmp; 2563 return true; 2564 } 2565 } while (!mte_is_root(mast->orig_r->node)); 2566 2567 *mast->orig_r = r_tmp; 2568 *mast->orig_l = l_tmp; 2569 return false; 2570 } 2571 2572 /* 2573 * mast_ascend_free() - Add current original maple state nodes to the free list 2574 * and ascend. 2575 * @mast: the maple subtree state. 2576 * 2577 * Ascend the original left and right sides and add the previous nodes to the 2578 * free list. Set the slots to point to the correct location in the new nodes. 
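 *
 * This is typically driven in a loop; mast_new_root(), for instance,
 * walks up to the root with:
 *
 *	do {
 *		mast_ascend_free(mast);
 *		mast_topiary(mast);
 *	} while (!mte_is_root(mast->orig_l->node));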
 */
static inline void
mast_ascend_free(struct maple_subtree_state *mast)
{
	MA_WR_STATE(wr_mas, mast->orig_r, NULL);
	struct maple_enode *left = mast->orig_l->node;
	struct maple_enode *right = mast->orig_r->node;

	mas_ascend(mast->orig_l);
	mas_ascend(mast->orig_r);
	mat_add(mast->free, left);

	if (left != right)
		mat_add(mast->free, right);

	mast->orig_r->offset = 0;
	mast->orig_r->index = mast->r->max;
	/* last should be larger than or equal to index */
	if (mast->orig_r->last < mast->orig_r->index)
		mast->orig_r->last = mast->orig_r->index;
	/*
	 * The node may not contain the value so set the slot to ensure all
	 * of the node's contents are freed or destroyed.
	 */
	wr_mas.type = mte_node_type(mast->orig_r->node);
	mas_wr_node_walk(&wr_mas);
	/* Set up the left side of things */
	mast->orig_l->offset = 0;
	mast->orig_l->index = mast->l->min;
	wr_mas.mas = mast->orig_l;
	wr_mas.type = mte_node_type(mast->orig_l->node);
	mas_wr_node_walk(&wr_mas);

	mast->bn->type = wr_mas.type;
}

/*
 * mas_new_ma_node() - Create and return a new maple node.  Helper function.
 * @mas: the maple state with the allocations.
 * @b_node: the maple_big_node with the type encoding.
 *
 * Use the node type from the maple_big_node to allocate a new node from the
 * ma_state.  This function exists mainly for code readability.
 *
 * Return: A new maple encoded node
 */
static inline struct maple_enode
*mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node)
{
	return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type);
}

/*
 * mas_mab_to_node() - Set up the new left node and, if needed, the right and
 * (rarely) middle nodes.
 * @mas: the maple state that contains the allocations.
 * @b_node: the node which contains the data.
 * @left: The pointer which will have the left node
 * @right: The pointer which may have the right node
 * @middle: the pointer which may have the middle node (rare)
 * @mid_split: the split location for the middle node
 * @min: The minimum index covered by the data in @b_node
 *
 * Return: the split of left.
 */
static inline unsigned char mas_mab_to_node(struct ma_state *mas,
	struct maple_big_node *b_node, struct maple_enode **left,
	struct maple_enode **right, struct maple_enode **middle,
	unsigned char *mid_split, unsigned long min)
{
	unsigned char split = 0;
	unsigned char slot_count = mt_slots[b_node->type];

	*left = mas_new_ma_node(mas, b_node);
	*right = NULL;
	*middle = NULL;
	*mid_split = 0;

	if (b_node->b_end < slot_count) {
		split = b_node->b_end;
	} else {
		split = mab_calc_split(mas, b_node, mid_split, min);
		*right = mas_new_ma_node(mas, b_node);
	}

	if (*mid_split)
		*middle = mas_new_ma_node(mas, b_node);

	return split;
}

/*
 * mab_set_b_end() - Add an entry to @b_node at @b_node->b_end and increment
 * the end pointer.
 * @b_node: the big node to add the entry to
 * @mas: the maple state to get the pivot from (mas->max)
 * @entry: the entry to add; if NULL nothing happens.
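 *
 * Usage sketch, taken from the mas_spanning_rebalance() loop, which
 * appends the freshly created children in order (a NULL @entry is
 * simply skipped):
 *
 *	mab_set_b_end(mast->bn, &l_mas, left);
 *	mab_set_b_end(mast->bn, &m_mas, middle);
 *	mab_set_b_end(mast->bn, &r_mas, right);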
 */
static inline void mab_set_b_end(struct maple_big_node *b_node,
				 struct ma_state *mas,
				 void *entry)
{
	if (!entry)
		return;

	b_node->slot[b_node->b_end] = entry;
	if (mt_is_alloc(mas->tree))
		b_node->gap[b_node->b_end] = mas_max_gap(mas);
	b_node->pivot[b_node->b_end++] = mas->max;
}

/*
 * mas_set_split_parent() - combine_then_separate helper function.  Sets the
 * parent of @mas->node to either @left or @right, depending on @slot and
 * @split.
 * @mas: the maple state with the node that needs a parent
 * @left: possible parent 1
 * @right: possible parent 2
 * @slot: the slot the mas->node was placed
 * @split: the split location between @left and @right
 */
static inline void mas_set_split_parent(struct ma_state *mas,
					struct maple_enode *left,
					struct maple_enode *right,
					unsigned char *slot, unsigned char split)
{
	if (mas_is_none(mas))
		return;

	if ((*slot) <= split)
		mas_set_parent(mas, mas->node, left, *slot);
	else if (right)
		mas_set_parent(mas, mas->node, right, (*slot) - split - 1);

	(*slot)++;
}

/*
 * mte_mid_split_check() - Check if the next node passes the mid-split.
 * @l: Pointer to the left encoded maple node.
 * @r: Pointer to the right encoded maple node.
 * @right: The encoded maple node to the right of the mid-split.
 * @slot: The offset.
 * @split: Pointer to the split location.
 * @mid_split: The middle split.
 */
static inline void mte_mid_split_check(struct maple_enode **l,
				       struct maple_enode **r,
				       struct maple_enode *right,
				       unsigned char slot,
				       unsigned char *split,
				       unsigned char mid_split)
{
	if (*r == right)
		return;

	if (slot < mid_split)
		return;

	*l = *r;
	*r = right;
	*split = mid_split;
}

/*
 * mast_set_split_parents() - Helper function to set three nodes' parents.
 * The slot is taken from @mast->l.
 * @mast: the maple subtree state
 * @left: the left node
 * @middle: the middle node, if one was required
 * @right: the right node
 * @split: the split location
 * @mid_split: the middle split location, 0 if there is none
 */
static inline void mast_set_split_parents(struct maple_subtree_state *mast,
					  struct maple_enode *left,
					  struct maple_enode *middle,
					  struct maple_enode *right,
					  unsigned char split,
					  unsigned char mid_split)
{
	unsigned char slot;
	struct maple_enode *l = left;
	struct maple_enode *r = right;

	if (mas_is_none(mast->l))
		return;

	if (middle)
		r = middle;

	slot = mast->l->offset;

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->l, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->m, l, r, &slot, split);

	mte_mid_split_check(&l, &r, right, slot, &split, mid_split);
	mas_set_split_parent(mast->r, l, r, &slot, split);
}

/*
 * mas_wmb_replace() - Write memory barrier and replace.
 * @mas: The maple state
 * @free: the maple topiary list of nodes to free
 * @destroy: The maple topiary list of nodes to destroy (walk and free)
 *
 * Updates gap as necessary.
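 *
 * The ordering the callers rely on, in sketch form (the old nodes are
 * marked dead before this function is reached, as in
 * mas_spanning_rebalance()):
 *
 *	mte_set_node_dead(mas->node);
 *	smp_wmb();			(this function; needed for RCU)
 *	mas_replace(mas, true);		(publish the new data)
 *
 * A reader that races with the replacement therefore sees the old nodes
 * as dead and restarts its walk.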
2787 */ 2788 static inline void mas_wmb_replace(struct ma_state *mas, 2789 struct ma_topiary *free, 2790 struct ma_topiary *destroy) 2791 { 2792 /* All nodes must see old data as dead prior to replacing that data */ 2793 smp_wmb(); /* Needed for RCU */ 2794 2795 /* Insert the new data in the tree */ 2796 mas_replace(mas, true); 2797 2798 if (!mte_is_leaf(mas->node)) 2799 mas_descend_adopt(mas); 2800 2801 mas_mat_free(mas, free); 2802 2803 if (destroy) 2804 mas_mat_destroy(mas, destroy); 2805 2806 if (mte_is_leaf(mas->node)) 2807 return; 2808 2809 mas_update_gap(mas); 2810 } 2811 2812 /* 2813 * mast_new_root() - Set a new tree root during subtree creation 2814 * @mast: The maple subtree state 2815 * @mas: The maple state 2816 */ 2817 static inline void mast_new_root(struct maple_subtree_state *mast, 2818 struct ma_state *mas) 2819 { 2820 mas_mn(mast->l)->parent = 2821 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2822 if (!mte_dead_node(mast->orig_l->node) && 2823 !mte_is_root(mast->orig_l->node)) { 2824 do { 2825 mast_ascend_free(mast); 2826 mast_topiary(mast); 2827 } while (!mte_is_root(mast->orig_l->node)); 2828 } 2829 if ((mast->orig_l->node != mas->node) && 2830 (mast->l->depth > mas_mt_height(mas))) { 2831 mat_add(mast->free, mas->node); 2832 } 2833 } 2834 2835 /* 2836 * mast_cp_to_nodes() - Copy data out to nodes. 2837 * @mast: The maple subtree state 2838 * @left: The left encoded maple node 2839 * @middle: The middle encoded maple node 2840 * @right: The right encoded maple node 2841 * @split: The location to split between left and (middle ? middle : right) 2842 * @mid_split: The location to split between middle and right. 2843 */ 2844 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2845 struct maple_enode *left, struct maple_enode *middle, 2846 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2847 { 2848 bool new_lmax = true; 2849 2850 mast->l->node = mte_node_or_none(left); 2851 mast->m->node = mte_node_or_none(middle); 2852 mast->r->node = mte_node_or_none(right); 2853 2854 mast->l->min = mast->orig_l->min; 2855 if (split == mast->bn->b_end) { 2856 mast->l->max = mast->orig_r->max; 2857 new_lmax = false; 2858 } 2859 2860 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2861 2862 if (middle) { 2863 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2864 mast->m->min = mast->bn->pivot[split] + 1; 2865 split = mid_split; 2866 } 2867 2868 mast->r->max = mast->orig_r->max; 2869 if (right) { 2870 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2871 mast->r->min = mast->bn->pivot[split] + 1; 2872 } 2873 } 2874 2875 /* 2876 * mast_combine_cp_left - Copy in the original left side of the tree into the 2877 * combined data set in the maple subtree state big node. 2878 * @mast: The maple subtree state 2879 */ 2880 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2881 { 2882 unsigned char l_slot = mast->orig_l->offset; 2883 2884 if (!l_slot) 2885 return; 2886 2887 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2888 } 2889 2890 /* 2891 * mast_combine_cp_right: Copy in the original right side of the tree into the 2892 * combined data set in the maple subtree state big node. 
2893 * @mast: The maple subtree state 2894 */ 2895 static inline void mast_combine_cp_right(struct maple_subtree_state *mast) 2896 { 2897 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) 2898 return; 2899 2900 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, 2901 mt_slot_count(mast->orig_r->node), mast->bn, 2902 mast->bn->b_end); 2903 mast->orig_r->last = mast->orig_r->max; 2904 } 2905 2906 /* 2907 * mast_sufficient: Check if the maple subtree state has enough data in the big 2908 * node to create at least one sufficient node 2909 * @mast: the maple subtree state 2910 */ 2911 static inline bool mast_sufficient(struct maple_subtree_state *mast) 2912 { 2913 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) 2914 return true; 2915 2916 return false; 2917 } 2918 2919 /* 2920 * mast_overflow: Check if there is too much data in the subtree state for a 2921 * single node. 2922 * @mast: The maple subtree state 2923 */ 2924 static inline bool mast_overflow(struct maple_subtree_state *mast) 2925 { 2926 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) 2927 return true; 2928 2929 return false; 2930 } 2931 2932 static inline void *mtree_range_walk(struct ma_state *mas) 2933 { 2934 unsigned long *pivots; 2935 unsigned char offset; 2936 struct maple_node *node; 2937 struct maple_enode *next, *last; 2938 enum maple_type type; 2939 void __rcu **slots; 2940 unsigned char end; 2941 unsigned long max, min; 2942 unsigned long prev_max, prev_min; 2943 2944 next = mas->node; 2945 min = mas->min; 2946 max = mas->max; 2947 do { 2948 offset = 0; 2949 last = next; 2950 node = mte_to_node(next); 2951 type = mte_node_type(next); 2952 pivots = ma_pivots(node, type); 2953 end = ma_data_end(node, type, pivots, max); 2954 if (unlikely(ma_dead_node(node))) 2955 goto dead_node; 2956 2957 if (pivots[offset] >= mas->index) { 2958 prev_max = max; 2959 prev_min = min; 2960 max = pivots[offset]; 2961 goto next; 2962 } 2963 2964 do { 2965 offset++; 2966 } while ((offset < end) && (pivots[offset] < mas->index)); 2967 2968 prev_min = min; 2969 min = pivots[offset - 1] + 1; 2970 prev_max = max; 2971 if (likely(offset < end && pivots[offset])) 2972 max = pivots[offset]; 2973 2974 next: 2975 slots = ma_slots(node, type); 2976 next = mt_slot(mas->tree, slots, offset); 2977 if (unlikely(ma_dead_node(node))) 2978 goto dead_node; 2979 } while (!ma_is_leaf(type)); 2980 2981 mas->offset = offset; 2982 mas->index = min; 2983 mas->last = max; 2984 mas->min = prev_min; 2985 mas->max = prev_max; 2986 mas->node = last; 2987 return (void *)next; 2988 2989 dead_node: 2990 mas_reset(mas); 2991 return NULL; 2992 } 2993 2994 /* 2995 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. 2996 * @mas: The starting maple state 2997 * @mast: The maple_subtree_state, keeps track of 4 maple states. 2998 * @count: The estimated count of iterations needed. 2999 * 3000 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root 3001 * is hit. First @b_node is split into two entries which are inserted into the 3002 * next iteration of the loop. @b_node is returned populated with the final 3003 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the 3004 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last 3005 * to account of what has been copied into the new sub-tree. The update of 3006 * orig_l_mas->last is used in mas_consume to find the slots that will need to 3007 * be either freed or destroyed. 
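 * The @count passed in is an upper bound on the number of levels that may
 * need rebalancing; mas_rebalance() passes the tree height and
 * mas_wr_spanning_store() passes height + 1, with the callers' node
 * preallocation sized to match.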
orig_l_mas->depth keeps track of the height of 3008 * the new sub-tree in case the sub-tree becomes the full tree. 3009 * 3010 * Return: the number of elements in b_node during the last loop. 3011 */ 3012 static int mas_spanning_rebalance(struct ma_state *mas, 3013 struct maple_subtree_state *mast, unsigned char count) 3014 { 3015 unsigned char split, mid_split; 3016 unsigned char slot = 0; 3017 struct maple_enode *left = NULL, *middle = NULL, *right = NULL; 3018 3019 MA_STATE(l_mas, mas->tree, mas->index, mas->index); 3020 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3021 MA_STATE(m_mas, mas->tree, mas->index, mas->index); 3022 MA_TOPIARY(free, mas->tree); 3023 MA_TOPIARY(destroy, mas->tree); 3024 3025 /* 3026 * The tree needs to be rebalanced and leaves need to be kept at the same level. 3027 * Rebalancing is done by use of the ``struct maple_topiary``. 3028 */ 3029 mast->l = &l_mas; 3030 mast->m = &m_mas; 3031 mast->r = &r_mas; 3032 mast->free = &free; 3033 mast->destroy = &destroy; 3034 l_mas.node = r_mas.node = m_mas.node = MAS_NONE; 3035 3036 /* Check if this is not root and has sufficient data. */ 3037 if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) && 3038 unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type])) 3039 mast_spanning_rebalance(mast); 3040 3041 mast->orig_l->depth = 0; 3042 3043 /* 3044 * Each level of the tree is examined and balanced, pushing data to the left or 3045 * right, or rebalancing against left or right nodes is employed to avoid 3046 * rippling up the tree to limit the amount of churn. Once a new sub-section of 3047 * the tree is created, there may be a mix of new and old nodes. The old nodes 3048 * will have the incorrect parent pointers and currently be in two trees: the 3049 * original tree and the partially new tree. To remedy the parent pointers in 3050 * the old tree, the new data is swapped into the active tree and a walk down 3051 * the tree is performed and the parent pointers are updated. 3052 * See mas_descend_adopt() for more information.. 3053 */ 3054 while (count--) { 3055 mast->bn->b_end--; 3056 mast->bn->type = mte_node_type(mast->orig_l->node); 3057 split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, 3058 &mid_split, mast->orig_l->min); 3059 mast_set_split_parents(mast, left, middle, right, split, 3060 mid_split); 3061 mast_cp_to_nodes(mast, left, middle, right, split, mid_split); 3062 3063 /* 3064 * Copy data from next level in the tree to mast->bn from next 3065 * iteration 3066 */ 3067 memset(mast->bn, 0, sizeof(struct maple_big_node)); 3068 mast->bn->type = mte_node_type(left); 3069 mast->orig_l->depth++; 3070 3071 /* Root already stored in l->node. */ 3072 if (mas_is_root_limits(mast->l)) 3073 goto new_root; 3074 3075 mast_ascend_free(mast); 3076 mast_combine_cp_left(mast); 3077 l_mas.offset = mast->bn->b_end; 3078 mab_set_b_end(mast->bn, &l_mas, left); 3079 mab_set_b_end(mast->bn, &m_mas, middle); 3080 mab_set_b_end(mast->bn, &r_mas, right); 3081 3082 /* Copy anything necessary out of the right node. */ 3083 mast_combine_cp_right(mast); 3084 mast_topiary(mast); 3085 mast->orig_l->last = mast->orig_l->max; 3086 3087 if (mast_sufficient(mast)) 3088 continue; 3089 3090 if (mast_overflow(mast)) 3091 continue; 3092 3093 /* May be a new root stored in mast->bn */ 3094 if (mas_is_root_limits(mast->orig_l)) 3095 break; 3096 3097 mast_spanning_rebalance(mast); 3098 3099 /* rebalancing from other nodes may require another loop. 
*/ 3100 if (!count) 3101 count++; 3102 } 3103 3104 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3105 mte_node_type(mast->orig_l->node)); 3106 mast->orig_l->depth++; 3107 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3108 mas_set_parent(mas, left, l_mas.node, slot); 3109 if (middle) 3110 mas_set_parent(mas, middle, l_mas.node, ++slot); 3111 3112 if (right) 3113 mas_set_parent(mas, right, l_mas.node, ++slot); 3114 3115 if (mas_is_root_limits(mast->l)) { 3116 new_root: 3117 mast_new_root(mast, mas); 3118 } else { 3119 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3120 } 3121 3122 if (!mte_dead_node(mast->orig_l->node)) 3123 mat_add(&free, mast->orig_l->node); 3124 3125 mas->depth = mast->orig_l->depth; 3126 *mast->orig_l = l_mas; 3127 mte_set_node_dead(mas->node); 3128 3129 /* Set up mas for insertion. */ 3130 mast->orig_l->depth = mas->depth; 3131 mast->orig_l->alloc = mas->alloc; 3132 *mas = *mast->orig_l; 3133 mas_wmb_replace(mas, &free, &destroy); 3134 mtree_range_walk(mas); 3135 return mast->bn->b_end; 3136 } 3137 3138 /* 3139 * mas_rebalance() - Rebalance a given node. 3140 * @mas: The maple state 3141 * @b_node: The big maple node. 3142 * 3143 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3144 * Continue upwards until tree is sufficient. 3145 * 3146 * Return: the number of elements in b_node during the last loop. 3147 */ 3148 static inline int mas_rebalance(struct ma_state *mas, 3149 struct maple_big_node *b_node) 3150 { 3151 char empty_count = mas_mt_height(mas); 3152 struct maple_subtree_state mast; 3153 unsigned char shift, b_end = ++b_node->b_end; 3154 3155 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3156 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3157 3158 trace_ma_op(__func__, mas); 3159 3160 /* 3161 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3162 * against the node to the right if it exists, otherwise the node to the 3163 * left of this node is rebalanced against this node. If rebalancing 3164 * causes just one node to be produced instead of two, then the parent 3165 * is also examined and rebalanced if it is insufficient. Every level 3166 * tries to combine the data in the same way. If one node contains the 3167 * entire range of the tree, then that node is used as a new root node. 3168 */ 3169 mas_node_count(mas, 1 + empty_count * 3); 3170 if (mas_is_err(mas)) 3171 return 0; 3172 3173 mast.orig_l = &l_mas; 3174 mast.orig_r = &r_mas; 3175 mast.bn = b_node; 3176 mast.bn->type = mte_node_type(mas->node); 3177 3178 l_mas = r_mas = *mas; 3179 3180 if (mas_next_sibling(&r_mas)) { 3181 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3182 r_mas.last = r_mas.index = r_mas.max; 3183 } else { 3184 mas_prev_sibling(&l_mas); 3185 shift = mas_data_end(&l_mas) + 1; 3186 mab_shift_right(b_node, shift); 3187 mas->offset += shift; 3188 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3189 b_node->b_end = shift + b_end; 3190 l_mas.index = l_mas.last = l_mas.min; 3191 } 3192 3193 return mas_spanning_rebalance(mas, &mast, empty_count); 3194 } 3195 3196 /* 3197 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3198 * state. 3199 * @mas: The maple state 3200 * @end: The end of the left-most node. 3201 * 3202 * During a mass-insert event (such as forking), it may be necessary to 3203 * rebalance the left-most node when it is not sufficient. 
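 *
 * A hedged sketch of the trigger (the exact call site lives in the
 * mas_destroy() teardown path): a bulk store can leave MA_STATE_REBALANCE
 * set when the final leaf was left deficient, in which case teardown ends
 * with something like:
 *
 *	if (mas->mas_flags & MA_STATE_REBALANCE)
 *		mas_destroy_rebalance(mas, end);
 *
 * where end is the data end of that left-most deficient node.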
3204 */ 3205 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3206 { 3207 enum maple_type mt = mte_node_type(mas->node); 3208 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3209 struct maple_enode *eparent; 3210 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3211 void __rcu **l_slots, **slots; 3212 unsigned long *l_pivs, *pivs, gap; 3213 bool in_rcu = mt_in_rcu(mas->tree); 3214 3215 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3216 3217 l_mas = *mas; 3218 mas_prev_sibling(&l_mas); 3219 3220 /* set up node. */ 3221 if (in_rcu) { 3222 /* Allocate for both left and right as well as parent. */ 3223 mas_node_count(mas, 3); 3224 if (mas_is_err(mas)) 3225 return; 3226 3227 newnode = mas_pop_node(mas); 3228 } else { 3229 newnode = &reuse; 3230 } 3231 3232 node = mas_mn(mas); 3233 newnode->parent = node->parent; 3234 slots = ma_slots(newnode, mt); 3235 pivs = ma_pivots(newnode, mt); 3236 left = mas_mn(&l_mas); 3237 l_slots = ma_slots(left, mt); 3238 l_pivs = ma_pivots(left, mt); 3239 if (!l_slots[split]) 3240 split++; 3241 tmp = mas_data_end(&l_mas) - split; 3242 3243 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3244 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3245 pivs[tmp] = l_mas.max; 3246 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3247 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3248 3249 l_mas.max = l_pivs[split]; 3250 mas->min = l_mas.max + 1; 3251 eparent = mt_mk_node(mte_parent(l_mas.node), 3252 mas_parent_type(&l_mas, l_mas.node)); 3253 tmp += end; 3254 if (!in_rcu) { 3255 unsigned char max_p = mt_pivots[mt]; 3256 unsigned char max_s = mt_slots[mt]; 3257 3258 if (tmp < max_p) 3259 memset(pivs + tmp, 0, 3260 sizeof(unsigned long) * (max_p - tmp)); 3261 3262 if (tmp < mt_slots[mt]) 3263 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3264 3265 memcpy(node, newnode, sizeof(struct maple_node)); 3266 ma_set_meta(node, mt, 0, tmp - 1); 3267 mte_set_pivot(eparent, mte_parent_slot(l_mas.node), 3268 l_pivs[split]); 3269 3270 /* Remove data from l_pivs. */ 3271 tmp = split + 1; 3272 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3273 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3274 ma_set_meta(left, mt, 0, split); 3275 3276 goto done; 3277 } 3278 3279 /* RCU requires replacing both l_mas, mas, and parent. */ 3280 mas->node = mt_mk_node(newnode, mt); 3281 ma_set_meta(newnode, mt, 0, tmp); 3282 3283 new_left = mas_pop_node(mas); 3284 new_left->parent = left->parent; 3285 mt = mte_node_type(l_mas.node); 3286 slots = ma_slots(new_left, mt); 3287 pivs = ma_pivots(new_left, mt); 3288 memcpy(slots, l_slots, sizeof(void *) * split); 3289 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3290 ma_set_meta(new_left, mt, 0, split); 3291 l_mas.node = mt_mk_node(new_left, mt); 3292 3293 /* replace parent. 
*/ 3294 offset = mte_parent_slot(mas->node); 3295 mt = mas_parent_type(&l_mas, l_mas.node); 3296 parent = mas_pop_node(mas); 3297 slots = ma_slots(parent, mt); 3298 pivs = ma_pivots(parent, mt); 3299 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3300 rcu_assign_pointer(slots[offset], mas->node); 3301 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3302 pivs[offset - 1] = l_mas.max; 3303 eparent = mt_mk_node(parent, mt); 3304 done: 3305 gap = mas_leaf_max_gap(mas); 3306 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3307 gap = mas_leaf_max_gap(&l_mas); 3308 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3309 mas_ascend(mas); 3310 3311 if (in_rcu) 3312 mas_replace(mas, false); 3313 3314 mas_update_gap(mas); 3315 } 3316 3317 /* 3318 * mas_split_final_node() - Split the final node in a subtree operation. 3319 * @mast: the maple subtree state 3320 * @mas: The maple state 3321 * @height: The height of the tree in case it's a new root. 3322 */ 3323 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3324 struct ma_state *mas, int height) 3325 { 3326 struct maple_enode *ancestor; 3327 3328 if (mte_is_root(mas->node)) { 3329 if (mt_is_alloc(mas->tree)) 3330 mast->bn->type = maple_arange_64; 3331 else 3332 mast->bn->type = maple_range_64; 3333 mas->depth = height; 3334 } 3335 /* 3336 * Only a single node is used here, could be root. 3337 * The Big_node data should just fit in a single node. 3338 */ 3339 ancestor = mas_new_ma_node(mas, mast->bn); 3340 mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset); 3341 mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset); 3342 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3343 3344 mast->l->node = ancestor; 3345 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3346 mas->offset = mast->bn->b_end - 1; 3347 return true; 3348 } 3349 3350 /* 3351 * mast_fill_bnode() - Copy data into the big node in the subtree state 3352 * @mast: The maple subtree state 3353 * @mas: the maple state 3354 * @skip: The number of entries to skip for new nodes insertion. 3355 */ 3356 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3357 struct ma_state *mas, 3358 unsigned char skip) 3359 { 3360 bool cp = true; 3361 struct maple_enode *old = mas->node; 3362 unsigned char split; 3363 3364 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3365 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3366 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3367 mast->bn->b_end = 0; 3368 3369 if (mte_is_root(mas->node)) { 3370 cp = false; 3371 } else { 3372 mas_ascend(mas); 3373 mat_add(mast->free, old); 3374 mas->offset = mte_parent_slot(mas->node); 3375 } 3376 3377 if (cp && mast->l->offset) 3378 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3379 3380 split = mast->bn->b_end; 3381 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3382 mast->r->offset = mast->bn->b_end; 3383 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3384 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3385 cp = false; 3386 3387 if (cp) 3388 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3389 mast->bn, mast->bn->b_end); 3390 3391 mast->bn->b_end--; 3392 mast->bn->type = mte_node_type(mas->node); 3393 } 3394 3395 /* 3396 * mast_split_data() - Split the data in the subtree state big node into regular 3397 * nodes. 
3398 * @mast: The maple subtree state 3399 * @mas: The maple state 3400 * @split: The location to split the big node 3401 */ 3402 static inline void mast_split_data(struct maple_subtree_state *mast, 3403 struct ma_state *mas, unsigned char split) 3404 { 3405 unsigned char p_slot; 3406 3407 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3408 mte_set_pivot(mast->r->node, 0, mast->r->max); 3409 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3410 mast->l->offset = mte_parent_slot(mas->node); 3411 mast->l->max = mast->bn->pivot[split]; 3412 mast->r->min = mast->l->max + 1; 3413 if (mte_is_leaf(mas->node)) 3414 return; 3415 3416 p_slot = mast->orig_l->offset; 3417 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3418 &p_slot, split); 3419 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3420 &p_slot, split); 3421 } 3422 3423 /* 3424 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3425 * data to the right or left node if there is room. 3426 * @mas: The maple state 3427 * @height: The current height of the maple state 3428 * @mast: The maple subtree state 3429 * @left: Push left or not. 3430 * 3431 * Keeping the height of the tree low means faster lookups. 3432 * 3433 * Return: True if pushed, false otherwise. 3434 */ 3435 static inline bool mas_push_data(struct ma_state *mas, int height, 3436 struct maple_subtree_state *mast, bool left) 3437 { 3438 unsigned char slot_total = mast->bn->b_end; 3439 unsigned char end, space, split; 3440 3441 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3442 tmp_mas = *mas; 3443 tmp_mas.depth = mast->l->depth; 3444 3445 if (left && !mas_prev_sibling(&tmp_mas)) 3446 return false; 3447 else if (!left && !mas_next_sibling(&tmp_mas)) 3448 return false; 3449 3450 end = mas_data_end(&tmp_mas); 3451 slot_total += end; 3452 space = 2 * mt_slot_count(mas->node) - 2; 3453 /* -2 instead of -1 to ensure there isn't a triple split */ 3454 if (ma_is_leaf(mast->bn->type)) 3455 space--; 3456 3457 if (mas->max == ULONG_MAX) 3458 space--; 3459 3460 if (slot_total >= space) 3461 return false; 3462 3463 /* Get the data; Fill mast->bn */ 3464 mast->bn->b_end++; 3465 if (left) { 3466 mab_shift_right(mast->bn, end + 1); 3467 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3468 mast->bn->b_end = slot_total + 1; 3469 } else { 3470 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3471 } 3472 3473 /* Configure mast for splitting of mast->bn */ 3474 split = mt_slots[mast->bn->type] - 2; 3475 if (left) { 3476 /* Switch mas to prev node */ 3477 mat_add(mast->free, mas->node); 3478 *mas = tmp_mas; 3479 /* Start using mast->l for the left side. */ 3480 tmp_mas.node = mast->l->node; 3481 *mast->l = tmp_mas; 3482 } else { 3483 mat_add(mast->free, tmp_mas.node); 3484 tmp_mas.node = mast->r->node; 3485 *mast->r = tmp_mas; 3486 split = slot_total - split; 3487 } 3488 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3489 /* Update parent slot for split calculation. */ 3490 if (left) 3491 mast->orig_l->offset += end + 1; 3492 3493 mast_split_data(mast, mas, split); 3494 mast_fill_bnode(mast, mas, 2); 3495 mas_split_final_node(mast, mas, height + 1); 3496 return true; 3497 } 3498 3499 /* 3500 * mas_split() - Split data that is too big for one node into two. 3501 * @mas: The maple state 3502 * @b_node: The maple big node 3503 * Return: 1 on success, 0 on failure. 
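 *
 * Condensed from the body below, the per-level strategy is: fit the data in
 * one node if possible, otherwise push into a sibling, and only then split:
 *
 *	while (height++ <= mas->depth) {
 *		if (mt_slots[b_node->type] > b_node->b_end) {
 *			mas_split_final_node(&mast, mas, height);
 *			break;
 *		}
 *		if (mas_push_data(mas, height, &mast, true))
 *			break;
 *		if (mas_push_data(mas, height, &mast, false))
 *			break;
 *		split = mab_calc_split(mas, b_node, &mid_split, ...);
 *		...
 *	}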
3504 */ 3505 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3506 { 3507 struct maple_subtree_state mast; 3508 int height = 0; 3509 unsigned char mid_split, split = 0; 3510 3511 /* 3512 * Splitting is handled differently from any other B-tree; the Maple 3513 * Tree splits upwards. Splitting up means that the split operation 3514 * occurs when the walk of the tree hits the leaves and not on the way 3515 * down. The reason for splitting up is that it is impossible to know 3516 * how much space will be needed until the leaf is (or leaves are) 3517 * reached. Since overwriting data is allowed and a range could 3518 * overwrite more than one range or result in changing one entry into 3 3519 * entries, it is impossible to know if a split is required until the 3520 * data is examined. 3521 * 3522 * Splitting is a balancing act between keeping allocations to a minimum 3523 * and avoiding a 'jitter' event where a tree is expanded to make room 3524 * for an entry followed by a contraction when the entry is removed. To 3525 * accomplish the balance, there are empty slots remaining in both left 3526 * and right nodes after a split. 3527 */ 3528 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3529 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3530 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3531 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3532 MA_TOPIARY(mat, mas->tree); 3533 3534 trace_ma_op(__func__, mas); 3535 mas->depth = mas_mt_height(mas); 3536 /* Allocation failures will happen early. */ 3537 mas_node_count(mas, 1 + mas->depth * 2); 3538 if (mas_is_err(mas)) 3539 return 0; 3540 3541 mast.l = &l_mas; 3542 mast.r = &r_mas; 3543 mast.orig_l = &prev_l_mas; 3544 mast.orig_r = &prev_r_mas; 3545 mast.free = &mat; 3546 mast.bn = b_node; 3547 3548 while (height++ <= mas->depth) { 3549 if (mt_slots[b_node->type] > b_node->b_end) { 3550 mas_split_final_node(&mast, mas, height); 3551 break; 3552 } 3553 3554 l_mas = r_mas = *mas; 3555 l_mas.node = mas_new_ma_node(mas, b_node); 3556 r_mas.node = mas_new_ma_node(mas, b_node); 3557 /* 3558 * Another way that 'jitter' is avoided is to terminate a split up early if the 3559 * left or right node has space to spare. This is referred to as "pushing left" 3560 * or "pushing right" and is similar to the B* tree, except the nodes left or 3561 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3562 * is a significant savings. 3563 */ 3564 /* Try to push left. */ 3565 if (mas_push_data(mas, height, &mast, true)) 3566 break; 3567 3568 /* Try to push right. */ 3569 if (mas_push_data(mas, height, &mast, false)) 3570 break; 3571 3572 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3573 mast_split_data(&mast, mas, split); 3574 /* 3575 * Usually correct, mab_mas_cp in the above call overwrites 3576 * r->max. 3577 */ 3578 mast.r->max = mas->max; 3579 mast_fill_bnode(&mast, mas, 1); 3580 prev_l_mas = *mast.l; 3581 prev_r_mas = *mast.r; 3582 } 3583 3584 /* Set the original node as dead */ 3585 mat_add(mast.free, mas->node); 3586 mas->node = l_mas.node; 3587 mas_wmb_replace(mas, mast.free, NULL); 3588 mtree_range_walk(mas); 3589 return 1; 3590 } 3591 3592 /* 3593 * mas_reuse_node() - Reuse the node to store the data. 3594 * @wr_mas: The maple write state 3595 * @bn: The maple big node 3596 * @end: The end of the data. 3597 * 3598 * Will always return false in RCU mode. 3599 * 3600 * Return: True if node was reused, false otherwise. 
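 *
 * Reuse is refused under RCU because a concurrent reader may still hold a
 * pointer into the old node.  When reuse fails, mas_commit_b_node()
 * allocates and copies instead:
 *
 *	node = mas_pop_node(wr_mas->mas);
 *	node->parent = mas_mn(wr_mas->mas)->parent;
 *	wr_mas->mas->node = mt_mk_node(node, b_type);
 *	mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false);
 *	mas_replace(wr_mas->mas, false);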
3601 */ 3602 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3603 struct maple_big_node *bn, unsigned char end) 3604 { 3605 /* Need to be rcu safe. */ 3606 if (mt_in_rcu(wr_mas->mas->tree)) 3607 return false; 3608 3609 if (end > bn->b_end) { 3610 int clear = mt_slots[wr_mas->type] - bn->b_end; 3611 3612 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3613 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3614 } 3615 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3616 return true; 3617 } 3618 3619 /* 3620 * mas_commit_b_node() - Commit the big node into the tree. 3621 * @wr_mas: The maple write state 3622 * @b_node: The maple big node 3623 * @end: The end of the data. 3624 */ 3625 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, 3626 struct maple_big_node *b_node, unsigned char end) 3627 { 3628 struct maple_node *node; 3629 unsigned char b_end = b_node->b_end; 3630 enum maple_type b_type = b_node->type; 3631 3632 if ((b_end < mt_min_slots[b_type]) && 3633 (!mte_is_root(wr_mas->mas->node)) && 3634 (mas_mt_height(wr_mas->mas) > 1)) 3635 return mas_rebalance(wr_mas->mas, b_node); 3636 3637 if (b_end >= mt_slots[b_type]) 3638 return mas_split(wr_mas->mas, b_node); 3639 3640 if (mas_reuse_node(wr_mas, b_node, end)) 3641 goto reuse_node; 3642 3643 mas_node_count(wr_mas->mas, 1); 3644 if (mas_is_err(wr_mas->mas)) 3645 return 0; 3646 3647 node = mas_pop_node(wr_mas->mas); 3648 node->parent = mas_mn(wr_mas->mas)->parent; 3649 wr_mas->mas->node = mt_mk_node(node, b_type); 3650 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3651 mas_replace(wr_mas->mas, false); 3652 reuse_node: 3653 mas_update_gap(wr_mas->mas); 3654 return 1; 3655 } 3656 3657 /* 3658 * mas_root_expand() - Expand a root to a node 3659 * @mas: The maple state 3660 * @entry: The entry to store into the tree 3661 */ 3662 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3663 { 3664 void *contents = mas_root_locked(mas); 3665 enum maple_type type = maple_leaf_64; 3666 struct maple_node *node; 3667 void __rcu **slots; 3668 unsigned long *pivots; 3669 int slot = 0; 3670 3671 mas_node_count(mas, 1); 3672 if (unlikely(mas_is_err(mas))) 3673 return 0; 3674 3675 node = mas_pop_node(mas); 3676 pivots = ma_pivots(node, type); 3677 slots = ma_slots(node, type); 3678 node->parent = ma_parent_ptr( 3679 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3680 mas->node = mt_mk_node(node, type); 3681 3682 if (mas->index) { 3683 if (contents) { 3684 rcu_assign_pointer(slots[slot], contents); 3685 if (likely(mas->index > 1)) 3686 slot++; 3687 } 3688 pivots[slot++] = mas->index - 1; 3689 } 3690 3691 rcu_assign_pointer(slots[slot], entry); 3692 mas->offset = slot; 3693 pivots[slot] = mas->last; 3694 if (mas->last != ULONG_MAX) 3695 slot++; 3696 mas->depth = 1; 3697 mas_set_height(mas); 3698 ma_set_meta(node, maple_leaf_64, 0, slot); 3699 /* swap the new root into the tree */ 3700 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3701 return slot; 3702 } 3703 3704 static inline void mas_store_root(struct ma_state *mas, void *entry) 3705 { 3706 if (likely((mas->last != 0) || (mas->index != 0))) 3707 mas_root_expand(mas, entry); 3708 else if (((unsigned long) (entry) & 3) == 2) 3709 mas_root_expand(mas, entry); 3710 else { 3711 rcu_assign_pointer(mas->tree->ma_root, entry); 3712 mas->node = MAS_START; 3713 } 3714 } 3715 3716 /* 3717 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3718 * spans the node. 
 * @wr_mas: The maple write state
 *
 * Spanning writes are writes that start in one node and end in another, or
 * writes of %NULL that would cause a node to end with a %NULL.
 *
 * Return: True if this is a spanning write, false otherwise.
 */
static bool mas_is_span_wr(struct ma_wr_state *wr_mas)
{
	unsigned long max = wr_mas->r_max;
	unsigned long last = wr_mas->mas->last;
	enum maple_type type = wr_mas->type;
	void *entry = wr_mas->entry;

	/* Contained in this pivot, fast path */
	if (last < max)
		return false;

	if (ma_is_leaf(type)) {
		max = wr_mas->mas->max;
		if (last < max)
			return false;
	}

	if (last == max) {
		/*
		 * The last entry of a leaf node cannot be NULL unless it is
		 * the rightmost node (writing ULONG_MAX), otherwise it spans
		 * slots.
		 */
		if (entry || last == ULONG_MAX)
			return false;
	}

	trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry);
	return true;
}

static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas)
{
	wr_mas->type = mte_node_type(wr_mas->mas->node);
	mas_wr_node_walk(wr_mas);
	wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type);
}

static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas)
{
	wr_mas->mas->max = wr_mas->r_max;
	wr_mas->mas->min = wr_mas->r_min;
	wr_mas->mas->node = wr_mas->content;
	wr_mas->mas->offset = 0;
	wr_mas->mas->depth++;
}

/*
 * mas_wr_walk() - Walk the tree for a write.
 * @wr_mas: The maple write state
 *
 * Uses mas_slot_locked() and does not need to worry about dead nodes.
 *
 * Return: True if it's contained in a node, false on spanning write.
 */
static bool mas_wr_walk(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		if (unlikely(mas_is_span_wr(wr_mas)))
			return false;

		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	while (true) {
		mas_wr_walk_descend(wr_mas);
		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
						  mas->offset);
		if (ma_is_leaf(wr_mas->type))
			return true;

		mas_wr_walk_traverse(wr_mas);
	}

	return true;
}

/*
 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
3819 * @l_wr_mas: The left maple write state 3820 * @r_wr_mas: The right maple write state 3821 */ 3822 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas, 3823 struct ma_wr_state *r_wr_mas) 3824 { 3825 struct ma_state *r_mas = r_wr_mas->mas; 3826 struct ma_state *l_mas = l_wr_mas->mas; 3827 unsigned char l_slot; 3828 3829 l_slot = l_mas->offset; 3830 if (!l_wr_mas->content) 3831 l_mas->index = l_wr_mas->r_min; 3832 3833 if ((l_mas->index == l_wr_mas->r_min) && 3834 (l_slot && 3835 !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) { 3836 if (l_slot > 1) 3837 l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1; 3838 else 3839 l_mas->index = l_mas->min; 3840 3841 l_mas->offset = l_slot - 1; 3842 } 3843 3844 if (!r_wr_mas->content) { 3845 if (r_mas->last < r_wr_mas->r_max) 3846 r_mas->last = r_wr_mas->r_max; 3847 r_mas->offset++; 3848 } else if ((r_mas->last == r_wr_mas->r_max) && 3849 (r_mas->last < r_mas->max) && 3850 !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) { 3851 r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots, 3852 r_wr_mas->type, r_mas->offset + 1); 3853 r_mas->offset++; 3854 } 3855 } 3856 3857 static inline void *mas_state_walk(struct ma_state *mas) 3858 { 3859 void *entry; 3860 3861 entry = mas_start(mas); 3862 if (mas_is_none(mas)) 3863 return NULL; 3864 3865 if (mas_is_ptr(mas)) 3866 return entry; 3867 3868 return mtree_range_walk(mas); 3869 } 3870 3871 /* 3872 * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up 3873 * to date. 3874 * 3875 * @mas: The maple state. 3876 * 3877 * Note: Leaves mas in undesirable state. 3878 * Return: The entry for @mas->index or %NULL on dead node. 3879 */ 3880 static inline void *mtree_lookup_walk(struct ma_state *mas) 3881 { 3882 unsigned long *pivots; 3883 unsigned char offset; 3884 struct maple_node *node; 3885 struct maple_enode *next; 3886 enum maple_type type; 3887 void __rcu **slots; 3888 unsigned char end; 3889 unsigned long max; 3890 3891 next = mas->node; 3892 max = ULONG_MAX; 3893 do { 3894 offset = 0; 3895 node = mte_to_node(next); 3896 type = mte_node_type(next); 3897 pivots = ma_pivots(node, type); 3898 end = ma_data_end(node, type, pivots, max); 3899 if (unlikely(ma_dead_node(node))) 3900 goto dead_node; 3901 do { 3902 if (pivots[offset] >= mas->index) { 3903 max = pivots[offset]; 3904 break; 3905 } 3906 } while (++offset < end); 3907 3908 slots = ma_slots(node, type); 3909 next = mt_slot(mas->tree, slots, offset); 3910 if (unlikely(ma_dead_node(node))) 3911 goto dead_node; 3912 } while (!ma_is_leaf(type)); 3913 3914 return (void *)next; 3915 3916 dead_node: 3917 mas_reset(mas); 3918 return NULL; 3919 } 3920 3921 /* 3922 * mas_new_root() - Create a new root node that only contains the entry passed 3923 * in. 3924 * @mas: The maple state 3925 * @entry: The entry to store. 3926 * 3927 * Only valid when the index == 0 and the last == ULONG_MAX 3928 * 3929 * Return 0 on error, 1 on success. 
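 *
 * Usage sketch from mas_wr_spanning_store(), where expanding NULLs can
 * grow a store to cover the entire range:
 *
 *	if (!l_mas.index && r_mas.last == ULONG_MAX) {
 *		mas_set_range(mas, 0, ULONG_MAX);
 *		return mas_new_root(mas, wr_mas->entry);
 *	}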
3930 */ 3931 static inline int mas_new_root(struct ma_state *mas, void *entry) 3932 { 3933 struct maple_enode *root = mas_root_locked(mas); 3934 enum maple_type type = maple_leaf_64; 3935 struct maple_node *node; 3936 void __rcu **slots; 3937 unsigned long *pivots; 3938 3939 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3940 mas->depth = 0; 3941 mas_set_height(mas); 3942 rcu_assign_pointer(mas->tree->ma_root, entry); 3943 mas->node = MAS_START; 3944 goto done; 3945 } 3946 3947 mas_node_count(mas, 1); 3948 if (mas_is_err(mas)) 3949 return 0; 3950 3951 node = mas_pop_node(mas); 3952 pivots = ma_pivots(node, type); 3953 slots = ma_slots(node, type); 3954 node->parent = ma_parent_ptr( 3955 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3956 mas->node = mt_mk_node(node, type); 3957 rcu_assign_pointer(slots[0], entry); 3958 pivots[0] = mas->last; 3959 mas->depth = 1; 3960 mas_set_height(mas); 3961 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3962 3963 done: 3964 if (xa_is_node(root)) 3965 mte_destroy_walk(root, mas->tree); 3966 3967 return 1; 3968 } 3969 /* 3970 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3971 * and new nodes where necessary, then place the sub-tree in the actual tree. 3972 * Note that mas is expected to point to the node which caused the store to 3973 * span. 3974 * @wr_mas: The maple write state 3975 * 3976 * Return: 0 on error, positive on success. 3977 */ 3978 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3979 { 3980 struct maple_subtree_state mast; 3981 struct maple_big_node b_node; 3982 struct ma_state *mas; 3983 unsigned char height; 3984 3985 /* Left and Right side of spanning store */ 3986 MA_STATE(l_mas, NULL, 0, 0); 3987 MA_STATE(r_mas, NULL, 0, 0); 3988 3989 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3990 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3991 3992 /* 3993 * A store operation that spans multiple nodes is called a spanning 3994 * store and is handled early in the store call stack by the function 3995 * mas_is_span_wr(). When a spanning store is identified, the maple 3996 * state is duplicated. The first maple state walks the left tree path 3997 * to ``index``, the duplicate walks the right tree path to ``last``. 3998 * The data in the two nodes are combined into a single node, two nodes, 3999 * or possibly three nodes (see the 3-way split above). A ``NULL`` 4000 * written to the last entry of a node is considered a spanning store as 4001 * a rebalance is required for the operation to complete and an overflow 4002 * of data may happen. 4003 */ 4004 mas = wr_mas->mas; 4005 trace_ma_op(__func__, mas); 4006 4007 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 4008 return mas_new_root(mas, wr_mas->entry); 4009 /* 4010 * Node rebalancing may occur due to this store, so there may be three new 4011 * entries per level plus a new root. 4012 */ 4013 height = mas_mt_height(mas); 4014 mas_node_count(mas, 1 + height * 3); 4015 if (mas_is_err(mas)) 4016 return 0; 4017 4018 /* 4019 * Set up right side. Need to get to the next offset after the spanning 4020 * store to ensure it's not NULL and to combine both the next node and 4021 * the node with the start together. 4022 */ 4023 r_mas = *mas; 4024 /* Avoid overflow, walk to next slot in the tree. */ 4025 if (r_mas.last + 1) 4026 r_mas.last++; 4027 4028 r_mas.index = r_mas.last; 4029 mas_wr_walk_index(&r_wr_mas); 4030 r_mas.last = r_mas.index = mas->last; 4031 4032 /* Set up left side. 
	 */
	l_mas = *mas;
	mas_wr_walk_index(&l_wr_mas);

	if (!wr_mas->entry) {
		mas_extend_spanning_null(&l_wr_mas, &r_wr_mas);
		mas->offset = l_mas.offset;
		mas->index = l_mas.index;
		mas->last = l_mas.last = r_mas.last;
	}

	/* expanding NULLs may make this cover the entire range */
	if (!l_mas.index && r_mas.last == ULONG_MAX) {
		mas_set_range(mas, 0, ULONG_MAX);
		return mas_new_root(mas, wr_mas->entry);
	}

	memset(&b_node, 0, sizeof(struct maple_big_node));
	/* Copy l_mas and store the value in b_node. */
	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
	/* Copy r_mas into b_node. */
	if (r_mas.offset <= r_wr_mas.node_end)
		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
			   &b_node, b_node.b_end + 1);
	else
		b_node.b_end++;

	/* Stop spanning searches by searching for just index. */
	l_mas.index = l_mas.last = mas->index;

	mast.bn = &b_node;
	mast.orig_l = &l_mas;
	mast.orig_r = &r_mas;
	/* Combine l_mas and r_mas and split them up evenly again. */
	return mas_spanning_rebalance(mas, &mast, height + 1);
}

/*
 * mas_wr_node_store() - Attempt to store the value in a node
 * @wr_mas: The maple write state
 * @new_end: The end of the node after the modification
 *
 * Attempts to reuse the node, but may allocate.
 *
 * Return: True if stored, false otherwise
 */
static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
				     unsigned char new_end)
{
	struct ma_state *mas = wr_mas->mas;
	void __rcu **dst_slots;
	unsigned long *dst_pivots;
	unsigned char dst_offset, offset_end = wr_mas->offset_end;
	struct maple_node reuse, *newnode;
	unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type];
	bool in_rcu = mt_in_rcu(mas->tree);

	/*
	 * If the node would be left with too little data, it needs a
	 * rebalance and cannot take this fast path.
	 */
	if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) &&
	    !(mas->mas_flags & MA_STATE_BULK))
		return false;

	if (mas->last == wr_mas->end_piv)
		offset_end++; /* don't copy this offset */
	else if (unlikely(wr_mas->r_max == ULONG_MAX))
		mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);

	/* Set up the new node. */
	if (in_rcu) {
		mas_node_count(mas, 1);
		if (mas_is_err(mas))
			return false;

		newnode = mas_pop_node(mas);
	} else {
		memset(&reuse, 0, sizeof(struct maple_node));
		newnode = &reuse;
	}

	newnode->parent = mas_mn(mas)->parent;
	dst_pivots = ma_pivots(newnode, wr_mas->type);
	dst_slots = ma_slots(newnode, wr_mas->type);
	/* Copy from start to insert point */
	memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset);
	memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset);

	/* Handle insert of new range starting after old range */
	if (wr_mas->r_min < mas->index) {
		rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content);
		dst_pivots[mas->offset++] = mas->index - 1;
	}

	/* Store the new entry and range end. */
	if (mas->offset < node_pivots)
		dst_pivots[mas->offset] = mas->last;
	rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry);

	/*
	 * This range wrote to the end of the node, or it overwrote the rest
	 * of the data.
	 */
	if (offset_end > wr_mas->node_end)
		goto done;

	dst_offset = mas->offset + 1;
	/* Copy to the end of the node if necessary.
*/ 4137 copy_size = wr_mas->node_end - offset_end + 1; 4138 memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, 4139 sizeof(void *) * copy_size); 4140 memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, 4141 sizeof(unsigned long) * (copy_size - 1)); 4142 4143 if (new_end < node_pivots) 4144 dst_pivots[new_end] = mas->max; 4145 4146 done: 4147 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4148 if (in_rcu) { 4149 mte_set_node_dead(mas->node); 4150 mas->node = mt_mk_node(newnode, wr_mas->type); 4151 mas_replace(mas, false); 4152 } else { 4153 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4154 } 4155 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4156 mas_update_gap(mas); 4157 return true; 4158 } 4159 4160 /* 4161 * mas_wr_slot_store: Attempt to store a value in a slot. 4162 * @wr_mas: the maple write state 4163 * 4164 * Return: True if stored, false otherwise 4165 */ 4166 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4167 { 4168 struct ma_state *mas = wr_mas->mas; 4169 unsigned char offset = mas->offset; 4170 bool gap = false; 4171 4172 if (wr_mas->offset_end - offset != 1) 4173 return false; 4174 4175 gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset); 4176 gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1); 4177 4178 if (mas->index == wr_mas->r_min) { 4179 /* Overwriting the range and over a part of the next range. */ 4180 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); 4181 wr_mas->pivots[offset] = mas->last; 4182 } else { 4183 /* Overwriting a part of the range and over the next range */ 4184 rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); 4185 wr_mas->pivots[offset] = mas->index - 1; 4186 mas->offset++; /* Keep mas accurate. */ 4187 } 4188 4189 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4190 /* 4191 * Only update gap when the new entry is empty or there is an empty 4192 * entry in the original two ranges. 
	 */
	if (!wr_mas->entry || gap)
		mas_update_gap(mas);

	return true;
}

static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
	while ((wr_mas->offset_end < wr_mas->node_end) &&
	       (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
		wr_mas->offset_end++;

	if (wr_mas->offset_end < wr_mas->node_end)
		wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
	else
		wr_mas->end_piv = wr_mas->mas->max;
}

static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	if (!wr_mas->slots[wr_mas->offset_end]) {
		/* If this one is null, the next and prev are not */
		mas->last = wr_mas->end_piv;
	} else {
		/* Check next slot(s) if we are overwriting the end */
		if ((mas->last == wr_mas->end_piv) &&
		    (wr_mas->node_end != wr_mas->offset_end) &&
		    !wr_mas->slots[wr_mas->offset_end + 1]) {
			wr_mas->offset_end++;
			if (wr_mas->offset_end == wr_mas->node_end)
				mas->last = mas->max;
			else
				mas->last = wr_mas->pivots[wr_mas->offset_end];
			wr_mas->end_piv = mas->last;
		}
	}

	if (!wr_mas->content) {
		/* If this one is null, the next and prev are not */
		mas->index = wr_mas->r_min;
	} else {
		/* Check prev slot if we are overwriting the start */
		if (mas->index == wr_mas->r_min && mas->offset &&
		    !wr_mas->slots[mas->offset - 1]) {
			mas->offset--;
			wr_mas->r_min = mas->index =
				mas_safe_min(mas, wr_mas->pivots, mas->offset);
			wr_mas->r_max = wr_mas->pivots[mas->offset];
		}
	}
}

static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end = wr_mas->node_end + 2;

	new_end -= wr_mas->offset_end - mas->offset;
	if (wr_mas->r_min == mas->index)
		new_end--;

	if (wr_mas->end_piv == mas->last)
		new_end--;

	return new_end;
}

/*
 * mas_wr_append() - Attempt to append the entry to the node
 * @wr_mas: The maple write state
 *
 * Return: True if appended, false otherwise
 */
static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
{
	unsigned char end = wr_mas->node_end;
	unsigned char new_end = end + 1;
	struct ma_state *mas = wr_mas->mas;
	unsigned char node_pivots = mt_pivots[wr_mas->type];

	if (mas->offset != wr_mas->node_end)
		return false;

	if (new_end < node_pivots) {
		wr_mas->pivots[new_end] = wr_mas->pivots[end];
		ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
	}

	if (mas->last == wr_mas->r_max) {
		/* Append to end of range */
		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
		wr_mas->pivots[end] = mas->index - 1;
		mas->offset = new_end;
	} else {
		/* Append to start of range */
		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
		wr_mas->pivots[end] = mas->last;
		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
	}

	if (!wr_mas->content || !wr_mas->entry)
		mas_update_gap(mas);

	return true;
}

/*
 * mas_wr_bnode() - Slow path for a modification.
 * @wr_mas: The write maple state
 *
 * This is where splits and rebalances end up.
 */
static void mas_wr_bnode(struct ma_wr_state *wr_mas)
{
	struct maple_big_node b_node;

	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
	memset(&b_node, 0, sizeof(struct maple_big_node));
	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
}

static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;
	unsigned char new_end;

	/* Direct replacement */
	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
		if (!!wr_mas->entry ^ !!wr_mas->content)
			mas_update_gap(mas);
		return;
	}

	/*
	 * If new_end exceeds the size of the maple node, the write cannot
	 * enter the fast path.
	 */
	new_end = mas_wr_new_end(wr_mas);
	if (new_end >= mt_slots[wr_mas->type])
		goto slow_path;

	/* Attempt to append */
	if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas))
		return;

	if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
		return;

	if (mas_wr_node_store(wr_mas, new_end))
		return;

	if (mas_is_err(mas))
		return;

slow_path:
	mas_wr_bnode(wr_mas);
}

/*
 * mas_wr_store_entry() - Internal call to store a value
 * @wr_mas: The maple write state
 *
 * Return: The contents that were previously stored at the index.
 */
static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
{
	struct ma_state *mas = wr_mas->mas;

	wr_mas->content = mas_start(mas);
	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	if (unlikely(!mas_wr_walk(wr_mas))) {
		mas_wr_spanning_store(wr_mas);
		return wr_mas->content;
	}

	/* At this point, we are at the leaf node that needs to be altered. */
	mas_wr_end_piv(wr_mas);

	if (!wr_mas->entry)
		mas_wr_extend_null(wr_mas);

	/* New root for a single pointer */
	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
		mas_new_root(mas, wr_mas->entry);
		return wr_mas->content;
	}

	mas_wr_modify(wr_mas);
	return wr_mas->content;
}

/**
 * mas_insert() - Internal call to insert a value
 * @mas: The maple state
 * @entry: The entry to store
 *
 * Return: %NULL if the entry was inserted, otherwise the contents that
 * already exist at the requested index. The maple state needs to be checked
 * for error conditions.
 */
static inline void *mas_insert(struct ma_state *mas, void *entry)
{
	MA_WR_STATE(wr_mas, mas, entry);

	/*
	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
	 * tree. If the insert fits exactly into an existing gap with a value
	 * of NULL, then the slot only needs to be written with the new value.
	 * If the range being inserted is adjacent to another range, then only
	 * a single pivot needs to be inserted (as well as writing the entry).
	 * If the new range is within a gap but does not touch any other
	 * ranges, then two pivots need to be inserted: the start - 1, and the
	 * end. As usual, the entry must be written. For example
	 * (illustrative), inserting 7-10 into a NULL gap spanning 5-20 adds
	 * pivots at 6 and 10, while inserting 5-10 into the same gap adds
	 * only the pivot at 10. When in RCU mode, most operations require a
	 * new node to be allocated to replace an existing node in order to
	 * preserve RCU safety.
	 * The exception to requiring a newly allocated node is when inserting
	 * at the end of a node (appending). When done carefully, appending
	 * can reuse the node in place.
	 */
	wr_mas.content = mas_start(mas);
	if (wr_mas.content)
		goto exists;

	if (mas_is_none(mas) || mas_is_ptr(mas)) {
		mas_store_root(mas, entry);
		return NULL;
	}

	/* spanning writes always overwrite something */
	if (!mas_wr_walk(&wr_mas))
		goto exists;

	/* At this point, we are at the leaf node that needs to be altered. */
	wr_mas.offset_end = mas->offset;
	wr_mas.end_piv = wr_mas.r_max;

	if (wr_mas.content || (mas->last > wr_mas.r_max))
		goto exists;

	if (!entry)
		return NULL;

	mas_wr_modify(&wr_mas);
	return wr_mas.content;

exists:
	mas_set_err(mas, -EEXIST);
	return wr_mas.content;

}

static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
	mas_set(mas, index);
	mas_state_walk(mas);
	if (mas_is_start(mas))
		goto retry;
}

static inline bool mas_rewalk_if_dead(struct ma_state *mas,
		struct maple_node *node, const unsigned long index)
{
	if (unlikely(ma_dead_node(node))) {
		mas_rewalk(mas, index);
		return true;
	}
	return false;
}

/*
 * mas_prev_node() - Find the previous non-NULL entry at the same level in the
 * tree.
 * @mas: The maple state
 * @min: The lower limit to search
 *
 * The prev node value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 if the node is dead, 0 otherwise.
 */
static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
{
	enum maple_type mt;
	int offset, level;
	void __rcu **slots;
	struct maple_node *node;
	unsigned long *pivots;
	unsigned long max;

	node = mas_mn(mas);
	if (!mas->min)
		goto no_entry;

	max = mas->min - 1;
	if (max < min)
		goto no_entry;

	level = 0;
	do {
		if (ma_is_root(node))
			goto no_entry;

		/* Walk up.
		 */
		if (unlikely(mas_ascend(mas)))
			return 1;
		offset = mas->offset;
		level++;
		node = mas_mn(mas);
	} while (!offset);

	offset--;
	mt = mte_node_type(mas->node);
	while (level > 1) {
		level--;
		slots = ma_slots(node, mt);
		mas->node = mas_slot(mas, slots, offset);
		if (unlikely(ma_dead_node(node)))
			return 1;

		mt = mte_node_type(mas->node);
		node = mas_mn(mas);
		pivots = ma_pivots(node, mt);
		offset = ma_data_end(node, mt, pivots, max);
		if (unlikely(ma_dead_node(node)))
			return 1;
	}

	slots = ma_slots(node, mt);
	mas->node = mas_slot(mas, slots, offset);
	pivots = ma_pivots(node, mt);
	if (unlikely(ma_dead_node(node)))
		return 1;

	if (likely(offset))
		mas->min = pivots[offset - 1] + 1;
	mas->max = max;
	mas->offset = mas_data_end(mas);
	if (unlikely(mte_dead_node(mas->node)))
		return 1;

	return 0;

no_entry:
	if (unlikely(ma_dead_node(node)))
		return 1;

	mas->node = MAS_NONE;
	return 0;
}

/*
 * mas_prev_slot() - Get the entry in the previous slot
 *
 * @mas: The maple state
 * @min: The minimum starting range
 * @empty: Can be empty
 *
 * Return: The entry in the previous slot which is possibly NULL
 */
static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
	void *entry;
	void __rcu **slots;
	unsigned long pivot;
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	unsigned long save_point = mas->index;

retry:
	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

again:
	if (mas->min <= min) {
		pivot = mas_safe_min(mas, pivots, mas->offset);

		if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
			goto retry;

		if (pivot <= min)
			return NULL;
	}

	if (likely(mas->offset)) {
		mas->offset--;
		mas->last = mas->index - 1;
		mas->index = mas_safe_min(mas, pivots, mas->offset);
	} else {
		if (mas_prev_node(mas, min)) {
			mas_rewalk(mas, save_point);
			goto retry;
		}

		if (mas_is_none(mas))
			return NULL;

		mas->last = mas->max;
		node = mas_mn(mas);
		type = mte_node_type(mas->node);
		pivots = ma_pivots(node, type);
		mas->index = pivots[mas->offset - 1] + 1;
	}

	slots = ma_slots(node, type);
	entry = mas_slot(mas, slots, mas->offset);
	if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
		goto retry;

	if (likely(entry))
		return entry;

	if (!empty)
		goto again;

	return entry;
}

/*
 * mas_next_node() - Get the next node at the same level in the tree.
 * @mas: The maple state
 * @node: The maple node
 * @max: The maximum pivot value to check.
 *
 * The next value will be mas->node[mas->offset] or MAS_NONE.
 * Return: 1 on dead node, 0 otherwise.
4627 */ 4628 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, 4629 unsigned long max) 4630 { 4631 unsigned long min; 4632 unsigned long *pivots; 4633 struct maple_enode *enode; 4634 int level = 0; 4635 unsigned char node_end; 4636 enum maple_type mt; 4637 void __rcu **slots; 4638 4639 if (mas->max >= max) 4640 goto no_entry; 4641 4642 min = mas->max + 1; 4643 level = 0; 4644 do { 4645 if (ma_is_root(node)) 4646 goto no_entry; 4647 4648 /* Walk up. */ 4649 if (unlikely(mas_ascend(mas))) 4650 return 1; 4651 4652 level++; 4653 node = mas_mn(mas); 4654 mt = mte_node_type(mas->node); 4655 pivots = ma_pivots(node, mt); 4656 node_end = ma_data_end(node, mt, pivots, mas->max); 4657 if (unlikely(ma_dead_node(node))) 4658 return 1; 4659 4660 } while (unlikely(mas->offset == node_end)); 4661 4662 slots = ma_slots(node, mt); 4663 mas->offset++; 4664 enode = mas_slot(mas, slots, mas->offset); 4665 if (unlikely(ma_dead_node(node))) 4666 return 1; 4667 4668 if (level > 1) 4669 mas->offset = 0; 4670 4671 while (unlikely(level > 1)) { 4672 level--; 4673 mas->node = enode; 4674 node = mas_mn(mas); 4675 mt = mte_node_type(mas->node); 4676 slots = ma_slots(node, mt); 4677 enode = mas_slot(mas, slots, 0); 4678 if (unlikely(ma_dead_node(node))) 4679 return 1; 4680 } 4681 4682 if (!mas->offset) 4683 pivots = ma_pivots(node, mt); 4684 4685 mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); 4686 if (unlikely(ma_dead_node(node))) 4687 return 1; 4688 4689 mas->node = enode; 4690 mas->min = min; 4691 return 0; 4692 4693 no_entry: 4694 if (unlikely(ma_dead_node(node))) 4695 return 1; 4696 4697 mas->node = MAS_NONE; 4698 return 0; 4699 } 4700 4701 /* 4702 * mas_next_slot() - Get the entry in the next slot 4703 * 4704 * @mas: The maple state 4705 * @max: The maximum starting range 4706 * @empty: Can be empty 4707 * 4708 * Return: The entry in the next slot which is possibly NULL 4709 */ 4710 static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) 4711 { 4712 void __rcu **slots; 4713 unsigned long *pivots; 4714 unsigned long pivot; 4715 enum maple_type type; 4716 struct maple_node *node; 4717 unsigned char data_end; 4718 unsigned long save_point = mas->last; 4719 void *entry; 4720 4721 retry: 4722 node = mas_mn(mas); 4723 type = mte_node_type(mas->node); 4724 pivots = ma_pivots(node, type); 4725 data_end = ma_data_end(node, type, pivots, mas->max); 4726 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4727 goto retry; 4728 4729 again: 4730 if (mas->max >= max) { 4731 if (likely(mas->offset < data_end)) 4732 pivot = pivots[mas->offset]; 4733 else 4734 return NULL; /* must be mas->max */ 4735 4736 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4737 goto retry; 4738 4739 if (pivot >= max) 4740 return NULL; 4741 } 4742 4743 if (likely(mas->offset < data_end)) { 4744 mas->index = pivots[mas->offset] + 1; 4745 mas->offset++; 4746 if (likely(mas->offset < data_end)) 4747 mas->last = pivots[mas->offset]; 4748 else 4749 mas->last = mas->max; 4750 } else { 4751 if (mas_next_node(mas, node, max)) { 4752 mas_rewalk(mas, save_point); 4753 goto retry; 4754 } 4755 4756 if (mas_is_none(mas)) 4757 return NULL; 4758 4759 mas->offset = 0; 4760 mas->index = mas->min; 4761 node = mas_mn(mas); 4762 type = mte_node_type(mas->node); 4763 pivots = ma_pivots(node, type); 4764 mas->last = pivots[0]; 4765 } 4766 4767 slots = ma_slots(node, type); 4768 entry = mt_slot(mas->tree, slots, mas->offset); 4769 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4770 goto 
retry; 4771 4772 if (entry) 4773 return entry; 4774 4775 if (!empty) { 4776 if (!mas->offset) 4777 data_end = 2; 4778 goto again; 4779 } 4780 4781 return entry; 4782 } 4783 4784 /* 4785 * mas_next_entry() - Internal function to get the next entry. 4786 * @mas: The maple state 4787 * @limit: The maximum range start. 4788 * 4789 * Set the @mas->node to the next entry and the range_start to 4790 * the beginning value for the entry. Does not check beyond @limit. 4791 * Sets @mas->index and @mas->last to the limit if it is hit. 4792 * Restarts on dead nodes. 4793 * 4794 * Return: the next entry or %NULL. 4795 */ 4796 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4797 { 4798 if (mas->last >= limit) 4799 return NULL; 4800 4801 return mas_next_slot(mas, limit, false); 4802 } 4803 4804 /* 4805 * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the 4806 * highest gap address of a given size in a given node and descend. 4807 * @mas: The maple state 4808 * @size: The needed size. 4809 * 4810 * Return: True if found in a leaf, false otherwise. 4811 * 4812 */ 4813 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, 4814 unsigned long *gap_min, unsigned long *gap_max) 4815 { 4816 enum maple_type type = mte_node_type(mas->node); 4817 struct maple_node *node = mas_mn(mas); 4818 unsigned long *pivots, *gaps; 4819 void __rcu **slots; 4820 unsigned long gap = 0; 4821 unsigned long max, min; 4822 unsigned char offset; 4823 4824 if (unlikely(mas_is_err(mas))) 4825 return true; 4826 4827 if (ma_is_dense(type)) { 4828 /* dense nodes. */ 4829 mas->offset = (unsigned char)(mas->index - mas->min); 4830 return true; 4831 } 4832 4833 pivots = ma_pivots(node, type); 4834 slots = ma_slots(node, type); 4835 gaps = ma_gaps(node, type); 4836 offset = mas->offset; 4837 min = mas_safe_min(mas, pivots, offset); 4838 /* Skip out of bounds. */ 4839 while (mas->last < min) 4840 min = mas_safe_min(mas, pivots, --offset); 4841 4842 max = mas_safe_pivot(mas, pivots, offset, type); 4843 while (mas->index <= max) { 4844 gap = 0; 4845 if (gaps) 4846 gap = gaps[offset]; 4847 else if (!mas_slot(mas, slots, offset)) 4848 gap = max - min + 1; 4849 4850 if (gap) { 4851 if ((size <= gap) && (size <= mas->last - min + 1)) 4852 break; 4853 4854 if (!gaps) { 4855 /* Skip the next slot, it cannot be a gap. */ 4856 if (offset < 2) 4857 goto ascend; 4858 4859 offset -= 2; 4860 max = pivots[offset]; 4861 min = mas_safe_min(mas, pivots, offset); 4862 continue; 4863 } 4864 } 4865 4866 if (!offset) 4867 goto ascend; 4868 4869 offset--; 4870 max = min - 1; 4871 min = mas_safe_min(mas, pivots, offset); 4872 } 4873 4874 if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) 4875 goto no_space; 4876 4877 if (unlikely(ma_is_leaf(type))) { 4878 mas->offset = offset; 4879 *gap_min = min; 4880 *gap_max = min + gap - 1; 4881 return true; 4882 } 4883 4884 /* descend, only happens under lock. 
*/ 4885 mas->node = mas_slot(mas, slots, offset); 4886 mas->min = min; 4887 mas->max = max; 4888 mas->offset = mas_data_end(mas); 4889 return false; 4890 4891 ascend: 4892 if (!mte_is_root(mas->node)) 4893 return false; 4894 4895 no_space: 4896 mas_set_err(mas, -EBUSY); 4897 return false; 4898 } 4899 4900 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4901 { 4902 enum maple_type type = mte_node_type(mas->node); 4903 unsigned long pivot, min, gap = 0; 4904 unsigned char offset, data_end; 4905 unsigned long *gaps, *pivots; 4906 void __rcu **slots; 4907 struct maple_node *node; 4908 bool found = false; 4909 4910 if (ma_is_dense(type)) { 4911 mas->offset = (unsigned char)(mas->index - mas->min); 4912 return true; 4913 } 4914 4915 node = mas_mn(mas); 4916 pivots = ma_pivots(node, type); 4917 slots = ma_slots(node, type); 4918 gaps = ma_gaps(node, type); 4919 offset = mas->offset; 4920 min = mas_safe_min(mas, pivots, offset); 4921 data_end = ma_data_end(node, type, pivots, mas->max); 4922 for (; offset <= data_end; offset++) { 4923 pivot = mas_logical_pivot(mas, pivots, offset, type); 4924 4925 /* Not within lower bounds */ 4926 if (mas->index > pivot) 4927 goto next_slot; 4928 4929 if (gaps) 4930 gap = gaps[offset]; 4931 else if (!mas_slot(mas, slots, offset)) 4932 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 4933 else 4934 goto next_slot; 4935 4936 if (gap >= size) { 4937 if (ma_is_leaf(type)) { 4938 found = true; 4939 goto done; 4940 } 4941 if (mas->index <= pivot) { 4942 mas->node = mas_slot(mas, slots, offset); 4943 mas->min = min; 4944 mas->max = pivot; 4945 offset = 0; 4946 break; 4947 } 4948 } 4949 next_slot: 4950 min = pivot + 1; 4951 if (mas->last <= pivot) { 4952 mas_set_err(mas, -EBUSY); 4953 return true; 4954 } 4955 } 4956 4957 if (mte_is_root(mas->node)) 4958 found = true; 4959 done: 4960 mas->offset = offset; 4961 return found; 4962 } 4963 4964 /** 4965 * mas_walk() - Search for @mas->index in the tree. 4966 * @mas: The maple state. 4967 * 4968 * mas->index and mas->last will be set to the range if there is a value. If 4969 * mas->node is MAS_NONE, reset to MAS_START. 4970 * 4971 * Return: the entry at the location or %NULL. 4972 */ 4973 void *mas_walk(struct ma_state *mas) 4974 { 4975 void *entry; 4976 4977 if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) 4978 mas->node = MAS_START; 4979 retry: 4980 entry = mas_state_walk(mas); 4981 if (mas_is_start(mas)) { 4982 goto retry; 4983 } else if (mas_is_none(mas)) { 4984 mas->index = 0; 4985 mas->last = ULONG_MAX; 4986 } else if (mas_is_ptr(mas)) { 4987 if (!mas->index) { 4988 mas->last = 0; 4989 return entry; 4990 } 4991 4992 mas->index = 1; 4993 mas->last = ULONG_MAX; 4994 mas->node = MAS_NONE; 4995 return NULL; 4996 } 4997 4998 return entry; 4999 } 5000 EXPORT_SYMBOL_GPL(mas_walk); 5001 5002 static inline bool mas_rewind_node(struct ma_state *mas) 5003 { 5004 unsigned char slot; 5005 5006 do { 5007 if (mte_is_root(mas->node)) { 5008 slot = mas->offset; 5009 if (!slot) 5010 return false; 5011 } else { 5012 mas_ascend(mas); 5013 slot = mas->offset; 5014 } 5015 } while (!slot); 5016 5017 mas->offset = --slot; 5018 return true; 5019 } 5020 5021 /* 5022 * mas_skip_node() - Internal function. Skip over a node. 5023 * @mas: The maple state. 5024 * 5025 * Return: true if there is another node, false otherwise. 
5026 */ 5027 static inline bool mas_skip_node(struct ma_state *mas) 5028 { 5029 if (mas_is_err(mas)) 5030 return false; 5031 5032 do { 5033 if (mte_is_root(mas->node)) { 5034 if (mas->offset >= mas_data_end(mas)) { 5035 mas_set_err(mas, -EBUSY); 5036 return false; 5037 } 5038 } else { 5039 mas_ascend(mas); 5040 } 5041 } while (mas->offset >= mas_data_end(mas)); 5042 5043 mas->offset++; 5044 return true; 5045 } 5046 5047 /* 5048 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5049 * @size 5050 * @mas: The maple state 5051 * @size: The size of the gap required 5052 * 5053 * Search between @mas->index and @mas->last for a gap of @size. 5054 */ 5055 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5056 { 5057 struct maple_enode *last = NULL; 5058 5059 /* 5060 * There are 4 options: 5061 * go to child (descend) 5062 * go back to parent (ascend) 5063 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5064 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5065 */ 5066 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5067 if (last == mas->node) 5068 mas_skip_node(mas); 5069 else 5070 last = mas->node; 5071 } 5072 } 5073 5074 /* 5075 * mas_sparse_area() - Internal function. Return upper or lower limit when 5076 * searching for a gap in an empty tree. 5077 * @mas: The maple state 5078 * @min: the minimum range 5079 * @max: The maximum range 5080 * @size: The size of the gap 5081 * @fwd: Searching forward or back 5082 */ 5083 static inline int mas_sparse_area(struct ma_state *mas, unsigned long min, 5084 unsigned long max, unsigned long size, bool fwd) 5085 { 5086 if (!unlikely(mas_is_none(mas)) && min == 0) { 5087 min++; 5088 /* 5089 * At this time, min is increased, we need to recheck whether 5090 * the size is satisfied. 5091 */ 5092 if (min > max || max - min + 1 < size) 5093 return -EBUSY; 5094 } 5095 /* mas_is_ptr */ 5096 5097 if (fwd) { 5098 mas->index = min; 5099 mas->last = min + size - 1; 5100 } else { 5101 mas->last = max; 5102 mas->index = max - size + 1; 5103 } 5104 return 0; 5105 } 5106 5107 /* 5108 * mas_empty_area() - Get the lowest address within the range that is 5109 * sufficient for the size requested. 
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap of the
 * requested size exists, or the error from the maple state.
 */
int mas_empty_area(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	unsigned char offset;
	unsigned long *pivots;
	enum maple_type mt;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas))
		mas_start(mas);
	else if (mas->offset >= 2)
		mas->offset -= 2;
	else if (!mas_skip_node(mas))
		return -EBUSY;

	/* Empty set */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, true);

	/* The start of the window can only be within these values */
	mas->index = min;
	mas->last = max;
	mas_awalk(mas, size);

	if (unlikely(mas_is_err(mas)))
		return xa_err(mas->node);

	offset = mas->offset;
	if (unlikely(offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	mt = mte_node_type(mas->node);
	pivots = ma_pivots(mas_mn(mas), mt);
	min = mas_safe_min(mas, pivots, offset);
	if (mas->index < min)
		mas->index = min;
	mas->last = mas->index + size - 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);

/*
 * mas_empty_area_rev() - Get the highest address within the range that is
 * sufficient for the size requested.
 * @mas: The maple state
 * @min: The lowest value of the range
 * @max: The highest value of the range
 * @size: The size needed
 *
 * Return: 0 on success, -EINVAL on invalid request, -EBUSY if no gap of the
 * requested size exists, or the error from the maple state.
 */
int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
		unsigned long max, unsigned long size)
{
	struct maple_enode *last = mas->node;

	if (min > max)
		return -EINVAL;

	if (size == 0 || max - min < size - 1)
		return -EINVAL;

	if (mas_is_start(mas)) {
		mas_start(mas);
		mas->offset = mas_data_end(mas);
	} else if (mas->offset >= 2) {
		mas->offset -= 2;
	} else if (!mas_rewind_node(mas)) {
		return -EBUSY;
	}

	/* Empty set. */
	if (mas_is_none(mas) || mas_is_ptr(mas))
		return mas_sparse_area(mas, min, max, size, false);

	/* The start of the window can only be within these values. */
	mas->index = min;
	mas->last = max;

	while (!mas_rev_awalk(mas, size, &min, &max)) {
		if (last == mas->node) {
			if (!mas_rewind_node(mas))
				return -EBUSY;
		} else {
			last = mas->node;
		}
	}

	if (mas_is_err(mas))
		return xa_err(mas->node);

	if (unlikely(mas->offset == MAPLE_NODE_SLOTS))
		return -EBUSY;

	/* Trim the upper limit to the max. */
	if (max < mas->last)
		mas->last = max;

	mas->index = mas->last - size + 1;
	return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);

/*
 * mte_dead_leaves() - Mark all leaves of a node as dead.
 * @enode: The maple encoded node
 * @mt: The maple tree
 * @slots: Pointer to the slot array
 *
 * Must hold the write lock.
 *
 * Return: The number of leaves marked as dead.
5230 */ 5231 static inline 5232 unsigned char mte_dead_leaves(struct maple_enode *enode, struct maple_tree *mt, 5233 void __rcu **slots) 5234 { 5235 struct maple_node *node; 5236 enum maple_type type; 5237 void *entry; 5238 int offset; 5239 5240 for (offset = 0; offset < mt_slot_count(enode); offset++) { 5241 entry = mt_slot(mt, slots, offset); 5242 type = mte_node_type(entry); 5243 node = mte_to_node(entry); 5244 /* Use both node and type to catch LE & BE metadata */ 5245 if (!node || !type) 5246 break; 5247 5248 mte_set_node_dead(entry); 5249 node->type = type; 5250 rcu_assign_pointer(slots[offset], node); 5251 } 5252 5253 return offset; 5254 } 5255 5256 /** 5257 * mte_dead_walk() - Walk down a dead tree to just before the leaves 5258 * @enode: The maple encoded node 5259 * @offset: The starting offset 5260 * 5261 * Note: This can only be used from the RCU callback context. 5262 */ 5263 static void __rcu **mte_dead_walk(struct maple_enode **enode, unsigned char offset) 5264 { 5265 struct maple_node *node, *next; 5266 void __rcu **slots = NULL; 5267 5268 next = mte_to_node(*enode); 5269 do { 5270 *enode = ma_enode_ptr(next); 5271 node = mte_to_node(*enode); 5272 slots = ma_slots(node, node->type); 5273 next = rcu_dereference_protected(slots[offset], 5274 lock_is_held(&rcu_callback_map)); 5275 offset = 0; 5276 } while (!ma_is_leaf(next->type)); 5277 5278 return slots; 5279 } 5280 5281 /** 5282 * mt_free_walk() - Walk & free a tree in the RCU callback context 5283 * @head: The RCU head that's within the node. 5284 * 5285 * Note: This can only be used from the RCU callback context. 5286 */ 5287 static void mt_free_walk(struct rcu_head *head) 5288 { 5289 void __rcu **slots; 5290 struct maple_node *node, *start; 5291 struct maple_enode *enode; 5292 unsigned char offset; 5293 enum maple_type type; 5294 5295 node = container_of(head, struct maple_node, rcu); 5296 5297 if (ma_is_leaf(node->type)) 5298 goto free_leaf; 5299 5300 start = node; 5301 enode = mt_mk_node(node, node->type); 5302 slots = mte_dead_walk(&enode, 0); 5303 node = mte_to_node(enode); 5304 do { 5305 mt_free_bulk(node->slot_len, slots); 5306 offset = node->parent_slot + 1; 5307 enode = node->piv_parent; 5308 if (mte_to_node(enode) == node) 5309 goto free_leaf; 5310 5311 type = mte_node_type(enode); 5312 slots = ma_slots(mte_to_node(enode), type); 5313 if ((offset < mt_slots[type]) && 5314 rcu_dereference_protected(slots[offset], 5315 lock_is_held(&rcu_callback_map))) 5316 slots = mte_dead_walk(&enode, offset); 5317 node = mte_to_node(enode); 5318 } while ((node != start) || (node->slot_len < offset)); 5319 5320 slots = ma_slots(node, node->type); 5321 mt_free_bulk(node->slot_len, slots); 5322 5323 free_leaf: 5324 mt_free_rcu(&node->rcu); 5325 } 5326 5327 static inline void __rcu **mte_destroy_descend(struct maple_enode **enode, 5328 struct maple_tree *mt, struct maple_enode *prev, unsigned char offset) 5329 { 5330 struct maple_node *node; 5331 struct maple_enode *next = *enode; 5332 void __rcu **slots = NULL; 5333 enum maple_type type; 5334 unsigned char next_offset = 0; 5335 5336 do { 5337 *enode = next; 5338 node = mte_to_node(*enode); 5339 type = mte_node_type(*enode); 5340 slots = ma_slots(node, type); 5341 next = mt_slot_locked(mt, slots, next_offset); 5342 if ((mte_dead_node(next))) 5343 next = mt_slot_locked(mt, slots, ++next_offset); 5344 5345 mte_set_node_dead(*enode); 5346 node->type = type; 5347 node->piv_parent = prev; 5348 node->parent_slot = offset; 5349 offset = next_offset; 5350 next_offset = 0; 5351 prev = 
*enode;
	} while (!mte_is_leaf(next));

	return slots;
}

static void mt_destroy_walk(struct maple_enode *enode, struct maple_tree *mt,
			    bool free)
{
	void __rcu **slots;
	struct maple_node *node = mte_to_node(enode);
	struct maple_enode *start;

	if (mte_is_leaf(enode)) {
		node->type = mte_node_type(enode);
		goto free_leaf;
	}

	start = enode;
	slots = mte_destroy_descend(&enode, mt, start, 0);
	node = mte_to_node(enode);	// Updated in the above call.
	do {
		enum maple_type type;
		unsigned char offset;
		struct maple_enode *parent, *tmp;

		node->slot_len = mte_dead_leaves(enode, mt, slots);
		if (free)
			mt_free_bulk(node->slot_len, slots);
		offset = node->parent_slot + 1;
		enode = node->piv_parent;
		if (mte_to_node(enode) == node)
			goto free_leaf;

		type = mte_node_type(enode);
		slots = ma_slots(mte_to_node(enode), type);
		if (offset >= mt_slots[type])
			goto next;

		tmp = mt_slot_locked(mt, slots, offset);
		if (mte_node_type(tmp) && mte_to_node(tmp)) {
			parent = enode;
			enode = tmp;
			slots = mte_destroy_descend(&enode, mt, parent, offset);
		}
next:
		node = mte_to_node(enode);
	} while (start != enode);

	node = mte_to_node(enode);
	node->slot_len = mte_dead_leaves(enode, mt, slots);
	if (free)
		mt_free_bulk(node->slot_len, slots);

free_leaf:
	if (free)
		mt_free_rcu(&node->rcu);
	else
		mt_clear_meta(mt, node, node->type);
}

/*
 * mte_destroy_walk() - Free a tree or sub-tree.
 * @enode: the encoded maple node (maple_enode) to start
 * @mt: the tree to free - needed for node types.
 *
 * Must hold the write lock.
 */
static inline void mte_destroy_walk(struct maple_enode *enode,
				    struct maple_tree *mt)
{
	struct maple_node *node = mte_to_node(enode);

	if (mt_in_rcu(mt)) {
		mt_destroy_walk(enode, mt, false);
		call_rcu(&node->rcu, mt_free_walk);
	} else {
		mt_destroy_walk(enode, mt, true);
	}
}

static void mas_wr_store_setup(struct ma_wr_state *wr_mas)
{
	if (unlikely(mas_is_paused(wr_mas->mas)))
		mas_reset(wr_mas->mas);

	if (!mas_is_start(wr_mas->mas)) {
		if (mas_is_none(wr_mas->mas)) {
			mas_reset(wr_mas->mas);
		} else {
			wr_mas->r_max = wr_mas->mas->max;
			wr_mas->type = mte_node_type(wr_mas->mas->node);
			if (mas_is_span_wr(wr_mas))
				mas_reset(wr_mas->mas);
		}
	}
}

/* Interface */

/**
 * mas_store() - Store an @entry.
 * @mas: The maple state.
 * @entry: The entry to store.
 *
 * The @mas->index and @mas->last are used to set the range for the @entry.
 * Note: The @mas should have pre-allocated nodes to ensure there is memory to
 * store the entry. Please see mas_expected_entries()/mas_destroy() for more
 * details.
 *
 * Return: the first entry between mas->index and mas->last or %NULL.
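 *
 * Example (an illustrative sketch of a caller; assumes @tree was initialised
 * elsewhere, the caller holds the tree lock, and nodes were pre-allocated):
 *
 *	MA_STATE(mas, &tree, 10, 20);
 *	void *old;
 *
 *	old = mas_store(&mas, entry);	// entry now spans indices 10 to 20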
5461 */ 5462 void *mas_store(struct ma_state *mas, void *entry) 5463 { 5464 MA_WR_STATE(wr_mas, mas, entry); 5465 5466 trace_ma_write(__func__, mas, 0, entry); 5467 #ifdef CONFIG_DEBUG_MAPLE_TREE 5468 if (MAS_WARN_ON(mas, mas->index > mas->last)) 5469 pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); 5470 5471 if (mas->index > mas->last) { 5472 mas_set_err(mas, -EINVAL); 5473 return NULL; 5474 } 5475 5476 #endif 5477 5478 /* 5479 * Storing is the same operation as insert with the added caveat that it 5480 * can overwrite entries. Although this seems simple enough, one may 5481 * want to examine what happens if a single store operation was to 5482 * overwrite multiple entries within a self-balancing B-Tree. 5483 */ 5484 mas_wr_store_setup(&wr_mas); 5485 mas_wr_store_entry(&wr_mas); 5486 return wr_mas.content; 5487 } 5488 EXPORT_SYMBOL_GPL(mas_store); 5489 5490 /** 5491 * mas_store_gfp() - Store a value into the tree. 5492 * @mas: The maple state 5493 * @entry: The entry to store 5494 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5495 * 5496 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5497 * be allocated. 5498 */ 5499 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5500 { 5501 MA_WR_STATE(wr_mas, mas, entry); 5502 5503 mas_wr_store_setup(&wr_mas); 5504 trace_ma_write(__func__, mas, 0, entry); 5505 retry: 5506 mas_wr_store_entry(&wr_mas); 5507 if (unlikely(mas_nomem(mas, gfp))) 5508 goto retry; 5509 5510 if (unlikely(mas_is_err(mas))) 5511 return xa_err(mas->node); 5512 5513 return 0; 5514 } 5515 EXPORT_SYMBOL_GPL(mas_store_gfp); 5516 5517 /** 5518 * mas_store_prealloc() - Store a value into the tree using memory 5519 * preallocated in the maple state. 5520 * @mas: The maple state 5521 * @entry: The entry to store. 5522 */ 5523 void mas_store_prealloc(struct ma_state *mas, void *entry) 5524 { 5525 MA_WR_STATE(wr_mas, mas, entry); 5526 5527 mas_wr_store_setup(&wr_mas); 5528 trace_ma_write(__func__, mas, 0, entry); 5529 mas_wr_store_entry(&wr_mas); 5530 MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); 5531 mas_destroy(mas); 5532 } 5533 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5534 5535 /** 5536 * mas_preallocate() - Preallocate enough nodes for a store operation 5537 * @mas: The maple state 5538 * @gfp: The GFP_FLAGS to use for allocations. 5539 * 5540 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5541 */ 5542 int mas_preallocate(struct ma_state *mas, gfp_t gfp) 5543 { 5544 int ret; 5545 5546 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5547 mas->mas_flags |= MA_STATE_PREALLOC; 5548 if (likely(!mas_is_err(mas))) 5549 return 0; 5550 5551 mas_set_alloc_req(mas, 0); 5552 ret = xa_err(mas->node); 5553 mas_reset(mas); 5554 mas_destroy(mas); 5555 mas_reset(mas); 5556 return ret; 5557 } 5558 EXPORT_SYMBOL_GPL(mas_preallocate); 5559 5560 /* 5561 * mas_destroy() - destroy a maple state. 5562 * @mas: The maple state 5563 * 5564 * Upon completion, check the left-most node and rebalance against the node to 5565 * the right if necessary. Frees any allocated nodes associated with this maple 5566 * state. 5567 */ 5568 void mas_destroy(struct ma_state *mas) 5569 { 5570 struct maple_alloc *node; 5571 unsigned long total; 5572 5573 /* 5574 * When using mas_for_each() to insert an expected number of elements, 5575 * it is possible that the number inserted is less than the expected 5576 * number. To fix an invalid final node, a check is performed here to 5577 * rebalance the previous node with the final node. 
5578 */ 5579 if (mas->mas_flags & MA_STATE_REBALANCE) { 5580 unsigned char end; 5581 5582 mas_start(mas); 5583 mtree_range_walk(mas); 5584 end = mas_data_end(mas) + 1; 5585 if (end < mt_min_slot_count(mas->node) - 1) 5586 mas_destroy_rebalance(mas, end); 5587 5588 mas->mas_flags &= ~MA_STATE_REBALANCE; 5589 } 5590 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5591 5592 total = mas_allocated(mas); 5593 while (total) { 5594 node = mas->alloc; 5595 mas->alloc = node->slot[0]; 5596 if (node->node_count > 1) { 5597 size_t count = node->node_count - 1; 5598 5599 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5600 total -= count; 5601 } 5602 kmem_cache_free(maple_node_cache, node); 5603 total--; 5604 } 5605 5606 mas->alloc = NULL; 5607 } 5608 EXPORT_SYMBOL_GPL(mas_destroy); 5609 5610 /* 5611 * mas_expected_entries() - Set the expected number of entries that will be inserted. 5612 * @mas: The maple state 5613 * @nr_entries: The number of expected entries. 5614 * 5615 * This will attempt to pre-allocate enough nodes to store the expected number 5616 * of entries. The allocations will occur using the bulk allocator interface 5617 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5618 * to ensure any unused nodes are freed. 5619 * 5620 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5621 */ 5622 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5623 { 5624 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5625 struct maple_enode *enode = mas->node; 5626 int nr_nodes; 5627 int ret; 5628 5629 /* 5630 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5631 * forking a process and duplicating the VMAs from one tree to a new 5632 * tree. When such a situation arises, it is known that the new tree is 5633 * not going to be used until the entire tree is populated. For 5634 * performance reasons, it is best to use a bulk load with RCU disabled. 5635 * This allows for optimistic splitting that favours the left and reuse 5636 * of nodes during the operation. 5637 */ 5638 5639 /* Optimize splitting for bulk insert in-order */ 5640 mas->mas_flags |= MA_STATE_BULK; 5641 5642 /* 5643 * Avoid overflow, assume a gap between each entry and a trailing null. 5644 * If this is wrong, it just means allocation can happen during 5645 * insertion of entries. 
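	 *
	 * Illustrative worked example (assuming MAPLE_RANGE64_SLOTS is 16
	 * and MAPLE_ARANGE64_SLOTS is 10, as on 64-bit): for nr_entries =
	 * 100, nr_nodes = max(100, 201) = 201; leaves = DIV_ROUND_UP(201, 14)
	 * = 15; internal nodes = DIV_ROUND_UP(15, 8) = 2; so 15 + 2 + 3 = 20
	 * nodes are requested below.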
	 */
	nr_nodes = max(nr_entries, nr_entries * 2 + 1);
	if (!mt_is_alloc(mas->tree))
		nonleaf_cap = MAPLE_RANGE64_SLOTS - 2;

	/* Leaves; reduce slots to keep space for expansion */
	nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2);
	/* Internal nodes */
	nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap);
	/* Add working room for split (2 nodes) + new parents */
	mas_node_count(mas, nr_nodes + 3);

	/* Detect if allocations run out */
	mas->mas_flags |= MA_STATE_PREALLOC;

	if (!mas_is_err(mas))
		return 0;

	ret = xa_err(mas->node);
	mas->node = enode;
	mas_destroy(mas);
	return ret;

}
EXPORT_SYMBOL_GPL(mas_expected_entries);

static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
		void **entry)
{
	bool was_none = mas_is_none(mas);

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas))
		*entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */

	if (mas_is_ptr(mas)) {
		*entry = NULL;
		if (was_none && mas->index == 0) {
			mas->index = mas->last = 0;
			return true;
		}
		mas->index = 1;
		mas->last = ULONG_MAX;
		mas->node = MAS_NONE;
		return true;
	}

	if (mas_is_none(mas))
		return true;
	return false;
}

/**
 * mas_next() - Get the next entry.
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Returns the next entry after @mas->index.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);

/**
 * mas_next_range() - Advance the maple state to the next range
 * @mas: The maple state
 * @max: The maximum index to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Can return the zero entry.
 *
 * Return: The next entry or %NULL
 */
void *mas_next_range(struct ma_state *mas, unsigned long max)
{
	void *entry = NULL;

	if (mas_next_setup(mas, max, &entry))
		return entry;

	/* Retries on dead nodes handled by mas_next_slot */
	return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);

/**
 * mt_next() - get the next value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @max: The maximum index to check
 *
 * Takes the RCU read lock internally to protect the search, which does not
 * keep the entry safe once the function returns.
 *
 * Return: The entry after @index, or %NULL if nothing is found.
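 *
 * Example (illustrative):
 *
 *	entry = mt_next(&tree, 5, ULONG_MAX);	// first entry after index 5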
 */
void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_next(&mas, max);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_next);

static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
		void **entry)
{
	if (mas->index <= min)
		goto none;

	if (mas_is_none(mas) || mas_is_paused(mas))
		mas->node = MAS_START;

	if (mas_is_start(mas)) {
		mas_walk(mas);
		if (!mas->index)
			goto none;
	}

	if (unlikely(mas_is_ptr(mas))) {
		if (!mas->index)
			goto none;
		mas->index = mas->last = 0;
		*entry = mas_root(mas);
		return true;
	}

	if (mas_is_none(mas)) {
		if (mas->index) {
			/* Walked to out-of-range pointer? */
			mas->index = mas->last = 0;
			mas->node = MAS_ROOT;
			*entry = mas_root(mas);
			return true;
		}
		return true;
	}

	return false;

none:
	mas->node = MAS_NONE;
	return true;
}

/**
 * mas_prev() - Get the previous entry
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Must hold rcu_read_lock or the write lock.
 * Will reset @mas to MAS_START if the node is MAS_NONE. Will stop on
 * unsearchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);

/**
 * mas_prev_range() - Advance to the previous range
 * @mas: The maple state
 * @min: The minimum value to check.
 *
 * Sets @mas->index and @mas->last to the range.
 * Must hold rcu_read_lock or the write lock.
 * Will reset @mas to MAS_START if the node is MAS_NONE. Will stop on
 * unsearchable nodes.
 *
 * Return: the previous value or %NULL.
 */
void *mas_prev_range(struct ma_state *mas, unsigned long min)
{
	void *entry = NULL;

	if (mas_prev_setup(mas, min, &entry))
		return entry;

	return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);

/**
 * mt_prev() - get the previous value in the maple tree
 * @mt: The maple tree
 * @index: The start index
 * @min: The minimum index to check
 *
 * Takes the RCU read lock internally to protect the search, which does not
 * keep the entry safe once the function returns.
 *
 * Return: The entry before @index, or %NULL if nothing is found.
 */
void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min)
{
	void *entry = NULL;
	MA_STATE(mas, mt, index, index);

	rcu_read_lock();
	entry = mas_prev(&mas, min);
	rcu_read_unlock();
	return entry;
}
EXPORT_SYMBOL_GPL(mt_prev);

/**
 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock.
 * @mas: The maple state to pause
 *
 * Some users need to pause a walk and drop the lock they're holding in
 * order to yield to a higher priority thread or carry out an operation
 * on an entry. Those users should call this function before they drop
 * the lock. It resets the @mas to be suitable for the next iteration
 * of the loop after the user has reacquired the lock.
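 *
 * Illustrative sketch of the pattern (assumes @tree uses the internal
 * spinlock and the loop body may need to reschedule):
 *
 *	mas_for_each(&mas, entry, ULONG_MAX) {
 *		mas_pause(&mas);
 *		mtree_unlock(&tree);
 *		cond_resched();
 *		mtree_lock(&tree);
 *	}
 *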
If most entries 5881 * found during a walk require you to call mas_pause(), the mt_for_each() 5882 * iterator may be more appropriate. 5883 * 5884 */ 5885 void mas_pause(struct ma_state *mas) 5886 { 5887 mas->node = MAS_PAUSE; 5888 } 5889 EXPORT_SYMBOL_GPL(mas_pause); 5890 5891 /** 5892 * mas_find_setup() - Internal function to set up mas_find*(). 5893 * @mas: The maple state 5894 * @max: The maximum index 5895 * @entry: Pointer to the entry 5896 * 5897 * Returns: True if entry is the answer, false otherwise. 5898 */ 5899 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, 5900 void **entry) 5901 { 5902 *entry = NULL; 5903 5904 if (unlikely(mas_is_none(mas))) { 5905 if (unlikely(mas->last >= max)) 5906 return true; 5907 5908 mas->index = mas->last; 5909 mas->node = MAS_START; 5910 } else if (unlikely(mas_is_paused(mas))) { 5911 if (unlikely(mas->last >= max)) 5912 return true; 5913 5914 mas->node = MAS_START; 5915 mas->index = ++mas->last; 5916 } else if (unlikely(mas_is_ptr(mas))) 5917 goto ptr_out_of_range; 5918 5919 if (unlikely(mas_is_start(mas))) { 5920 /* First run or continue */ 5921 if (mas->index > max) 5922 return true; 5923 5924 *entry = mas_walk(mas); 5925 if (*entry) 5926 return true; 5927 5928 } 5929 5930 if (unlikely(!mas_searchable(mas))) { 5931 if (unlikely(mas_is_ptr(mas))) 5932 goto ptr_out_of_range; 5933 5934 return true; 5935 } 5936 5937 if (mas->index == max) 5938 return true; 5939 5940 return false; 5941 5942 ptr_out_of_range: 5943 mas->node = MAS_NONE; 5944 mas->index = 1; 5945 mas->last = ULONG_MAX; 5946 return true; 5947 } 5948 5949 /** 5950 * mas_find() - On the first call, find the entry at or after mas->index up to 5951 * %max. Otherwise, find the entry after mas->index. 5952 * @mas: The maple state 5953 * @max: The maximum value to check. 5954 * 5955 * Must hold rcu_read_lock or the write lock. 5956 * If an entry exists, last and index are updated accordingly. 5957 * May set @mas->node to MAS_NONE. 5958 * 5959 * Return: The entry or %NULL. 5960 */ 5961 void *mas_find(struct ma_state *mas, unsigned long max) 5962 { 5963 void *entry = NULL; 5964 5965 if (mas_find_setup(mas, max, &entry)) 5966 return entry; 5967 5968 /* Retries on dead nodes handled by mas_next_slot */ 5969 return mas_next_slot(mas, max, false); 5970 } 5971 EXPORT_SYMBOL_GPL(mas_find); 5972 5973 /** 5974 * mas_find_range() - On the first call, find the entry at or after 5975 * mas->index up to %max. Otherwise, advance to the next slot mas->index. 5976 * @mas: The maple state 5977 * @max: The maximum value to check. 5978 * 5979 * Must hold rcu_read_lock or the write lock. 5980 * If an entry exists, last and index are updated accordingly. 5981 * May set @mas->node to MAS_NONE. 5982 * 5983 * Return: The entry or %NULL. 5984 */ 5985 void *mas_find_range(struct ma_state *mas, unsigned long max) 5986 { 5987 void *entry; 5988 5989 if (mas_find_setup(mas, max, &entry)) 5990 return entry; 5991 5992 /* Retries on dead nodes handled by mas_next_slot */ 5993 return mas_next_slot(mas, max, true); 5994 } 5995 EXPORT_SYMBOL_GPL(mas_find_range); 5996 5997 /** 5998 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev() 5999 * @mas: The maple state 6000 * @min: The minimum index 6001 * @entry: Pointer to the entry 6002 * 6003 * Returns: True if entry is the answer, false otherwise. 
5996 5997 /** 5998 * mas_find_rev_setup() - Internal function to set up mas_find_*_rev() 5999 * @mas: The maple state 6000 * @min: The minimum index 6001 * @entry: Pointer to the entry 6002 * 6003 * Return: True if entry is the answer, false otherwise. 6004 */ 6005 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, 6006 void **entry) 6007 { 6008 *entry = NULL; 6009 6010 if (unlikely(mas_is_none(mas))) { 6011 if (mas->index <= min) 6012 goto none; 6013 6014 mas->last = mas->index; 6015 mas->node = MAS_START; 6016 } 6017 6018 if (unlikely(mas_is_paused(mas))) { 6019 if (unlikely(mas->index <= min)) { 6020 mas->node = MAS_NONE; 6021 return true; 6022 } 6023 mas->node = MAS_START; 6024 mas->last = --mas->index; 6025 } 6026 6027 if (unlikely(mas_is_start(mas))) { 6028 /* First run or continue */ 6029 if (mas->index < min) 6030 return true; 6031 6032 *entry = mas_walk(mas); 6033 if (*entry) 6034 return true; 6035 } 6036 6037 if (unlikely(!mas_searchable(mas))) { 6038 if (mas_is_ptr(mas)) 6039 goto none; 6040 6041 if (mas_is_none(mas)) { 6042 /* 6043 * Walked to the location, and there was nothing so the 6044 * previous location is 0. 6045 */ 6046 mas->last = mas->index = 0; 6047 mas->node = MAS_ROOT; 6048 *entry = mas_root(mas); 6049 return true; 6050 } 6051 } 6052 6053 if (mas->index < min) 6054 return true; 6055 6056 return false; 6057 6058 none: 6059 mas->node = MAS_NONE; 6060 return true; 6061 } 6062 6063 /** 6064 * mas_find_rev() - On the first call, find the first non-null entry at or below 6065 * mas->index down to %min. Otherwise, find the first non-null entry below 6066 * mas->index down to %min. 6067 * @mas: The maple state 6068 * @min: The minimum value to check. 6069 * 6070 * Must hold rcu_read_lock or the write lock. 6071 * If an entry exists, last and index are updated accordingly. 6072 * May set @mas->node to MAS_NONE. 6073 * 6074 * Return: The entry or %NULL. 6075 */ 6076 void *mas_find_rev(struct ma_state *mas, unsigned long min) 6077 { 6078 void *entry; 6079 6080 if (mas_find_rev_setup(mas, min, &entry)) 6081 return entry; 6082 6083 /* Retries on dead nodes handled by mas_prev_slot */ 6084 return mas_prev_slot(mas, min, false); 6085 6086 } 6087 EXPORT_SYMBOL_GPL(mas_find_rev); 6088 6089 /** 6090 * mas_find_range_rev() - On the first call, find the first non-null entry at or 6091 * below mas->index down to %min. Otherwise, advance to the previous slot after 6092 * mas->index down to %min. 6093 * @mas: The maple state 6094 * @min: The minimum value to check. 6095 * 6096 * Must hold rcu_read_lock or the write lock. 6097 * If an entry exists, last and index are updated accordingly. 6098 * May set @mas->node to MAS_NONE. 6099 * 6100 * Return: The entry or %NULL. 6101 */ 6102 void *mas_find_range_rev(struct ma_state *mas, unsigned long min) 6103 { 6104 void *entry; 6105 6106 if (mas_find_rev_setup(mas, min, &entry)) 6107 return entry; 6108 6109 /* Retries on dead nodes handled by mas_prev_slot */ 6110 return mas_prev_slot(mas, min, true); 6111 } 6112 EXPORT_SYMBOL_GPL(mas_find_range_rev); 6113 6114 /** 6115 * mas_erase() - Find the range in which index resides and erase the entire 6116 * range. 6117 * @mas: The maple state 6118 * 6119 * Must hold the write lock. 6120 * Searches for @mas->index, sets @mas->index and @mas->last to the range and 6121 * erases that range. 6122 * 6123 * Return: The entry that was erased or %NULL; @mas->index and @mas->last are updated. 6124 */ 6125 void *mas_erase(struct ma_state *mas) 6126 { 6127 void *entry; 6128 MA_WR_STATE(wr_mas, mas, NULL); 6129 6130 if (mas_is_none(mas) || mas_is_paused(mas)) 6131 mas->node = MAS_START; 6132 6133 /* Retry unnecessary when holding the write lock.
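 * (a node is only marked dead by a writer, which the write lock excludes,
 * so the walk below cannot encounter one)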
*/ 6134 entry = mas_state_walk(mas); 6135 if (!entry) 6136 return NULL; 6137 6138 write_retry: 6139 /* Must reset to ensure spanning writes of last slot are detected */ 6140 mas_reset(mas); 6141 mas_wr_store_setup(&wr_mas); 6142 mas_wr_store_entry(&wr_mas); 6143 if (mas_nomem(mas, GFP_KERNEL)) 6144 goto write_retry; 6145 6146 return entry; 6147 } 6148 EXPORT_SYMBOL_GPL(mas_erase); 6149 6150 /** 6151 * mas_nomem() - Check if there was an error allocating and do the allocation 6152 * if necessary. If there was no allocation error, free any unused allocations. 6153 * @mas: The maple state 6154 * @gfp: The GFP_FLAGS to use for allocations 6155 * Return: true on allocation, false otherwise. 6156 */ 6157 bool mas_nomem(struct ma_state *mas, gfp_t gfp) 6158 __must_hold(mas->tree->ma_lock) 6159 { 6160 if (likely(mas->node != MA_ERROR(-ENOMEM))) { 6161 mas_destroy(mas); 6162 return false; 6163 } 6164 6165 if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) { 6166 mtree_unlock(mas->tree); 6167 mas_alloc_nodes(mas, gfp); 6168 mtree_lock(mas->tree); 6169 } else { 6170 mas_alloc_nodes(mas, gfp); 6171 } 6172 6173 if (!mas_allocated(mas)) 6174 return false; 6175 6176 mas->node = MAS_START; 6177 return true; 6178 } 6179 6180 void __init maple_tree_init(void) 6181 { 6182 maple_node_cache = kmem_cache_create("maple_node", 6183 sizeof(struct maple_node), sizeof(struct maple_node), 6184 SLAB_PANIC, NULL); 6185 } 6186 6187 /** 6188 * mtree_load() - Load a value stored in a maple tree 6189 * @mt: The maple tree 6190 * @index: The index to load 6191 * 6192 * Return: The entry or %NULL 6193 */ 6194 void *mtree_load(struct maple_tree *mt, unsigned long index) 6195 { 6196 MA_STATE(mas, mt, index, index); 6197 void *entry; 6198 6199 trace_ma_read(__func__, &mas); 6200 rcu_read_lock(); 6201 retry: 6202 entry = mas_start(&mas); 6203 if (unlikely(mas_is_none(&mas))) 6204 goto unlock; 6205 6206 if (unlikely(mas_is_ptr(&mas))) { 6207 if (index) 6208 entry = NULL; 6209 6210 goto unlock; 6211 } 6212 6213 entry = mtree_lookup_walk(&mas); 6214 if (!entry && unlikely(mas_is_start(&mas))) 6215 goto retry; 6216 unlock: 6217 rcu_read_unlock(); 6218 if (xa_is_zero(entry)) 6219 return NULL; 6220 6221 return entry; 6222 } 6223 EXPORT_SYMBOL(mtree_load); 6224 6225 /** 6226 * mtree_store_range() - Store an entry at a given range. 6227 * @mt: The maple tree 6228 * @index: The start of the range 6229 * @last: The end of the range 6230 * @entry: The entry to store 6231 * @gfp: The GFP_FLAGS to use for allocations 6232 * 6233 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 6234 * be allocated. 6235 */ 6236 int mtree_store_range(struct maple_tree *mt, unsigned long index, 6237 unsigned long last, void *entry, gfp_t gfp) 6238 { 6239 MA_STATE(mas, mt, index, last); 6240 MA_WR_STATE(wr_mas, &mas, entry); 6241 6242 trace_ma_write(__func__, &mas, 0, entry); 6243 if (WARN_ON_ONCE(xa_is_advanced(entry))) 6244 return -EINVAL; 6245 6246 if (index > last) 6247 return -EINVAL; 6248 6249 mtree_lock(mt); 6250 retry: 6251 mas_wr_store_entry(&wr_mas); 6252 if (mas_nomem(&mas, gfp)) 6253 goto retry; 6254 6255 mtree_unlock(mt); 6256 if (mas_is_err(&mas)) 6257 return xa_err(mas.node); 6258 6259 return 0; 6260 } 6261 EXPORT_SYMBOL(mtree_store_range);
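/*
 * A minimal sketch (hypothetical tree and payload) of the normal-API store;
 * a failed store returns -EINVAL or -ENOMEM:
 *
 *	DEFINE_MTREE(tree);
 *	int ret;
 *
 *	ret = mtree_store_range(&tree, 0x1000, 0x1fff, data, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 */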
6262 6263 /** 6264 * mtree_store() - Store an entry at a given index. 6265 * @mt: The maple tree 6266 * @index: The index to store the value 6267 * @entry: The entry to store 6268 * @gfp: The GFP_FLAGS to use for allocations 6269 * 6270 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 6271 * be allocated. 6272 */ 6273 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry, 6274 gfp_t gfp) 6275 { 6276 return mtree_store_range(mt, index, index, entry, gfp); 6277 } 6278 EXPORT_SYMBOL(mtree_store); 6279 6280 /** 6281 * mtree_insert_range() - Insert an entry at a given range if there is no value. 6282 * @mt: The maple tree 6283 * @first: The start of the range 6284 * @last: The end of the range 6285 * @entry: The entry to store 6286 * @gfp: The GFP_FLAGS to use for allocations. 6287 * 6288 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid 6289 * request, -ENOMEM if memory could not be allocated. 6290 */ 6291 int mtree_insert_range(struct maple_tree *mt, unsigned long first, 6292 unsigned long last, void *entry, gfp_t gfp) 6293 { 6294 MA_STATE(ms, mt, first, last); 6295 6296 if (WARN_ON_ONCE(xa_is_advanced(entry))) 6297 return -EINVAL; 6298 6299 if (first > last) 6300 return -EINVAL; 6301 6302 mtree_lock(mt); 6303 retry: 6304 mas_insert(&ms, entry); 6305 if (mas_nomem(&ms, gfp)) 6306 goto retry; 6307 6308 mtree_unlock(mt); 6309 if (mas_is_err(&ms)) 6310 return xa_err(ms.node); 6311 6312 return 0; 6313 } 6314 EXPORT_SYMBOL(mtree_insert_range); 6315 6316 /** 6317 * mtree_insert() - Insert an entry at a given index if there is no value. 6318 * @mt: The maple tree 6319 * @index: The index to store the value 6320 * @entry: The entry to store 6321 * @gfp: The GFP_FLAGS to use for allocations. 6322 * 6323 * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid 6324 * request, -ENOMEM if memory could not be allocated. 6325 */ 6326 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry, 6327 gfp_t gfp) 6328 { 6329 return mtree_insert_range(mt, index, index, entry, gfp); 6330 } 6331 EXPORT_SYMBOL(mtree_insert); 6332 6333 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, 6334 void *entry, unsigned long size, unsigned long min, 6335 unsigned long max, gfp_t gfp) 6336 { 6337 int ret = 0; 6338 6339 MA_STATE(mas, mt, 0, 0); 6340 if (!mt_is_alloc(mt)) 6341 return -EINVAL; 6342 6343 if (WARN_ON_ONCE(mt_is_reserved(entry))) 6344 return -EINVAL; 6345 6346 mtree_lock(mt); 6347 retry: 6348 ret = mas_empty_area(&mas, min, max, size); 6349 if (ret) 6350 goto unlock; 6351 6352 mas_insert(&mas, entry); 6353 /* 6354 * mas_nomem() may release the lock, causing the allocated area 6355 * to be unavailable, so try to allocate a free area again.
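 * mas_nomem() drops the tree lock only for blocking allocations; it
 * returns true once new nodes have been allocated, in which case the
 * empty-area search is redone from the start.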
6356 */ 6357 if (mas_nomem(&mas, gfp)) 6358 goto retry; 6359 6360 if (mas_is_err(&mas)) 6361 ret = xa_err(mas.node); 6362 else 6363 *startp = mas.index; 6364 6365 unlock: 6366 mtree_unlock(mt); 6367 return ret; 6368 } 6369 EXPORT_SYMBOL(mtree_alloc_range); 6370 6371 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, 6372 void *entry, unsigned long size, unsigned long min, 6373 unsigned long max, gfp_t gfp) 6374 { 6375 int ret = 0; 6376 6377 MA_STATE(mas, mt, 0, 0); 6378 if (!mt_is_alloc(mt)) 6379 return -EINVAL; 6380 6381 if (WARN_ON_ONCE(mt_is_reserved(entry))) 6382 return -EINVAL; 6383 6384 mtree_lock(mt); 6385 retry: 6386 ret = mas_empty_area_rev(&mas, min, max, size); 6387 if (ret) 6388 goto unlock; 6389 6390 mas_insert(&mas, entry); 6391 /* 6392 * mas_nomem() may release the lock, causing the allocated area 6393 * to be unavailable, so try to allocate a free area again. 6394 */ 6395 if (mas_nomem(&mas, gfp)) 6396 goto retry; 6397 6398 if (mas_is_err(&mas)) 6399 ret = xa_err(mas.node); 6400 else 6401 *startp = mas.index; 6402 6403 unlock: 6404 mtree_unlock(mt); 6405 return ret; 6406 } 6407 EXPORT_SYMBOL(mtree_alloc_rrange); 6408 6409 /** 6410 * mtree_erase() - Find an index and erase the entire range. 6411 * @mt: The maple tree 6412 * @index: The index to erase 6413 * 6414 * Erasing is the same as a walk to an entry then a store of a NULL to that 6415 * ENTIRE range. In fact, it is implemented as such using the advanced API. 6416 * 6417 * Return: The entry stored at the @index or %NULL 6418 */ 6419 void *mtree_erase(struct maple_tree *mt, unsigned long index) 6420 { 6421 void *entry = NULL; 6422 6423 MA_STATE(mas, mt, index, index); 6424 trace_ma_op(__func__, &mas); 6425 6426 mtree_lock(mt); 6427 entry = mas_erase(&mas); 6428 mtree_unlock(mt); 6429 6430 return entry; 6431 } 6432 EXPORT_SYMBOL(mtree_erase); 6433 6434 /** 6435 * __mt_destroy() - Walk and free all nodes of a locked maple tree. 6436 * @mt: The maple tree 6437 * 6438 * Note: Does not handle locking. 6439 */ 6440 void __mt_destroy(struct maple_tree *mt) 6441 { 6442 void *root = mt_root_locked(mt); 6443 6444 rcu_assign_pointer(mt->ma_root, NULL); 6445 if (xa_is_node(root)) 6446 mte_destroy_walk(root, mt); 6447 6448 mt->ma_flags = 0; 6449 } 6450 EXPORT_SYMBOL_GPL(__mt_destroy); 6451 6452 /** 6453 * mtree_destroy() - Destroy a maple tree 6454 * @mt: The maple tree 6455 * 6456 * Frees all resources used by the tree. Handles locking. 6457 */ 6458 void mtree_destroy(struct maple_tree *mt) 6459 { 6460 mtree_lock(mt); 6461 __mt_destroy(mt); 6462 mtree_unlock(mt); 6463 } 6464 EXPORT_SYMBOL(mtree_destroy); 6465 6466 /** 6467 * mt_find() - Search from the start up until an entry is found. 6468 * @mt: The maple tree 6469 * @index: Pointer which contains the start location of the search 6470 * @max: The maximum value to check 6471 * 6472 * Handles locking. @index will be incremented to one beyond the range. 
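 *
 * A minimal sketch (hypothetical tree) of scanning every entry; the
 * mt_for_each() helper wraps this same pattern:
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	while ((entry = mt_find(&tree, &index, ULONG_MAX)) != NULL)
 *		pr_debug("entry %p ends at %lx\n", entry, index - 1);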
6473 * 6474 * Return: The entry at or after the @index or %NULL 6475 */ 6476 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) 6477 { 6478 MA_STATE(mas, mt, *index, *index); 6479 void *entry; 6480 #ifdef CONFIG_DEBUG_MAPLE_TREE 6481 unsigned long copy = *index; 6482 #endif 6483 6484 trace_ma_read(__func__, &mas); 6485 6486 if ((*index) > max) 6487 return NULL; 6488 6489 rcu_read_lock(); 6490 retry: 6491 entry = mas_state_walk(&mas); 6492 if (mas_is_start(&mas)) 6493 goto retry; 6494 6495 if (unlikely(xa_is_zero(entry))) 6496 entry = NULL; 6497 6498 if (entry) 6499 goto unlock; 6500 6501 while (mas_searchable(&mas) && (mas.last < max)) { 6502 entry = mas_next_entry(&mas, max); 6503 if (likely(entry && !xa_is_zero(entry))) 6504 break; 6505 } 6506 6507 if (unlikely(xa_is_zero(entry))) 6508 entry = NULL; 6509 unlock: 6510 rcu_read_unlock(); 6511 if (likely(entry)) { 6512 *index = mas.last + 1; 6513 #ifdef CONFIG_DEBUG_MAPLE_TREE 6514 if (MT_WARN_ON(mt, (*index) && ((*index) <= copy))) 6515 pr_err("index not increased! %lx <= %lx\n", 6516 *index, copy); 6517 #endif 6518 } 6519 6520 return entry; 6521 } 6522 EXPORT_SYMBOL(mt_find); 6523 6524 /** 6525 * mt_find_after() - Search from the start up until an entry is found. 6526 * @mt: The maple tree 6527 * @index: Pointer which contains the start location of the search 6528 * @max: The maximum value to check 6529 * 6530 * Handles locking, detects wrapping on index == 0 6531 * 6532 * Return: The entry at or after the @index or %NULL 6533 */ 6534 void *mt_find_after(struct maple_tree *mt, unsigned long *index, 6535 unsigned long max) 6536 { 6537 if (!(*index)) 6538 return NULL; 6539 6540 return mt_find(mt, index, max); 6541 } 6542 EXPORT_SYMBOL(mt_find_after); 6543 6544 #ifdef CONFIG_DEBUG_MAPLE_TREE 6545 atomic_t maple_tree_tests_run; 6546 EXPORT_SYMBOL_GPL(maple_tree_tests_run); 6547 atomic_t maple_tree_tests_passed; 6548 EXPORT_SYMBOL_GPL(maple_tree_tests_passed); 6549 6550 #ifndef __KERNEL__ 6551 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int); 6552 void mt_set_non_kernel(unsigned int val) 6553 { 6554 kmem_cache_set_non_kernel(maple_node_cache, val); 6555 } 6556 6557 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *); 6558 unsigned long mt_get_alloc_size(void) 6559 { 6560 return kmem_cache_get_alloc(maple_node_cache); 6561 } 6562 6563 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *); 6564 void mt_zero_nr_tallocated(void) 6565 { 6566 kmem_cache_zero_nr_tallocated(maple_node_cache); 6567 } 6568 6569 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *); 6570 unsigned int mt_nr_tallocated(void) 6571 { 6572 return kmem_cache_nr_tallocated(maple_node_cache); 6573 } 6574 6575 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *); 6576 unsigned int mt_nr_allocated(void) 6577 { 6578 return kmem_cache_nr_allocated(maple_node_cache); 6579 } 6580 6581 /* 6582 * mas_dead_node() - Check if the maple state is pointing to a dead node. 6583 * @mas: The maple state 6584 * @index: The index to restore in @mas. 6585 * 6586 * Used in test code. 6587 * Return: 1 if @mas has been reset to MAS_START, 0 otherwise. 
*/ 6589 static inline int mas_dead_node(struct ma_state *mas, unsigned long index) 6590 { 6591 if (unlikely(!mas_searchable(mas) || mas_is_start(mas))) 6592 return 0; 6593 6594 if (likely(!mte_dead_node(mas->node))) 6595 return 0; 6596 6597 mas_rewalk(mas, index); 6598 return 1; 6599 } 6600 6601 void mt_cache_shrink(void) 6602 { 6603 } 6604 #else 6605 /* 6606 * mt_cache_shrink() - For testing, don't use this. 6607 * 6608 * Certain testcases can trigger an OOM when combined with other memory 6609 * debugging configuration options. This function is used to reduce the 6610 * possibility of an out of memory event due to kmem_cache objects remaining 6611 * around for longer than usual. 6612 */ 6613 void mt_cache_shrink(void) 6614 { 6615 kmem_cache_shrink(maple_node_cache); 6616 6617 } 6618 EXPORT_SYMBOL_GPL(mt_cache_shrink); 6619 6620 #endif /* not defined __KERNEL__ */ 6621 /* 6622 * mas_get_slot() - Get the entry in the maple state node stored at @offset. 6623 * @mas: The maple state 6624 * @offset: The offset into the slot array to fetch. 6625 * 6626 * Return: The entry stored at @offset. 6627 */ 6628 static inline struct maple_enode *mas_get_slot(struct ma_state *mas, 6629 unsigned char offset) 6630 { 6631 return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)), 6632 offset); 6633 } 6634 6635 6636 /* 6637 * mas_first_entry() - Go to the first leaf and find the first entry. 6638 * @mas: the maple state. @mn: the node backing @mas->node. 6639 * @limit: the maximum index to check. 6640 * @mt: the node type of @mn. 6641 * 6642 * Sets mas->offset to the offset of the entry and mas->index to the range minimum. 6643 * 6644 * Return: The first entry or MAS_NONE. 6645 */ 6646 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, 6647 unsigned long limit, enum maple_type mt) 6648 6649 { 6650 unsigned long max; 6651 unsigned long *pivots; 6652 void __rcu **slots; 6653 void *entry = NULL; 6654 6655 mas->index = mas->min; 6656 if (mas->index > limit) 6657 goto none; 6658 6659 max = mas->max; 6660 mas->offset = 0; 6661 while (likely(!ma_is_leaf(mt))) { 6662 MAS_WARN_ON(mas, mte_dead_node(mas->node)); 6663 slots = ma_slots(mn, mt); 6664 entry = mas_slot(mas, slots, 0); 6665 pivots = ma_pivots(mn, mt); 6666 if (unlikely(ma_dead_node(mn))) 6667 return NULL; 6668 max = pivots[0]; 6669 mas->node = entry; 6670 mn = mas_mn(mas); 6671 mt = mte_node_type(mas->node); 6672 } 6673 MAS_WARN_ON(mas, mte_dead_node(mas->node)); 6674 6675 mas->max = max; 6676 slots = ma_slots(mn, mt); 6677 entry = mas_slot(mas, slots, 0); 6678 if (unlikely(ma_dead_node(mn))) 6679 return NULL; 6680 6681 /* Slot 0 or 1 must be set */ 6682 if (mas->index > limit) 6683 goto none; 6684 6685 if (likely(entry)) 6686 return entry; 6687 6688 mas->offset = 1; 6689 entry = mas_slot(mas, slots, 1); 6690 pivots = ma_pivots(mn, mt); 6691 if (unlikely(ma_dead_node(mn))) 6692 return NULL; 6693 6694 mas->index = pivots[0] + 1; 6695 if (mas->index > limit) 6696 goto none; 6697 6698 if (likely(entry)) 6699 return entry; 6700 6701 none: 6702 if (likely(!ma_dead_node(mn))) 6703 mas->node = MAS_NONE; 6704 return NULL; 6705 } 6706 6707 /* Depth first search, post-order */ 6708 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) 6709 { 6710 6711 struct maple_enode *p = MAS_NONE, *mn = mas->node; 6712 unsigned long p_min, p_max; 6713 6714 mas_next_node(mas, mas_mn(mas), max); 6715 if (!mas_is_none(mas)) 6716 return; 6717 6718 if (mte_is_root(mn)) 6719 return; 6720 6721 mas->node = mn; 6722 mas_ascend(mas); 6723 do { 6724 p
= mas->node; 6725 p_min = mas->min; 6726 p_max = mas->max; 6727 mas_prev_node(mas, 0); 6728 } while (!mas_is_none(mas)); 6729 6730 mas->node = p; 6731 mas->max = p_max; 6732 mas->min = p_min; 6733 } 6734 6735 /* Tree validations */ 6736 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6737 unsigned long min, unsigned long max, unsigned int depth, 6738 enum mt_dump_format format); 6739 static void mt_dump_range(unsigned long min, unsigned long max, 6740 unsigned int depth, enum mt_dump_format format) 6741 { 6742 static const char spaces[] = " "; 6743 6744 switch(format) { 6745 case mt_dump_hex: 6746 if (min == max) 6747 pr_info("%.*s%lx: ", depth * 2, spaces, min); 6748 else 6749 pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); 6750 break; 6751 default: 6752 case mt_dump_dec: 6753 if (min == max) 6754 pr_info("%.*s%lu: ", depth * 2, spaces, min); 6755 else 6756 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 6757 } 6758 } 6759 6760 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, 6761 unsigned int depth, enum mt_dump_format format) 6762 { 6763 mt_dump_range(min, max, depth, format); 6764 6765 if (xa_is_value(entry)) 6766 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), 6767 xa_to_value(entry), entry); 6768 else if (xa_is_zero(entry)) 6769 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 6770 else if (mt_is_reserved(entry)) 6771 pr_cont("UNKNOWN ENTRY (%p)\n", entry); 6772 else 6773 pr_cont("%p\n", entry); 6774 } 6775 6776 static void mt_dump_range64(const struct maple_tree *mt, void *entry, 6777 unsigned long min, unsigned long max, unsigned int depth, 6778 enum mt_dump_format format) 6779 { 6780 struct maple_range_64 *node = &mte_to_node(entry)->mr64; 6781 bool leaf = mte_is_leaf(entry); 6782 unsigned long first = min; 6783 int i; 6784 6785 pr_cont(" contents: "); 6786 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { 6787 switch(format) { 6788 case mt_dump_hex: 6789 pr_cont("%p %lX ", node->slot[i], node->pivot[i]); 6790 break; 6791 default: 6792 case mt_dump_dec: 6793 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6794 } 6795 } 6796 pr_cont("%p\n", node->slot[i]); 6797 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { 6798 unsigned long last = max; 6799 6800 if (i < (MAPLE_RANGE64_SLOTS - 1)) 6801 last = node->pivot[i]; 6802 else if (!node->slot[i] && max != mt_node_max(entry)) 6803 break; 6804 if (last == 0 && i > 0) 6805 break; 6806 if (leaf) 6807 mt_dump_entry(mt_slot(mt, node->slot, i), 6808 first, last, depth + 1, format); 6809 else if (node->slot[i]) 6810 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6811 first, last, depth + 1, format); 6812 6813 if (last == max) 6814 break; 6815 if (last > max) { 6816 switch(format) { 6817 case mt_dump_hex: 6818 pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", 6819 node, last, max, i); 6820 break; 6821 default: 6822 case mt_dump_dec: 6823 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6824 node, last, max, i); 6825 } 6826 } 6827 first = last + 1; 6828 } 6829 } 6830 6831 static void mt_dump_arange64(const struct maple_tree *mt, void *entry, 6832 unsigned long min, unsigned long max, unsigned int depth, 6833 enum mt_dump_format format) 6834 { 6835 struct maple_arange_64 *node = &mte_to_node(entry)->ma64; 6836 bool leaf = mte_is_leaf(entry); 6837 unsigned long first = min; 6838 int i; 6839 6840 pr_cont(" contents: "); 6841 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) 6842 pr_cont("%lu ", node->gap[i]); 6843 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); 6844 for (i 
= 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) 6845 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6846 pr_cont("%p\n", node->slot[i]); 6847 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { 6848 unsigned long last = max; 6849 6850 if (i < (MAPLE_ARANGE64_SLOTS - 1)) 6851 last = node->pivot[i]; 6852 else if (!node->slot[i]) 6853 break; 6854 if (last == 0 && i > 0) 6855 break; 6856 if (leaf) 6857 mt_dump_entry(mt_slot(mt, node->slot, i), 6858 first, last, depth + 1, format); 6859 else if (node->slot[i]) 6860 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6861 first, last, depth + 1, format); 6862 6863 if (last == max) 6864 break; 6865 if (last > max) { 6866 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6867 node, last, max, i); 6868 break; 6869 } 6870 first = last + 1; 6871 } 6872 } 6873 6874 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6875 unsigned long min, unsigned long max, unsigned int depth, 6876 enum mt_dump_format format) 6877 { 6878 struct maple_node *node = mte_to_node(entry); 6879 unsigned int type = mte_node_type(entry); 6880 unsigned int i; 6881 6882 mt_dump_range(min, max, depth, format); 6883 6884 pr_cont("node %p depth %d type %d parent %p", node, depth, type, 6885 node ? node->parent : NULL); 6886 switch (type) { 6887 case maple_dense: 6888 pr_cont("\n"); 6889 for (i = 0; i < MAPLE_NODE_SLOTS; i++) { 6890 if (min + i > max) 6891 pr_cont("OUT OF RANGE: "); 6892 mt_dump_entry(mt_slot(mt, node->slot, i), 6893 min + i, min + i, depth, format); 6894 } 6895 break; 6896 case maple_leaf_64: 6897 case maple_range_64: 6898 mt_dump_range64(mt, entry, min, max, depth, format); 6899 break; 6900 case maple_arange_64: 6901 mt_dump_arange64(mt, entry, min, max, depth, format); 6902 break; 6903 6904 default: 6905 pr_cont(" UNKNOWN TYPE\n"); 6906 } 6907 } 6908 6909 void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) 6910 { 6911 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); 6912 6913 pr_info("maple_tree(%p) flags %X, height %u root %p\n", 6914 mt, mt->ma_flags, mt_height(mt), entry); 6915 if (!xa_is_node(entry)) 6916 mt_dump_entry(entry, 0, 0, 0, format); 6917 else if (entry) 6918 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); 6919 } 6920 EXPORT_SYMBOL_GPL(mt_dump); 6921 6922 /* 6923 * Calculate the maximum gap in a node and check if that's what is reported in 6924 * the parent (unless root). 
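 *
 * For dense nodes the maximum gap is the longest run of empty slots;
 * otherwise a gap is either read from the node's gap array (allocation
 * trees) or computed as the width of a NULL range, p_end - p_start + 1.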
6925 */ 6926 static void mas_validate_gaps(struct ma_state *mas) 6927 { 6928 struct maple_enode *mte = mas->node; 6929 struct maple_node *p_mn; 6930 unsigned long gap = 0, max_gap = 0; 6931 unsigned long p_end, p_start = mas->min; 6932 unsigned char p_slot; 6933 unsigned long *gaps = NULL; 6934 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); 6935 int i; 6936 6937 if (ma_is_dense(mte_node_type(mte))) { 6938 for (i = 0; i < mt_slot_count(mte); i++) { 6939 if (mas_get_slot(mas, i)) { 6940 if (gap > max_gap) 6941 max_gap = gap; 6942 gap = 0; 6943 continue; 6944 } 6945 gap++; 6946 } 6947 goto counted; 6948 } 6949 6950 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); 6951 for (i = 0; i < mt_slot_count(mte); i++) { 6952 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); 6953 6954 if (!gaps) { 6955 if (mas_get_slot(mas, i)) { 6956 gap = 0; 6957 goto not_empty; 6958 } 6959 6960 gap += p_end - p_start + 1; 6961 } else { 6962 void *entry = mas_get_slot(mas, i); 6963 6964 gap = gaps[i]; 6965 if (!entry) { 6966 if (gap != p_end - p_start + 1) { 6967 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", 6968 mas_mn(mas), i, 6969 mas_get_slot(mas, i), gap, 6970 p_end, p_start); 6971 mt_dump(mas->tree, mt_dump_hex); 6972 6973 MT_BUG_ON(mas->tree, 6974 gap != p_end - p_start + 1); 6975 } 6976 } else { 6977 if (gap > p_end - p_start + 1) { 6978 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", 6979 mas_mn(mas), i, gap, p_end, p_start, 6980 p_end - p_start + 1); 6981 MT_BUG_ON(mas->tree, 6982 gap > p_end - p_start + 1); 6983 } 6984 } 6985 } 6986 6987 if (gap > max_gap) 6988 max_gap = gap; 6989 not_empty: 6990 p_start = p_end + 1; 6991 if (p_end >= mas->max) 6992 break; 6993 } 6994 6995 counted: 6996 if (mte_is_root(mte)) 6997 return; 6998 6999 p_slot = mte_parent_slot(mas->node); 7000 p_mn = mte_parent(mte); 7001 MT_BUG_ON(mas->tree, max_gap > mas->max); 7002 if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { 7003 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); 7004 mt_dump(mas->tree, mt_dump_hex); 7005 } 7006 7007 MT_BUG_ON(mas->tree, 7008 ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap); 7009 } 7010 7011 static void mas_validate_parent_slot(struct ma_state *mas) 7012 { 7013 struct maple_node *parent; 7014 struct maple_enode *node; 7015 enum maple_type p_type; 7016 unsigned char p_slot; 7017 void __rcu **slots; 7018 int i; 7019 7020 if (mte_is_root(mas->node)) 7021 return; 7022 7023 p_slot = mte_parent_slot(mas->node); 7024 p_type = mas_parent_type(mas, mas->node); 7025 parent = mte_parent(mas->node); 7026 slots = ma_slots(parent, p_type); 7027 MT_BUG_ON(mas->tree, mas_mn(mas) == parent); 7028 7029 /* Check prev/next parent slot for duplicate node entry */ 7030 7031 for (i = 0; i < mt_slots[p_type]; i++) { 7032 node = mas_slot(mas, slots, i); 7033 if (i == p_slot) { 7034 if (node != mas->node) 7035 pr_err("parent %p[%u] does not have %p\n", 7036 parent, i, mas_mn(mas)); 7037 MT_BUG_ON(mas->tree, node != mas->node); 7038 } else if (node == mas->node) { 7039 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", 7040 mas_mn(mas), parent, i, p_slot); 7041 MT_BUG_ON(mas->tree, node == mas->node); 7042 } 7043 } 7044 } 7045 7046 static void mas_validate_child_slot(struct ma_state *mas) 7047 { 7048 enum maple_type type = mte_node_type(mas->node); 7049 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7050 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); 7051 struct maple_enode *child; 7052 unsigned char i; 7053 7054 if 
(mte_is_leaf(mas->node)) 7055 return; 7056 7057 for (i = 0; i < mt_slots[type]; i++) { 7058 child = mas_slot(mas, slots, i); 7059 if (!pivots[i] || pivots[i] == mas->max) 7060 break; 7061 7062 if (!child) 7063 break; 7064 7065 if (mte_parent_slot(child) != i) { 7066 pr_err("Slot error at %p[%u]: child %p has pslot %u\n", 7067 mas_mn(mas), i, mte_to_node(child), 7068 mte_parent_slot(child)); 7069 MT_BUG_ON(mas->tree, 1); 7070 } 7071 7072 if (mte_parent(child) != mte_to_node(mas->node)) { 7073 pr_err("child %p has parent %p not %p\n", 7074 mte_to_node(child), mte_parent(child), 7075 mte_to_node(mas->node)); 7076 MT_BUG_ON(mas->tree, 1); 7077 } 7078 } 7079 } 7080 7081 /* 7082 * Validate all pivots are within mas->min and mas->max. 7083 */ 7084 static void mas_validate_limits(struct ma_state *mas) 7085 { 7086 int i; 7087 unsigned long prev_piv = 0; 7088 enum maple_type type = mte_node_type(mas->node); 7089 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7090 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 7091 7092 /* all limits are fine here. */ 7093 if (mte_is_root(mas->node)) 7094 return; 7095 7096 for (i = 0; i < mt_slots[type]; i++) { 7097 unsigned long piv; 7098 7099 piv = mas_safe_pivot(mas, pivots, i, type); 7100 7101 if (!piv && (i != 0)) 7102 break; 7103 7104 if (!mte_is_leaf(mas->node)) { 7105 void *entry = mas_slot(mas, slots, i); 7106 7107 if (!entry) 7108 pr_err("%p[%u] cannot be null\n", 7109 mas_mn(mas), i); 7110 7111 MT_BUG_ON(mas->tree, !entry); 7112 } 7113 7114 if (prev_piv > piv) { 7115 pr_err("%p[%u] piv %lu < prev_piv %lu\n", 7116 mas_mn(mas), i, piv, prev_piv); 7117 MAS_WARN_ON(mas, piv < prev_piv); 7118 } 7119 7120 if (piv < mas->min) { 7121 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, 7122 piv, mas->min); 7123 MAS_WARN_ON(mas, piv < mas->min); 7124 } 7125 if (piv > mas->max) { 7126 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, 7127 piv, mas->max); 7128 MAS_WARN_ON(mas, piv > mas->max); 7129 } 7130 prev_piv = piv; 7131 if (piv == mas->max) 7132 break; 7133 } 7134 for (i += 1; i < mt_slots[type]; i++) { 7135 void *entry = mas_slot(mas, slots, i); 7136 7137 if (entry && (i != mt_slots[type] - 1)) { 7138 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), 7139 i, entry); 7140 MT_BUG_ON(mas->tree, entry != NULL); 7141 } 7142 7143 if (i < mt_pivots[type]) { 7144 unsigned long piv = pivots[i]; 7145 7146 if (!piv) 7147 continue; 7148 7149 pr_err("%p[%u] should not have piv %lu\n", 7150 mas_mn(mas), i, piv); 7151 MAS_WARN_ON(mas, i < mt_pivots[type] - 1); 7152 } 7153 } 7154 } 7155 7156 static void mt_validate_nulls(struct maple_tree *mt) 7157 { 7158 void *entry, *last = (void *)1; 7159 unsigned char offset = 0; 7160 void __rcu **slots; 7161 MA_STATE(mas, mt, 0, 0); 7162 7163 mas_start(&mas); 7164 if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) 7165 return; 7166 7167 while (!mte_is_leaf(mas.node)) 7168 mas_descend(&mas); 7169 7170 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); 7171 do { 7172 entry = mas_slot(&mas, slots, offset); 7173 if (!last && !entry) { 7174 pr_err("Sequential nulls end at %p[%u]\n", 7175 mas_mn(&mas), offset); 7176 } 7177 MT_BUG_ON(mt, !last && !entry); 7178 last = entry; 7179 if (offset == mas_data_end(&mas)) { 7180 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); 7181 if (mas_is_none(&mas)) 7182 return; 7183 offset = 0; 7184 slots = ma_slots(mte_to_node(mas.node), 7185 mte_node_type(mas.node)); 7186 } else { 7187 offset++; 7188 } 7189 7190 } while (!mas_is_none(&mas)); 7191 } 7192 7193 /* 7194 * validate a maple 
tree by checking: 7195 * 1. The limits (pivots are within mas->min to mas->max) 7196 * 2. The gap is correctly set in the parents 7197 */ 7198 void mt_validate(struct maple_tree *mt) 7199 { 7200 unsigned char end; 7201 7202 MA_STATE(mas, mt, 0, 0); 7203 rcu_read_lock(); 7204 mas_start(&mas); 7205 if (!mas_searchable(&mas)) 7206 goto done; 7207 7208 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); 7209 while (!mas_is_none(&mas)) { 7210 MAS_WARN_ON(&mas, mte_dead_node(mas.node)); 7211 if (!mte_is_root(mas.node)) { 7212 end = mas_data_end(&mas); 7213 if (MAS_WARN_ON(&mas, 7214 (end < mt_min_slot_count(mas.node)) && 7215 (mas.max != ULONG_MAX))) { 7216 pr_err("Invalid size %u of %p\n", end, 7217 mas_mn(&mas)); 7218 } 7219 } 7220 mas_validate_parent_slot(&mas); 7221 mas_validate_child_slot(&mas); 7222 mas_validate_limits(&mas); 7223 if (mt_is_alloc(mt)) 7224 mas_validate_gaps(&mas); 7225 mas_dfs_postorder(&mas, ULONG_MAX); 7226 } 7227 mt_validate_nulls(mt); 7228 done: 7229 rcu_read_unlock(); 7230 7231 } 7232 EXPORT_SYMBOL_GPL(mt_validate); 7233 7234 void mas_dump(const struct ma_state *mas) 7235 { 7236 pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node); 7237 if (mas_is_none(mas)) 7238 pr_err("(MAS_NONE) "); 7239 else if (mas_is_ptr(mas)) 7240 pr_err("(MAS_ROOT) "); 7241 else if (mas_is_start(mas)) 7242 pr_err("(MAS_START) "); 7243 else if (mas_is_paused(mas)) 7244 pr_err("(MAS_PAUSED) "); 7245 7246 pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last); 7247 pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n", 7248 mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); 7249 if (mas->index > mas->last) 7250 pr_err("Check index & last\n"); 7251 } 7252 EXPORT_SYMBOL_GPL(mas_dump); 7253 7254 void mas_wr_dump(const struct ma_wr_state *wr_mas) 7255 { 7256 pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n", 7257 wr_mas->node, wr_mas->r_min, wr_mas->r_max); 7258 pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", 7259 wr_mas->type, wr_mas->offset_end, wr_mas->node_end, 7260 wr_mas->end_piv); 7261 } 7262 EXPORT_SYMBOL_GPL(mas_wr_dump); 7263 7264 #endif /* CONFIG_DEBUG_MAPLE_TREE */ 7265