// SPDX-License-Identifier: GPL-2.0+
/*
 * Maple Tree implementation
 * Copyright (c) 2018-2022 Oracle Corporation
 * Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
 *	    Matthew Wilcox <willy@infradead.org>
 */

/*
 * DOC: Interesting implementation details of the Maple Tree
 *
 * Each node type has a number of slots for entries and a number of slots for
 * pivots.  In the case of dense nodes, the pivots are implied by the position
 * and are simply the slot index + the minimum of the node.
 *
 * In regular B-Tree terms, pivots are called keys.  The term pivot is used to
 * indicate that the tree is specifying ranges.  Pivots may appear in the
 * subtree with an entry attached to the value, whereas keys are unique to a
 * specific position of a B-tree.  Pivot values are inclusive of the slot with
 * the same index.
 *
 *
 * The following illustrates the layout of a range64 node's slots and pivots.
 *
 *
 *  Slots -> | 0 | 1 | 2 | ... | 12 | 13 | 14 | 15 |
 *           ┬   ┬   ┬   ┬     ┬    ┬    ┬    ┬    ┬
 *           │   │   │   │     │    │    │    │    └─ Implied maximum
 *           │   │   │   │     │    │    │    └─ Pivot 14
 *           │   │   │   │     │    │    └─ Pivot 13
 *           │   │   │   │     │    └─ Pivot 12
 *           │   │   │   │     └─ Pivot 11
 *           │   │   │   └─ Pivot 2
 *           │   │   └─ Pivot 1
 *           │   └─ Pivot 0
 *           └─ Implied minimum
 *
 * Slot contents:
 *  Internal (non-leaf) nodes contain pointers to other nodes.
 *  Leaf nodes contain entries.
 *
 * The location of interest is often referred to as an offset.  All offsets
 * have a slot, but the last offset has an implied pivot from the node above
 * (or ULONG_MAX for the root node).
 *
 * Ranges complicate certain write activities.  When modifying any of
 * the B-tree variants, it is known that one entry will either be added or
 * deleted.  When modifying the Maple Tree, one store operation may overwrite
 * the entire data set, or one half of the tree, or the middle half of the
 * tree.
 *
 */

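/*
 * For illustration (the values are arbitrary), a leaf range64 node with an
 * implied minimum of 100 and an implied maximum of 255 could hold three
 * ranges as:
 *
 *	slot[0] = A,	pivot[0] = 129	-> A occupies [100, 129]
 *	slot[1] = NULL,	pivot[1] = 199	-> nothing stored in [130, 199]
 *	slot[2] = B,	pivot[2] = 0	-> B occupies [200, 255]
 *
 * The zero in pivot[2] marks the end of the pivots; the last occupied slot
 * is bounded by the implied maximum passed down from the parent (here 255).
 */
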
#include <linux/maple_tree.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/limits.h>
#include <asm/barrier.h>

#define CREATE_TRACE_POINTS
#include <trace/events/maple_tree.h>

#define MA_ROOT_PARENT 1

/*
 * Maple state flags
 * * MA_STATE_BULK		- Bulk insert mode
 * * MA_STATE_REBALANCE		- Indicate a rebalance during bulk insert
 * * MA_STATE_PREALLOC		- Preallocated nodes, WARN_ON allocation
 */
#define MA_STATE_BULK		1
#define MA_STATE_REBALANCE	2
#define MA_STATE_PREALLOC	4

#define ma_parent_ptr(x) ((struct maple_pnode *)(x))
#define ma_mnode_ptr(x) ((struct maple_node *)(x))
#define ma_enode_ptr(x) ((struct maple_enode *)(x))
static struct kmem_cache *maple_node_cache;

#ifdef CONFIG_DEBUG_MAPLE_TREE
static const unsigned long mt_max[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= ULONG_MAX,
	[maple_range_64]	= ULONG_MAX,
	[maple_arange_64]	= ULONG_MAX,
};
#define mt_node_max(x) mt_max[mte_node_type(x)]
#endif

static const unsigned char mt_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS,
};
#define mt_slot_count(x) mt_slots[mte_node_type(x)]

static const unsigned char mt_pivots[] = {
	[maple_dense]		= 0,
	[maple_leaf_64]		= MAPLE_RANGE64_SLOTS - 1,
	[maple_range_64]	= MAPLE_RANGE64_SLOTS - 1,
	[maple_arange_64]	= MAPLE_ARANGE64_SLOTS - 1,
};
#define mt_pivot_count(x) mt_pivots[mte_node_type(x)]

static const unsigned char mt_min_slots[] = {
	[maple_dense]		= MAPLE_NODE_SLOTS / 2,
	[maple_leaf_64]		= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_range_64]	= (MAPLE_RANGE64_SLOTS / 2) - 2,
	[maple_arange_64]	= (MAPLE_ARANGE64_SLOTS / 2) - 1,
};
#define mt_min_slot_count(x) mt_min_slots[mte_node_type(x)]

#define MAPLE_BIG_NODE_SLOTS	(MAPLE_RANGE64_SLOTS * 2 + 2)
#define MAPLE_BIG_NODE_GAPS	(MAPLE_ARANGE64_SLOTS * 2 + 1)

struct maple_big_node {
	struct maple_pnode *parent;
	unsigned long pivot[MAPLE_BIG_NODE_SLOTS - 1];
	union {
		struct maple_enode *slot[MAPLE_BIG_NODE_SLOTS];
		struct {
			unsigned long padding[MAPLE_BIG_NODE_GAPS];
			unsigned long gap[MAPLE_BIG_NODE_GAPS];
		};
	};
	unsigned char b_end;
	enum maple_type type;
};

/*
 * The maple_subtree_state is used to build a tree to replace a segment of an
 * existing tree in a more atomic way.  Any walkers of the older tree will
 * hit a dead node and restart on updates.
 */

struct maple_subtree_state {
	struct ma_state *orig_l;	/* Original left side of subtree */
	struct ma_state *orig_r;	/* Original right side of subtree */
	struct ma_state *l;		/* New left side of subtree */
	struct ma_state *m;		/* New middle of subtree (rare) */
	struct ma_state *r;		/* New right side of subtree */
	struct ma_topiary *free;	/* nodes to be freed */
	struct ma_topiary *destroy;	/* Nodes to be destroyed (walked and freed) */
	struct maple_big_node *bn;
};

#ifdef CONFIG_KASAN_STACK
/* Prevent mas_wr_bnode() from exceeding the stack frame limit */
#define noinline_for_kasan noinline_for_stack
#else
#define noinline_for_kasan inline
#endif

/* Functions */
static inline struct maple_node *mt_alloc_one(gfp_t gfp)
{
	return kmem_cache_alloc(maple_node_cache, gfp);
}

static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
{
	return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}

static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
	kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
}

static void mt_free_rcu(struct rcu_head *head)
{
	struct maple_node *node = container_of(head, struct maple_node, rcu);

	kmem_cache_free(maple_node_cache, node);
}

/*
 * ma_free_rcu() - Use rcu callback to free a maple node
 * @node: The node to free
 *
 * The maple tree uses the parent pointer to indicate this node is no longer
 * in use and will be freed.
 */
static void ma_free_rcu(struct maple_node *node)
{
	node->parent = ma_parent_ptr(node);
	call_rcu(&node->rcu, mt_free_rcu);
}

static void mas_set_height(struct ma_state *mas)
{
	unsigned int new_flags = mas->tree->ma_flags;

	new_flags &= ~MT_FLAGS_HEIGHT_MASK;
	BUG_ON(mas->depth > MAPLE_HEIGHT_MAX);
	new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET;
	mas->tree->ma_flags = new_flags;
}

static unsigned int mas_mt_height(struct ma_state *mas)
{
	return mt_height(mas->tree);
}

static inline enum maple_type mte_node_type(const struct maple_enode *entry)
{
	return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
		MAPLE_NODE_TYPE_MASK;
}

static inline bool ma_is_dense(const enum maple_type type)
{
	return type < maple_leaf_64;
}

static inline bool ma_is_leaf(const enum maple_type type)
{
	return type < maple_range_64;
}

static inline bool mte_is_leaf(const struct maple_enode *entry)
{
	return ma_is_leaf(mte_node_type(entry));
}

/*
 * We also reserve values with the bottom two bits set to '10' which are
 * below 4096
 */
static inline bool mt_is_reserved(const void *entry)
{
	return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
		xa_is_internal(entry);
}

static inline void mas_set_err(struct ma_state *mas, long err)
{
	mas->node = MA_ERROR(err);
}

static inline bool mas_is_ptr(struct ma_state *mas)
{
	return mas->node == MAS_ROOT;
}

static inline bool mas_is_start(struct ma_state *mas)
{
	return mas->node == MAS_START;
}

bool mas_is_err(struct ma_state *mas)
{
	return xa_is_err(mas->node);
}

static inline bool mas_searchable(struct ma_state *mas)
{
	if (mas_is_none(mas))
		return false;

	if (mas_is_ptr(mas))
		return false;

	return true;
}

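/*
 * The predicates above distinguish the special mas->node values that act as
 * a small state machine: MAS_START (walk not yet begun), MAS_ROOT (the entry
 * is stored directly in the root pointer), MAS_NONE (no node, e.g. an empty
 * tree) and MA_ERROR() encoded errnos.  A typical check sequence before
 * trusting a maple state, sketched:
 *
 *	if (mas_is_err(&mas))
 *		return xa_err(mas.node);	// decode the errno
 *	if (!mas_searchable(&mas))
 *		return NULL;			// empty tree or direct pointer
 */
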
static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
{
	return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mte_to_mat() - Convert a maple encoded node to a maple topiary node.
 * @entry: The maple encoded node
 *
 * Return: a maple topiary pointer
 */
static inline struct maple_topiary *mte_to_mat(const struct maple_enode *entry)
{
	return (struct maple_topiary *)
		((unsigned long)entry & ~MAPLE_NODE_MASK);
}

/*
 * mas_mn() - Get the maple state node.
 * @mas: The maple state
 *
 * Return: the maple node (not encoded - bare pointer).
 */
static inline struct maple_node *mas_mn(const struct ma_state *mas)
{
	return mte_to_node(mas->node);
}

/*
 * mte_set_node_dead() - Set a maple encoded node as dead.
 * @mn: The maple encoded node.
 */
static inline void mte_set_node_dead(struct maple_enode *mn)
{
	mte_to_node(mn)->parent = ma_parent_ptr(mte_to_node(mn));
	smp_wmb(); /* Needed for RCU */
}

/* Bit 1 indicates the root is a node */
#define MAPLE_ROOT_NODE			0x02
/* maple_type stored bit 3-6 */
#define MAPLE_ENODE_TYPE_SHIFT		0x03
/* Bit 2 means a NULL somewhere below */
#define MAPLE_ENODE_NULL		0x04

static inline struct maple_enode *mt_mk_node(const struct maple_node *node,
					     enum maple_type type)
{
	return (void *)((unsigned long)node |
			(type << MAPLE_ENODE_TYPE_SHIFT) | MAPLE_ENODE_NULL);
}

static inline void *mte_mk_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ROOT_NODE);
}

static inline void *mte_safe_root(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ROOT_NODE);
}

static inline void *mte_set_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node & ~MAPLE_ENODE_NULL);
}

static inline void *mte_clear_full(const struct maple_enode *node)
{
	return (void *)((unsigned long)node | MAPLE_ENODE_NULL);
}

static inline bool mte_has_null(const struct maple_enode *node)
{
	return (unsigned long)node & MAPLE_ENODE_NULL;
}

static inline bool ma_is_root(struct maple_node *node)
{
	return ((unsigned long)node->parent & MA_ROOT_PARENT);
}

static inline bool mte_is_root(const struct maple_enode *node)
{
	return ma_is_root(mte_to_node(node));
}

static inline bool mas_is_root_limits(const struct ma_state *mas)
{
	return !mas->min && mas->max == ULONG_MAX;
}

static inline bool mt_is_alloc(struct maple_tree *mt)
{
	return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}

/*
 * The Parent Pointer
 * Excluding root, the parent pointer is 256B aligned like all other tree
 * nodes.  When storing 32 or 64 bit values, the offset can fit into 5 bits.
 * The 16 bit values need an extra bit to store the offset.  This extra bit
 * comes from a reuse of the last bit in the node type.  This is possible by
 * using bit 1 to indicate if bit 2 is part of the type or the slot.
 *
 * Note types:
 *  0x??1 = Root
 *  0x?00 = 16 bit nodes
 *  0x010 = 32 bit nodes
 *  0x110 = 64 bit nodes
 *
 * Slot size and alignment
 *  0b??1 : Root
 *  0b?00 : 16 bit values, type in 0-1, slot in 2-7
 *  0b010 : 32 bit values, type in 0-2, slot in 3-7
 *  0b110 : 64 bit values, type in 0-2, slot in 3-7
 */

#define MAPLE_PARENT_ROOT		0x01

#define MAPLE_PARENT_SLOT_SHIFT		0x03
#define MAPLE_PARENT_SLOT_MASK		0xF8

#define MAPLE_PARENT_16B_SLOT_SHIFT	0x02
#define MAPLE_PARENT_16B_SLOT_MASK	0xFC

#define MAPLE_PARENT_RANGE64		0x06
#define MAPLE_PARENT_RANGE32		0x04
#define MAPLE_PARENT_NOT_RANGE16	0x02

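/*
 * For example, a child stored in slot 5 of a 64 bit range parent ends up
 * with the low byte of its parent pointer set to:
 *
 *	(5 << MAPLE_PARENT_SLOT_SHIFT) | MAPLE_PARENT_RANGE64 == 0x2e
 *
 * i.e. 0b00101110: slot 5 in bits 3-7, type 0b110 in bits 0-2.  Decoding
 * reverses the operation: mask with MAPLE_PARENT_SLOT_MASK and shift right
 * by MAPLE_PARENT_SLOT_SHIFT to recover the slot, then mask the low bits to
 * recover the type.  The 256B node alignment guarantees the low byte is
 * free for this metadata.
 */
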
373 * 374 * Note types: 375 * 0x??1 = Root 376 * 0x?00 = 16 bit nodes 377 * 0x010 = 32 bit nodes 378 * 0x110 = 64 bit nodes 379 * 380 * Slot size and alignment 381 * 0b??1 : Root 382 * 0b?00 : 16 bit values, type in 0-1, slot in 2-7 383 * 0b010 : 32 bit values, type in 0-2, slot in 3-7 384 * 0b110 : 64 bit values, type in 0-2, slot in 3-7 385 */ 386 387 #define MAPLE_PARENT_ROOT 0x01 388 389 #define MAPLE_PARENT_SLOT_SHIFT 0x03 390 #define MAPLE_PARENT_SLOT_MASK 0xF8 391 392 #define MAPLE_PARENT_16B_SLOT_SHIFT 0x02 393 #define MAPLE_PARENT_16B_SLOT_MASK 0xFC 394 395 #define MAPLE_PARENT_RANGE64 0x06 396 #define MAPLE_PARENT_RANGE32 0x04 397 #define MAPLE_PARENT_NOT_RANGE16 0x02 398 399 /* 400 * mte_parent_shift() - Get the parent shift for the slot storage. 401 * @parent: The parent pointer cast as an unsigned long 402 * Return: The shift into that pointer to the star to of the slot 403 */ 404 static inline unsigned long mte_parent_shift(unsigned long parent) 405 { 406 /* Note bit 1 == 0 means 16B */ 407 if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) 408 return MAPLE_PARENT_SLOT_SHIFT; 409 410 return MAPLE_PARENT_16B_SLOT_SHIFT; 411 } 412 413 /* 414 * mte_parent_slot_mask() - Get the slot mask for the parent. 415 * @parent: The parent pointer cast as an unsigned long. 416 * Return: The slot mask for that parent. 417 */ 418 static inline unsigned long mte_parent_slot_mask(unsigned long parent) 419 { 420 /* Note bit 1 == 0 means 16B */ 421 if (likely(parent & MAPLE_PARENT_NOT_RANGE16)) 422 return MAPLE_PARENT_SLOT_MASK; 423 424 return MAPLE_PARENT_16B_SLOT_MASK; 425 } 426 427 /* 428 * mas_parent_enum() - Return the maple_type of the parent from the stored 429 * parent type. 430 * @mas: The maple state 431 * @node: The maple_enode to extract the parent's enum 432 * Return: The node->parent maple_type 433 */ 434 static inline 435 enum maple_type mte_parent_enum(struct maple_enode *p_enode, 436 struct maple_tree *mt) 437 { 438 unsigned long p_type; 439 440 p_type = (unsigned long)p_enode; 441 if (p_type & MAPLE_PARENT_ROOT) 442 return 0; /* Validated in the caller. */ 443 444 p_type &= MAPLE_NODE_MASK; 445 p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type)); 446 447 switch (p_type) { 448 case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ 449 if (mt_is_alloc(mt)) 450 return maple_arange_64; 451 return maple_range_64; 452 } 453 454 return 0; 455 } 456 457 static inline 458 enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode) 459 { 460 return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree); 461 } 462 463 /* 464 * mte_set_parent() - Set the parent node and encode the slot 465 * @enode: The encoded maple node. 466 * @parent: The encoded maple node that is the parent of @enode. 467 * @slot: The slot that @enode resides in @parent. 468 * 469 * Slot number is encoded in the enode->parent bit 3-6 or 2-6, depending on the 470 * parent type. 
471 */ 472 static inline 473 void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent, 474 unsigned char slot) 475 { 476 unsigned long val = (unsigned long)parent; 477 unsigned long shift; 478 unsigned long type; 479 enum maple_type p_type = mte_node_type(parent); 480 481 BUG_ON(p_type == maple_dense); 482 BUG_ON(p_type == maple_leaf_64); 483 484 switch (p_type) { 485 case maple_range_64: 486 case maple_arange_64: 487 shift = MAPLE_PARENT_SLOT_SHIFT; 488 type = MAPLE_PARENT_RANGE64; 489 break; 490 default: 491 case maple_dense: 492 case maple_leaf_64: 493 shift = type = 0; 494 break; 495 } 496 497 val &= ~MAPLE_NODE_MASK; /* Clear all node metadata in parent */ 498 val |= (slot << shift) | type; 499 mte_to_node(enode)->parent = ma_parent_ptr(val); 500 } 501 502 /* 503 * mte_parent_slot() - get the parent slot of @enode. 504 * @enode: The encoded maple node. 505 * 506 * Return: The slot in the parent node where @enode resides. 507 */ 508 static inline unsigned int mte_parent_slot(const struct maple_enode *enode) 509 { 510 unsigned long val = (unsigned long)mte_to_node(enode)->parent; 511 512 if (val & MA_ROOT_PARENT) 513 return 0; 514 515 /* 516 * Okay to use MAPLE_PARENT_16B_SLOT_MASK as the last bit will be lost 517 * by shift if the parent shift is MAPLE_PARENT_SLOT_SHIFT 518 */ 519 return (val & MAPLE_PARENT_16B_SLOT_MASK) >> mte_parent_shift(val); 520 } 521 522 /* 523 * mte_parent() - Get the parent of @node. 524 * @node: The encoded maple node. 525 * 526 * Return: The parent maple node. 527 */ 528 static inline struct maple_node *mte_parent(const struct maple_enode *enode) 529 { 530 return (void *)((unsigned long) 531 (mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK); 532 } 533 534 /* 535 * ma_dead_node() - check if the @enode is dead. 536 * @enode: The encoded maple node 537 * 538 * Return: true if dead, false otherwise. 539 */ 540 static inline bool ma_dead_node(const struct maple_node *node) 541 { 542 struct maple_node *parent = (void *)((unsigned long) 543 node->parent & ~MAPLE_NODE_MASK); 544 545 return (parent == node); 546 } 547 /* 548 * mte_dead_node() - check if the @enode is dead. 549 * @enode: The encoded maple node 550 * 551 * Return: true if dead, false otherwise. 552 */ 553 static inline bool mte_dead_node(const struct maple_enode *enode) 554 { 555 struct maple_node *parent, *node; 556 557 node = mte_to_node(enode); 558 parent = mte_parent(enode); 559 return (parent == node); 560 } 561 562 /* 563 * mas_allocated() - Get the number of nodes allocated in a maple state. 564 * @mas: The maple state 565 * 566 * The ma_state alloc member is overloaded to hold a pointer to the first 567 * allocated node or to the number of requested nodes to allocate. If bit 0 is 568 * set, then the alloc contains the number of requested nodes. If there is an 569 * allocated node, then the total allocated nodes is in that node. 570 * 571 * Return: The total number of nodes allocated 572 */ 573 static inline unsigned long mas_allocated(const struct ma_state *mas) 574 { 575 if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) 576 return 0; 577 578 return mas->alloc->total; 579 } 580 581 /* 582 * mas_set_alloc_req() - Set the requested number of allocations. 583 * @mas: the maple state 584 * @count: the number of allocations. 585 * 586 * The requested number of allocations is either in the first allocated node, 587 * located in @mas->alloc->request_count, or directly in @mas->alloc if there is 588 * no allocated node. 
/*
 * mas_set_alloc_req() - Set the requested number of allocations.
 * @mas: the maple state
 * @count: the number of allocations.
 *
 * The requested number of allocations is either in the first allocated node,
 * located in @mas->alloc->request_count, or directly in @mas->alloc if there
 * is no allocated node.  Set the request either in the node or do the
 * necessary encoding to store in @mas->alloc directly.
 */
static inline void mas_set_alloc_req(struct ma_state *mas, unsigned long count)
{
	if (!mas->alloc || ((unsigned long)mas->alloc & 0x1)) {
		if (!count)
			mas->alloc = NULL;
		else
			mas->alloc = (struct maple_alloc *)(((count) << 1U) | 1U);
		return;
	}

	mas->alloc->request_count = count;
}

/*
 * mas_alloc_req() - get the requested number of allocations.
 * @mas: The maple state
 *
 * The alloc count is either stored directly in @mas, or in
 * @mas->alloc->request_count if there is at least one node allocated.  Decode
 * the request count if it's stored directly in @mas->alloc.
 *
 * Return: The allocation request count.
 */
static inline unsigned int mas_alloc_req(const struct ma_state *mas)
{
	if ((unsigned long)mas->alloc & 0x1)
		return (unsigned long)(mas->alloc) >> 1;
	else if (mas->alloc)
		return mas->alloc->request_count;
	return 0;
}

/*
 * ma_pivots() - Get a pointer to the maple node pivots.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node pivots
 */
static inline unsigned long *ma_pivots(struct maple_node *node,
				       enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot;
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot;
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * ma_gaps() - Get a pointer to the maple node gaps.
 * @node: the maple node
 * @type: the node type
 *
 * Return: A pointer to the maple node gaps
 */
static inline unsigned long *ma_gaps(struct maple_node *node,
				     enum maple_type type)
{
	switch (type) {
	case maple_arange_64:
		return node->ma64.gap;
	case maple_range_64:
	case maple_leaf_64:
	case maple_dense:
		return NULL;
	}
	return NULL;
}

/*
 * mte_pivot() - Get the pivot at @piv of the maple encoded node.
 * @mn: The maple encoded node.
 * @piv: The pivot.
 *
 * Return: the pivot at @piv of @mn.
 */
static inline unsigned long mte_pivot(const struct maple_enode *mn,
				      unsigned char piv)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	if (piv >= mt_pivots[type]) {
		WARN_ON(1);
		return 0;
	}

	switch (type) {
	case maple_arange_64:
		return node->ma64.pivot[piv];
	case maple_range_64:
	case maple_leaf_64:
		return node->mr64.pivot[piv];
	case maple_dense:
		return 0;
	}
	return 0;
}

/*
 * mas_safe_pivot() - get the pivot at @piv or mas->max.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @piv: The pivot to fetch
 * @type: The maple node type
 *
 * Return: The pivot at @piv within the limit of the @pivots array, @mas->max
 * otherwise.
 */
static inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
	       unsigned char piv, enum maple_type type)
{
	if (piv >= mt_pivots[type])
		return mas->max;

	return pivots[piv];
}

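/*
 * The last slot of a node has no pivot of its own; its upper bound is the
 * node's maximum.  For instance, in a maple_range_64 node (16 slots, 15
 * pivots) a walker asking for pivot 15 gets mas->max back from
 * mas_safe_pivot(), which is exactly the "Implied maximum" in the layout
 * diagram at the top of this file.
 */
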
/*
 * mas_safe_min() - Return the minimum for a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 *
 * Return: The minimum range value that is contained in @offset.
 */
static inline unsigned long
mas_safe_min(struct ma_state *mas, unsigned long *pivots, unsigned char offset)
{
	if (likely(offset))
		return pivots[offset - 1] + 1;

	return mas->min;
}

/*
 * mas_logical_pivot() - Get the logical pivot of a given offset.
 * @mas: The maple state
 * @pivots: The pointer to the maple node pivots
 * @offset: The offset into the pivot array
 * @type: The maple node type
 *
 * When there is no value at a pivot (beyond the end of the data), then the
 * pivot is actually @mas->max.
 *
 * Return: the logical pivot of a given @offset.
 */
static inline unsigned long
mas_logical_pivot(struct ma_state *mas, unsigned long *pivots,
		  unsigned char offset, enum maple_type type)
{
	unsigned long lpiv = mas_safe_pivot(mas, pivots, offset, type);

	if (likely(lpiv))
		return lpiv;

	if (likely(offset))
		return mas->max;

	return lpiv;
}

/*
 * mte_set_pivot() - Set a pivot to a value in an encoded maple node.
 * @mn: The encoded maple node
 * @piv: The pivot offset
 * @val: The value of the pivot
 */
static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
				 unsigned long val)
{
	struct maple_node *node = mte_to_node(mn);
	enum maple_type type = mte_node_type(mn);

	BUG_ON(piv >= mt_pivots[type]);
	switch (type) {
	default:
	case maple_range_64:
	case maple_leaf_64:
		node->mr64.pivot[piv] = val;
		break;
	case maple_arange_64:
		node->ma64.pivot[piv] = val;
		break;
	case maple_dense:
		break;
	}
}

/*
 * ma_slots() - Get a pointer to the maple node slots.
 * @mn: The maple node
 * @mt: The maple node type
 *
 * Return: A pointer to the maple node slots
 */
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
	switch (mt) {
	default:
	case maple_arange_64:
		return mn->ma64.slot;
	case maple_range_64:
	case maple_leaf_64:
		return mn->mr64.slot;
	case maple_dense:
		return mn->slot;
	}
}

static inline bool mt_locked(const struct maple_tree *mt)
{
	return mt_external_lock(mt) ? mt_lock_is_held(mt) :
		lockdep_is_held(&mt->ma_lock);
}

static inline void *mt_slot(const struct maple_tree *mt,
			    void __rcu **slots, unsigned char offset)
{
	return rcu_dereference_check(slots[offset], mt_locked(mt));
}

/*
 * mas_slot_locked() - Get the slot value when holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset.
 */
static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
				    unsigned char offset)
{
	return rcu_dereference_protected(slots[offset], mt_locked(mas->tree));
}

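/*
 * mt_slot() (and mas_slot() below) and mas_slot_locked() are the read-side
 * and write-side accessors for the same slot array: the former uses
 * rcu_dereference_check(), so lockless readers must be inside
 * rcu_read_lock(), while the latter uses rcu_dereference_protected()
 * because the tree lock (internal spinlock or external lock, see
 * mt_locked()) keeps the slot stable.  A reader sketch:
 *
 *	rcu_read_lock();
 *	entry = mas_slot(&mas, slots, offset);
 *	rcu_read_unlock();
 */
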
/*
 * mas_slot() - Get the slot value when not holding the maple tree lock.
 * @mas: The maple state
 * @slots: The pointer to the slots
 * @offset: The offset into the slots array to fetch
 *
 * Return: The entry stored in @slots at the @offset
 */
static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
			     unsigned char offset)
{
	return mt_slot(mas->tree, slots, offset);
}

/*
 * mas_root() - Get the maple tree root.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root(struct ma_state *mas)
{
	return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}

static inline void *mt_root_locked(struct maple_tree *mt)
{
	return rcu_dereference_protected(mt->ma_root, mt_locked(mt));
}

/*
 * mas_root_locked() - Get the maple tree root when holding the maple tree
 * lock.
 * @mas: The maple state.
 *
 * Return: The pointer to the root of the tree
 */
static inline void *mas_root_locked(struct ma_state *mas)
{
	return mt_root_locked(mas->tree);
}

static inline struct maple_metadata *ma_meta(struct maple_node *mn,
					     enum maple_type mt)
{
	switch (mt) {
	case maple_arange_64:
		return &mn->ma64.meta;
	default:
		return &mn->mr64.meta;
	}
}

/*
 * ma_set_meta() - Set the metadata information of a node.
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The offset of the highest sub-gap in this node.
 * @end: The end of the data in this node.
 */
static inline void ma_set_meta(struct maple_node *mn, enum maple_type mt,
			       unsigned char offset, unsigned char end)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
	meta->end = end;
}

/*
 * ma_meta_end() - Get the data end of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_end(struct maple_node *mn,
					enum maple_type mt)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	return meta->end;
}

/*
 * ma_meta_gap() - Get the largest gap location of a node from the metadata
 * @mn: The maple node
 * @mt: The maple node type
 */
static inline unsigned char ma_meta_gap(struct maple_node *mn,
					enum maple_type mt)
{
	BUG_ON(mt != maple_arange_64);

	return mn->ma64.meta.gap;
}

/*
 * ma_set_meta_gap() - Set the largest gap location in a node's metadata
 * @mn: The maple node
 * @mt: The maple node type
 * @offset: The location of the largest gap.
 */
static inline void ma_set_meta_gap(struct maple_node *mn, enum maple_type mt,
				   unsigned char offset)
{
	struct maple_metadata *meta = ma_meta(mn, mt);

	meta->gap = offset;
}

/*
 * mat_add() - Add a @dead_enode to the ma_topiary of a list of dead nodes.
 * @mat: the ma_topiary, a linked list of dead nodes.
 * @dead_enode: the node to be marked as dead and added to the tail of the
 * list
 *
 * Add the @dead_enode to the linked list in @mat.
 */
static inline void mat_add(struct ma_topiary *mat,
			   struct maple_enode *dead_enode)
{
	mte_set_node_dead(dead_enode);
	mte_to_mat(dead_enode)->next = NULL;
	if (!mat->tail) {
		mat->tail = mat->head = dead_enode;
		return;
	}

	mte_to_mat(mat->tail)->next = dead_enode;
	mat->tail = dead_enode;
}

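/*
 * A topiary list is built up while a write is being prepared and disposed of
 * once the new nodes are visible.  A minimal usage sketch:
 *
 *	MA_TOPIARY(mat, mas->tree);
 *	mat_add(&mat, old_enode);	// marks old_enode dead, links it
 *	...				// publish the replacement nodes
 *	mas_mat_free(mas, &mat);	// or mas_mat_destroy() for subtrees
 *
 * mas_mat_free() and mas_mat_destroy() are defined below.
 */
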
static void mte_destroy_walk(struct maple_enode *, struct maple_tree *);
static inline void mas_free(struct ma_state *mas, struct maple_enode *used);

/*
 * mas_mat_free() - Free all nodes in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Free walk a dead list.
 */
static void mas_mat_free(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mas_free(mas, mat->head);
		mat->head = next;
	}
}

/*
 * mas_mat_destroy() - Free all nodes and subtrees in a dead list.
 * @mas: the maple state
 * @mat: the ma_topiary linked list of dead nodes to free.
 *
 * Destroy walk a dead list.
 */
static void mas_mat_destroy(struct ma_state *mas, struct ma_topiary *mat)
{
	struct maple_enode *next;

	while (mat->head) {
		next = mte_to_mat(mat->head)->next;
		mte_destroy_walk(mat->head, mat->mtree);
		mat->head = next;
	}
}

/*
 * mas_descend() - Descend into the slot stored in the ma_state.
 * @mas: the maple state.
 *
 * Note: Not RCU safe, only use in write side or debug code.
 */
static inline void mas_descend(struct ma_state *mas)
{
	enum maple_type type;
	unsigned long *pivots;
	struct maple_node *node;
	void __rcu **slots;

	node = mas_mn(mas);
	type = mte_node_type(mas->node);
	pivots = ma_pivots(node, type);
	slots = ma_slots(node, type);

	if (mas->offset)
		mas->min = pivots[mas->offset - 1] + 1;
	mas->max = mas_safe_pivot(mas, pivots, mas->offset, type);
	mas->node = mas_slot(mas, slots, mas->offset);
}

/*
 * mte_set_gap() - Set a maple node gap.
 * @mn: The encoded maple node
 * @gap: The offset of the gap to set
 * @val: The gap value
 */
static inline void mte_set_gap(const struct maple_enode *mn,
			       unsigned char gap, unsigned long val)
{
	switch (mte_node_type(mn)) {
	default:
		break;
	case maple_arange_64:
		mte_to_node(mn)->ma64.gap[gap] = val;
		break;
	}
}

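/*
 * Descending maintains the range invariant: the child spans from the pivot
 * below its slot plus one, up to the pivot at its slot.  E.g. with pivots
 * {99, 199, 0, ...} and mas->offset == 1, mas_descend() sets mas->min = 100
 * and mas->max = 199 before loading the child from slot 1.
 */
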
/*
 * mas_ascend() - Walk up a level of the tree.
 * @mas: The maple state
 *
 * Sets the @mas->max and @mas->min to the correct values when walking up.
 * This may cause several levels of walking up to find the correct min and
 * max.  May find a dead node which will cause a premature return.
 * Return: 1 on dead node, 0 otherwise
 */
static int mas_ascend(struct ma_state *mas)
{
	struct maple_enode *p_enode; /* parent enode. */
	struct maple_enode *a_enode; /* ancestor enode. */
	struct maple_node *a_node; /* ancestor node. */
	struct maple_node *p_node; /* parent node. */
	unsigned char a_slot;
	enum maple_type a_type;
	unsigned long min, max;
	unsigned long *pivots;
	unsigned char offset;
	bool set_max = false, set_min = false;

	a_node = mas_mn(mas);
	if (ma_is_root(a_node)) {
		mas->offset = 0;
		return 0;
	}

	p_node = mte_parent(mas->node);
	if (unlikely(a_node == p_node))
		return 1;
	a_type = mas_parent_enum(mas, mas->node);
	offset = mte_parent_slot(mas->node);
	a_enode = mt_mk_node(p_node, a_type);

	/* Check to make sure all parent information is still accurate */
	if (p_node != mte_parent(mas->node))
		return 1;

	mas->node = a_enode;
	mas->offset = offset;

	if (mte_is_root(a_enode)) {
		mas->max = ULONG_MAX;
		mas->min = 0;
		return 0;
	}

	min = 0;
	max = ULONG_MAX;
	do {
		p_enode = a_enode;
		a_type = mas_parent_enum(mas, p_enode);
		a_node = mte_parent(p_enode);
		a_slot = mte_parent_slot(p_enode);
		pivots = ma_pivots(a_node, a_type);
		a_enode = mt_mk_node(a_node, a_type);

		if (!set_min && a_slot) {
			set_min = true;
			min = pivots[a_slot - 1] + 1;
		}

		if (!set_max && a_slot < mt_pivots[a_type]) {
			set_max = true;
			max = pivots[a_slot];
		}

		if (unlikely(ma_dead_node(a_node)))
			return 1;

		if (unlikely(ma_is_root(a_node)))
			break;

	} while (!set_min || !set_max);

	mas->max = max;
	mas->min = min;
	return 0;
}

/*
 * mas_pop_node() - Get a previously allocated maple node from the maple
 * state.
 * @mas: The maple state
 *
 * Return: A pointer to a maple node.
 */
static inline struct maple_node *mas_pop_node(struct ma_state *mas)
{
	struct maple_alloc *ret, *node = mas->alloc;
	unsigned long total = mas_allocated(mas);
	unsigned int req = mas_alloc_req(mas);

	/* nothing or a request pending. */
	if (WARN_ON(!total))
		return NULL;

	if (total == 1) {
		/* single allocation in this ma_state */
		mas->alloc = NULL;
		ret = node;
		goto single_node;
	}

	if (node->node_count == 1) {
		/* Single allocation in this node. */
		mas->alloc = node->slot[0];
		mas->alloc->total = node->total - 1;
		ret = node;
		goto new_head;
	}
	node->total--;
	ret = node->slot[--node->node_count];
	node->slot[node->node_count] = NULL;

single_node:
new_head:
	if (req) {
		req++;
		mas_set_alloc_req(mas, req);
	}

	memset(ret, 0, sizeof(*ret));
	return (struct maple_node *)ret;
}

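/*
 * The preallocated nodes form a list of arrays: mas->alloc points at a head
 * maple_alloc whose slot[] holds up to MAPLE_ALLOC_SLOTS more nodes, with
 * slot[0] of each full array linking to the next one.  mas_pop_node() above
 * consumes the head's highest occupied slot first; once only slot[0]
 * remains, the head itself is handed out and slot[0] is promoted to be the
 * new head.
 */
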
/*
 * mas_push_node() - Push a node back on the maple state allocation.
 * @mas: The maple state
 * @used: The used maple node
 *
 * Stores the maple node back into @mas->alloc for reuse.  Updates allocated
 * and requested node count as necessary.
 */
static inline void mas_push_node(struct ma_state *mas, struct maple_node *used)
{
	struct maple_alloc *reuse = (struct maple_alloc *)used;
	struct maple_alloc *head = mas->alloc;
	unsigned long count;
	unsigned int requested = mas_alloc_req(mas);

	count = mas_allocated(mas);

	reuse->request_count = 0;
	reuse->node_count = 0;
	if (count && (head->node_count < MAPLE_ALLOC_SLOTS)) {
		head->slot[head->node_count++] = reuse;
		head->total++;
		goto done;
	}

	reuse->total = 1;
	if ((head) && !((unsigned long)head & 0x1)) {
		reuse->slot[0] = head;
		reuse->node_count = 1;
		reuse->total += head->total;
	}

	mas->alloc = reuse;
done:
	if (requested > 1)
		mas_set_alloc_req(mas, requested - 1);
}

/*
 * mas_alloc_nodes() - Allocate nodes into a maple state
 * @mas: The maple state
 * @gfp: The GFP Flags
 */
static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
{
	struct maple_alloc *node;
	unsigned long allocated = mas_allocated(mas);
	unsigned int requested = mas_alloc_req(mas);
	unsigned int count;
	void **slots = NULL;
	unsigned int max_req = 0;

	if (!requested)
		return;

	mas_set_alloc_req(mas, 0);
	if (mas->mas_flags & MA_STATE_PREALLOC) {
		if (allocated)
			return;
		WARN_ON(!allocated);
	}

	if (!allocated || mas->alloc->node_count == MAPLE_ALLOC_SLOTS) {
		node = (struct maple_alloc *)mt_alloc_one(gfp);
		if (!node)
			goto nomem_one;

		if (allocated) {
			node->slot[0] = mas->alloc;
			node->node_count = 1;
		} else {
			node->node_count = 0;
		}

		mas->alloc = node;
		node->total = ++allocated;
		requested--;
	}

	node = mas->alloc;
	node->request_count = 0;
	while (requested) {
		max_req = MAPLE_ALLOC_SLOTS;
		if (node->node_count) {
			unsigned int offset = node->node_count;

			slots = (void **)&node->slot[offset];
			max_req -= offset;
		} else {
			slots = (void **)&node->slot;
		}

		max_req = min(requested, max_req);
		count = mt_alloc_bulk(gfp, max_req, slots);
		if (!count)
			goto nomem_bulk;

		node->node_count += count;
		allocated += count;
		node = node->slot[0];
		node->node_count = 0;
		node->request_count = 0;
		requested -= count;
	}
	mas->alloc->total = allocated;
	return;

nomem_bulk:
	/* Clean up potential freed allocations on bulk failure */
	memset(slots, 0, max_req * sizeof(unsigned long));
nomem_one:
	mas_set_alloc_req(mas, requested);
	if (mas->alloc && !(((unsigned long)mas->alloc & 0x1)))
		mas->alloc->total = allocated;
	mas_set_err(mas, -ENOMEM);
}

/*
 * mas_free() - Free an encoded maple node
 * @mas: The maple state
 * @used: The encoded maple node to free.
 *
 * Uses rcu free if necessary, pushes @used back on the maple state
 * allocations otherwise.
 */
static inline void mas_free(struct ma_state *mas, struct maple_enode *used)
{
	struct maple_node *tmp = mte_to_node(used);

	if (mt_in_rcu(mas->tree))
		ma_free_rcu(tmp);
	else
		mas_push_node(mas, tmp);
}

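/*
 * The helpers below finish the two-phase allocation scheme: a write first
 * states how many nodes it might need, mas_alloc_nodes() fills the request
 * with bulk allocations, and on failure the maple state carries -ENOMEM so
 * the caller can decide how to retry.  A sketch of the common pattern:
 *
 *	mas_node_count(&mas, 3);	// request with GFP_NOWAIT
 *	if (mas_is_err(&mas))
 *		...			// unwind, or retry with GFP_KERNEL
 */
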
/*
 * mas_node_count_gfp() - Check if enough nodes are allocated and request more
 * if there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 * @gfp: the gfp flags
 */
static void mas_node_count_gfp(struct ma_state *mas, int count, gfp_t gfp)
{
	unsigned long allocated = mas_allocated(mas);

	if (allocated < count) {
		mas_set_alloc_req(mas, count - allocated);
		mas_alloc_nodes(mas, gfp);
	}
}

/*
 * mas_node_count() - Check if enough nodes are allocated and request more if
 * there are not enough nodes.
 * @mas: The maple state
 * @count: The number of nodes needed
 *
 * Note: Uses GFP_NOWAIT | __GFP_NOWARN for gfp flags.
 */
static void mas_node_count(struct ma_state *mas, int count)
{
	return mas_node_count_gfp(mas, count, GFP_NOWAIT | __GFP_NOWARN);
}

/*
 * mas_start() - Sets up maple state for operations.
 * @mas: The maple state.
 *
 * If mas->node == MAS_START, then set the min, max and depth to
 * defaults.
 *
 * Return:
 * - If mas->node is an error or not MAS_START, return NULL.
 * - If it's an empty tree:     NULL & mas->node == MAS_NONE
 * - If it's a single entry:    The entry & mas->node == MAS_ROOT
 * - If it's a tree:            NULL & mas->node == safe root node.
 */
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
	if (likely(mas_is_start(mas))) {
		struct maple_enode *root;

		mas->min = 0;
		mas->max = ULONG_MAX;
		mas->depth = 0;

		root = mas_root(mas);
		/* Tree with nodes */
		if (likely(xa_is_node(root))) {
			mas->depth = 1;
			mas->node = mte_safe_root(root);
			mas->offset = 0;
			return NULL;
		}

		/* empty tree */
		if (unlikely(!root)) {
			mas->node = MAS_NONE;
			mas->offset = MAPLE_NODE_SLOTS;
			return NULL;
		}

		/* Single entry tree */
		mas->node = MAS_ROOT;
		mas->offset = MAPLE_NODE_SLOTS;

		/* Single entry tree. */
		if (mas->index > 0)
			return NULL;

		return root;
	}

	return NULL;
}

/*
 * ma_data_end() - Find the end of the data in a node.
 * @node: The maple node
 * @type: The maple node type
 * @pivots: The array of pivots in the node
 * @max: The maximum value in the node
 *
 * Uses metadata to find the end of the data when possible.
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char ma_data_end(struct maple_node *node,
					enum maple_type type,
					unsigned long *pivots,
					unsigned long max)
{
	unsigned char offset;

	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == max))
		return offset;

	return mt_pivots[type];
}

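/*
 * ma_data_end() distinguishes three cases via the last real pivot: a zero
 * there is treated as an end-of-data marker and the metadata holds the true
 * end, a pivot equal to the node maximum means that slot is the last one in
 * use, and any other value means the data runs into the final, implied
 * slot.
 */
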
/*
 * mas_data_end() - Find the end of the data (slot).
 * @mas: the maple state
 *
 * This method is optimized to check the metadata of a node if the node type
 * supports data end metadata.
 *
 * Return: The zero indexed last slot with data (may be null).
 */
static inline unsigned char mas_data_end(struct ma_state *mas)
{
	enum maple_type type;
	struct maple_node *node;
	unsigned char offset;
	unsigned long *pivots;

	type = mte_node_type(mas->node);
	node = mas_mn(mas);
	if (type == maple_arange_64)
		return ma_meta_end(node, type);

	pivots = ma_pivots(node, type);
	offset = mt_pivots[type] - 1;
	if (likely(!pivots[offset]))
		return ma_meta_end(node, type);

	if (likely(pivots[offset] == mas->max))
		return offset;

	return mt_pivots[type];
}

/*
 * mas_leaf_max_gap() - Returns the largest gap in a leaf node
 * @mas: the maple state
 *
 * Return: The maximum gap in the leaf.
 */
static unsigned long mas_leaf_max_gap(struct ma_state *mas)
{
	enum maple_type mt;
	unsigned long pstart, gap, max_gap;
	struct maple_node *mn;
	unsigned long *pivots;
	void __rcu **slots;
	unsigned char i;
	unsigned char max_piv;

	mt = mte_node_type(mas->node);
	mn = mas_mn(mas);
	slots = ma_slots(mn, mt);
	max_gap = 0;
	if (unlikely(ma_is_dense(mt))) {
		gap = 0;
		for (i = 0; i < mt_slots[mt]; i++) {
			if (slots[i]) {
				if (gap > max_gap)
					max_gap = gap;
				gap = 0;
			} else {
				gap++;
			}
		}
		if (gap > max_gap)
			max_gap = gap;
		return max_gap;
	}

	/*
	 * Checking the first implied pivot optimizes the loop below, and
	 * slot 1 may be skipped if there is a gap in slot 0.
	 */
	pivots = ma_pivots(mn, mt);
	if (likely(!slots[0])) {
		max_gap = pivots[0] - mas->min + 1;
		i = 2;
	} else {
		i = 1;
	}

	/* reduce max_piv as the special case is checked before the loop */
	max_piv = ma_data_end(mn, mt, pivots, mas->max) - 1;
	/*
	 * Check end implied pivot which can only be a gap on the right most
	 * node.
	 */
	if (unlikely(mas->max == ULONG_MAX) && !slots[max_piv + 1]) {
		gap = ULONG_MAX - pivots[max_piv];
		if (gap > max_gap)
			max_gap = gap;
	}

	for (; i <= max_piv; i++) {
		/* data == no gap. */
		if (likely(slots[i]))
			continue;

		pstart = pivots[i - 1];
		gap = pivots[i] - pstart;
		if (gap > max_gap)
			max_gap = gap;

		/* There cannot be two gaps in a row. */
		i++;
	}
	return max_gap;
}

/*
 * ma_max_gap() - Get the maximum gap in a maple node (non-leaf)
 * @node: The maple node
 * @gaps: The pointer to the gaps
 * @mt: The maple node type
 * @off: Pointer to store the offset location of the gap.
 *
 * Uses the data end from the metadata to scan backwards across the set gaps.
 *
 * Return: The maximum gap value
 */
static inline unsigned long
ma_max_gap(struct maple_node *node, unsigned long *gaps, enum maple_type mt,
	   unsigned char *off)
{
	unsigned char offset, i;
	unsigned long max_gap = 0;

	i = offset = ma_meta_end(node, mt);
	do {
		if (gaps[i] > max_gap) {
			max_gap = gaps[i];
			offset = i;
		}
	} while (i--);

	*off = offset;
	return max_gap;
}

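/*
 * In a leaf, a gap is simply a NULL slot measured by its pivots.  For a
 * leaf spanning [0, 100] with slot[0] = A, pivot[0] = 49, slot[1] = NULL,
 * pivot[1] = 79, slot[2] = B: the NULL covers [50, 79], so the gap is
 * 79 - 49 == 30 free values.  The special cases in mas_leaf_max_gap()
 * handle a NULL in slot 0 (measured from mas->min) and a trailing NULL on
 * the right-most node (measured to ULONG_MAX).
 */
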
/*
 * mas_max_gap() - find the largest gap in a non-leaf node and set the slot.
 * @mas: The maple state.
 *
 * If the metadata gap is set to MAPLE_ARANGE64_META_MAX, there is no gap.
 *
 * Return: The gap value.
 */
static inline unsigned long mas_max_gap(struct ma_state *mas)
{
	unsigned long *gaps;
	unsigned char offset;
	enum maple_type mt;
	struct maple_node *node;

	mt = mte_node_type(mas->node);
	if (ma_is_leaf(mt))
		return mas_leaf_max_gap(mas);

	node = mas_mn(mas);
	offset = ma_meta_gap(node, mt);
	if (offset == MAPLE_ARANGE64_META_MAX)
		return 0;

	gaps = ma_gaps(node, mt);
	return gaps[offset];
}

/*
 * mas_parent_gap() - Set the parent gap and any gaps above, as needed
 * @mas: The maple state
 * @offset: The gap offset in the parent to set
 * @new: The new gap value.
 *
 * Set the parent gap then continue to set the gap upwards, using the
 * metadata of the parent to see if it is necessary to check the node above.
 */
static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
				  unsigned long new)
{
	unsigned long meta_gap = 0;
	struct maple_node *pnode;
	struct maple_enode *penode;
	unsigned long *pgaps;
	unsigned char meta_offset;
	enum maple_type pmt;

	pnode = mte_parent(mas->node);
	pmt = mas_parent_enum(mas, mas->node);
	penode = mt_mk_node(pnode, pmt);
	pgaps = ma_gaps(pnode, pmt);

ascend:
	meta_offset = ma_meta_gap(pnode, pmt);
	if (meta_offset == MAPLE_ARANGE64_META_MAX)
		meta_gap = 0;
	else
		meta_gap = pgaps[meta_offset];

	pgaps[offset] = new;

	if (meta_gap == new)
		return;

	if (offset != meta_offset) {
		if (meta_gap > new)
			return;

		ma_set_meta_gap(pnode, pmt, offset);
	} else if (new < meta_gap) {
		meta_offset = 15;
		new = ma_max_gap(pnode, pgaps, pmt, &meta_offset);
		ma_set_meta_gap(pnode, pmt, meta_offset);
	}

	if (ma_is_root(pnode))
		return;

	/* Go to the parent node. */
	pnode = mte_parent(penode);
	pmt = mas_parent_enum(mas, penode);
	pgaps = ma_gaps(pnode, pmt);
	offset = mte_parent_slot(penode);
	penode = mt_mk_node(pnode, pmt);
	goto ascend;
}

/*
 * mas_update_gap() - Update a node's gaps and propagate up if necessary.
 * @mas: the maple state.
 */
static inline void mas_update_gap(struct ma_state *mas)
{
	unsigned char pslot;
	unsigned long p_gap;
	unsigned long max_gap;

	if (!mt_is_alloc(mas->tree))
		return;

	if (mte_is_root(mas->node))
		return;

	max_gap = mas_max_gap(mas);

	pslot = mte_parent_slot(mas->node);
	p_gap = ma_gaps(mte_parent(mas->node),
			mas_parent_enum(mas, mas->node))[pslot];

	if (p_gap != max_gap)
		mas_parent_gap(mas, pslot, max_gap);
}

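/*
 * Propagation stops as soon as the parent's view is unchanged.  If a node's
 * largest gap shrinks but a sibling still provides the parent's recorded
 * maximum, only the one parent gap entry is rewritten; if the shrunken gap
 * was the parent's recorded maximum, ma_max_gap() rescans the parent and
 * the new maximum continues upward toward the root.
 */
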
/*
 * mas_adopt_children() - Set the parent pointer of all nodes in @parent to
 * @parent with the slot encoded.
 * @mas: the maple state (for the tree)
 * @parent: the maple encoded node containing the children.
 */
static inline void mas_adopt_children(struct ma_state *mas,
				      struct maple_enode *parent)
{
	enum maple_type type = mte_node_type(parent);
	struct maple_node *node = mas_mn(mas);
	void __rcu **slots = ma_slots(node, type);
	unsigned long *pivots = ma_pivots(node, type);
	struct maple_enode *child;
	unsigned char offset;

	offset = ma_data_end(node, type, pivots, mas->max);
	do {
		child = mas_slot_locked(mas, slots, offset);
		mte_set_parent(child, parent, offset);
	} while (offset--);
}

/*
 * mas_replace() - Replace a maple node in the tree with mas->node.  Uses the
 * parent encoding to locate the maple node in the tree.
 * @mas: the ma_state to use for operations.
 * @advanced: boolean to adopt the child nodes and free the old node (false)
 * or leave the node (true) and handle the adoption and free elsewhere.
 */
static inline void mas_replace(struct ma_state *mas, bool advanced)
	__must_hold(mas->tree->lock)
{
	struct maple_node *mn = mas_mn(mas);
	struct maple_enode *old_enode;
	unsigned char offset = 0;
	void __rcu **slots = NULL;

	if (ma_is_root(mn)) {
		old_enode = mas_root_locked(mas);
	} else {
		offset = mte_parent_slot(mas->node);
		slots = ma_slots(mte_parent(mas->node),
				 mas_parent_enum(mas, mas->node));
		old_enode = mas_slot_locked(mas, slots, offset);
	}

	if (!advanced && !mte_is_leaf(mas->node))
		mas_adopt_children(mas, mas->node);

	if (mte_is_root(mas->node)) {
		mn->parent = ma_parent_ptr(
			((unsigned long)mas->tree | MA_ROOT_PARENT));
		rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node));
		mas_set_height(mas);
	} else {
		rcu_assign_pointer(slots[offset], mas->node);
	}

	if (!advanced)
		mas_free(mas, old_enode);
}

/*
 * mas_new_child() - Find the new child of a node.
 * @mas: the maple state
 * @child: the maple state to store the child.
 *
 * Return: true if a new child is found, false otherwise.
 */
static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child)
	__must_hold(mas->tree->lock)
{
	enum maple_type mt;
	unsigned char offset;
	unsigned char end;
	unsigned long *pivots;
	struct maple_enode *entry;
	struct maple_node *node;
	void __rcu **slots;

	mt = mte_node_type(mas->node);
	node = mas_mn(mas);
	slots = ma_slots(node, mt);
	pivots = ma_pivots(node, mt);
	end = ma_data_end(node, mt, pivots, mas->max);
	for (offset = mas->offset; offset <= end; offset++) {
		entry = mas_slot_locked(mas, slots, offset);
		if (mte_parent(entry) == node) {
			*child = *mas;
			mas->offset = offset + 1;
			child->offset = offset;
			mas_descend(child);
			child->offset = 0;
			return true;
		}
	}
	return false;
}

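/*
 * This replacement scheme is what keeps writes safe against concurrent
 * readers: the children of the fully-built new node are re-parented to it,
 * the node is swapped in with rcu_assign_pointer(), and the old node is
 * either freed via RCU (readers may still hold it) or pushed back into the
 * maple state for reuse when the tree is not in RCU mode.
 */
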
/*
 * mab_shift_right() - Shift the data in mab right.  Note, does not clean out
 * the old data or set b_node->b_end.
 * @b_node: the maple_big_node
 * @shift: the shift count
 */
static inline void mab_shift_right(struct maple_big_node *b_node,
				   unsigned char shift)
{
	unsigned long size = b_node->b_end * sizeof(unsigned long);

	memmove(b_node->pivot + shift, b_node->pivot, size);
	memmove(b_node->slot + shift, b_node->slot, size);
	if (b_node->type == maple_arange_64)
		memmove(b_node->gap + shift, b_node->gap, size);
}

/*
 * mab_middle_node() - Check if a middle node is needed (unlikely)
 * @b_node: the maple_big_node that contains the data.
 * @split: the potential split location
 * @slot_count: the size that can be stored in a single node being considered.
 *
 * Return: true if a middle node is required.
 */
static inline bool mab_middle_node(struct maple_big_node *b_node, int split,
				   unsigned char slot_count)
{
	unsigned char size = b_node->b_end;

	if (size >= 2 * slot_count)
		return true;

	if (!b_node->slot[split] && (size >= 2 * slot_count - 1))
		return true;

	return false;
}

/*
 * mab_no_null_split() - ensure the split doesn't fall on a NULL
 * @b_node: the maple_big_node with the data
 * @split: the suggested split location
 * @slot_count: the number of slots in the node being considered.
 *
 * Return: the split location.
 */
static inline int mab_no_null_split(struct maple_big_node *b_node,
				    unsigned char split,
				    unsigned char slot_count)
{
	if (!b_node->slot[split]) {
		/*
		 * If the split is less than the max slot && the right side
		 * will still be sufficient, then increment the split on NULL.
		 */
		if ((split < slot_count - 1) &&
		    (b_node->b_end - split) > (mt_min_slots[b_node->type]))
			split++;
		else
			split--;
	}
	return split;
}

/*
 * mab_calc_split() - Calculate the split location and if there needs to be
 * two splits.
 * @mas: The maple state
 * @bn: The maple_big_node with the data
 * @mid_split: The second split, if required.  0 otherwise.
 * @min: The minimum of the data's range.
 *
 * Return: The first split location.  The middle split is set in @mid_split.
 */
static inline int mab_calc_split(struct ma_state *mas,
		struct maple_big_node *bn, unsigned char *mid_split,
		unsigned long min)
{
	unsigned char b_end = bn->b_end;
	int split = b_end / 2; /* Assume equal split. */
	unsigned char slot_min, slot_count = mt_slots[bn->type];

	/*
	 * To support gap tracking, all NULL entries are kept together and a
	 * node cannot end on a NULL entry, with the exception of the
	 * left-most leaf.  This limitation means that the split of a node
	 * must be checked for this condition and be able to put more data in
	 * one direction or the other.
	 */
	if (unlikely((mas->mas_flags & MA_STATE_BULK))) {
		*mid_split = 0;
		split = b_end - mt_min_slots[bn->type];

		if (!ma_is_leaf(bn->type))
			return split;

		mas->mas_flags |= MA_STATE_REBALANCE;
		if (!bn->slot[split])
			split--;
		return split;
	}

	/*
	 * Although extremely rare, it is possible to enter what is known as
	 * the 3-way split scenario.  The 3-way split comes about by means of
	 * a store of a range that overwrites the end and beginning of two
	 * full nodes.  The result is a set of entries that cannot be stored
	 * in 2 nodes.  Sometimes, these two nodes can also be located in
	 * different parent nodes which are also full.  This can carry
	 * upwards all the way to the root in the worst case.
	 */
	if (unlikely(mab_middle_node(bn, split, slot_count))) {
		split = b_end / 3;
		*mid_split = split * 2;
	} else {
		slot_min = mt_min_slots[bn->type];

		*mid_split = 0;
		/*
		 * Avoid having a range less than the slot count unless it
		 * causes one node to be deficient.
		 * NOTE: mt_min_slots is 1 based, b_end and split are zero
		 * based.
		 */
		while (((bn->pivot[split] - min) < slot_count - 1) &&
		       (split < slot_count - 1) && (b_end - split > slot_min))
			split++;
	}

	/* Avoid ending a node on a NULL entry */
	split = mab_no_null_split(bn, split, slot_count);

	if (unlikely(*mid_split))
		*mid_split = mab_no_null_split(bn, *mid_split, slot_count);

	return split;
}

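/*
 * As a worked example: a big node holding 20 entries destined for
 * maple_range_64 nodes (16 slots each) does not need a middle node
 * (20 < 2 * 16), so the initial guess is an even split at 20 / 2 == 10,
 * which is then adjusted to avoid ending on a NULL and to keep both sides
 * above mt_min_slots.  Only around 31 or 32 entries does the store degrade
 * to a 3-way split at b_end / 3 and 2 * (b_end / 3).
 */
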
/*
 * mas_mab_cp() - Copy data from a maple state inclusively to a
 * maple_big_node and set @b_node->b_end to the next free slot.
 * @mas: The maple state
 * @mas_start: The starting slot to copy
 * @mas_end: The end slot to copy (inclusively)
 * @b_node: The maple_big_node to place the data
 * @mab_start: The starting location in maple_big_node to store the data.
 */
static inline void mas_mab_cp(struct ma_state *mas, unsigned char mas_start,
			unsigned char mas_end, struct maple_big_node *b_node,
			unsigned char mab_start)
{
	enum maple_type mt;
	struct maple_node *node;
	void __rcu **slots;
	unsigned long *pivots, *gaps;
	int i = mas_start, j = mab_start;
	unsigned char piv_end;

	node = mas_mn(mas);
	mt = mte_node_type(mas->node);
	pivots = ma_pivots(node, mt);
	if (!i) {
		b_node->pivot[j] = pivots[i++];
		if (unlikely(i > mas_end))
			goto complete;
		j++;
	}

	piv_end = min(mas_end, mt_pivots[mt]);
	for (; i < piv_end; i++, j++) {
		b_node->pivot[j] = pivots[i];
		if (unlikely(!b_node->pivot[j]))
			break;

		if (unlikely(mas->max == b_node->pivot[j]))
			goto complete;
	}

	if (likely(i <= mas_end))
		b_node->pivot[j] = mas_safe_pivot(mas, pivots, i, mt);

complete:
	b_node->b_end = ++j;
	j -= mab_start;
	slots = ma_slots(node, mt);
	memcpy(b_node->slot + mab_start, slots + mas_start, sizeof(void *) * j);
	if (!ma_is_leaf(mt) && mt_is_alloc(mas->tree)) {
		gaps = ma_gaps(node, mt);
		memcpy(b_node->gap + mab_start, gaps + mas_start,
		       sizeof(unsigned long) * j);
	}
}

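/*
 * mas_mab_cp() and its inverse mab_mas_cp() below bracket most complex
 * writes: the affected node (or nodes) is flattened into a maple_big_node
 * on the stack, the new entry is spliced in (see mas_store_b_node()), and
 * the result is copied back out into one, two or three fresh nodes
 * depending on the calculated split.
 */
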
/*
 * mas_leaf_set_meta() - Set the metadata of a leaf if possible.
 * @mas: The maple state
 * @node: The maple node
 * @pivots: pointer to the maple node pivots
 * @mt: The maple type
 * @end: The assumed end
 *
 * Note, end may be incremented within this function but not modified at the
 * source.  This is fine since the metadata is the last thing to be stored
 * in a node during a write.
 */
static inline void mas_leaf_set_meta(struct ma_state *mas,
		struct maple_node *node, unsigned long *pivots,
		enum maple_type mt, unsigned char end)
{
	/* There is no room for metadata already */
	if (mt_pivots[mt] <= end)
		return;

	if (pivots[end] && pivots[end] < mas->max)
		end++;

	if (end < mt_slots[mt] - 1)
		ma_set_meta(node, mt, 0, end);
}

/*
 * mab_mas_cp() - Copy data from maple_big_node to a maple encoded node.
 * @b_node: the maple_big_node that has the data
 * @mab_start: the start location in @b_node.
 * @mab_end: The end location in @b_node (inclusively)
 * @mas: The maple state with the maple encoded node.
 * @new_max: Update @mas->max to the last pivot copied.
 */
static inline void mab_mas_cp(struct maple_big_node *b_node,
		unsigned char mab_start, unsigned char mab_end,
		struct ma_state *mas, bool new_max)
{
	int i, j = 0;
	enum maple_type mt = mte_node_type(mas->node);
	struct maple_node *node = mte_to_node(mas->node);
	void __rcu **slots = ma_slots(node, mt);
	unsigned long *pivots = ma_pivots(node, mt);
	unsigned long *gaps = NULL;
	unsigned char end;

	if (mab_end - mab_start > mt_pivots[mt])
		mab_end--;

	if (!pivots[mt_pivots[mt] - 1])
		slots[mt_pivots[mt]] = NULL;

	i = mab_start;
	do {
		pivots[j++] = b_node->pivot[i++];
	} while (i <= mab_end && likely(b_node->pivot[i]));

	memcpy(slots, b_node->slot + mab_start,
	       sizeof(void *) * (i - mab_start));

	if (new_max)
		mas->max = b_node->pivot[i - 1];

	end = j - 1;
	if (likely(!ma_is_leaf(mt) && mt_is_alloc(mas->tree))) {
		unsigned long max_gap = 0;
		unsigned char offset = 15;

		gaps = ma_gaps(node, mt);
		do {
			gaps[--j] = b_node->gap[--i];
			if (gaps[j] > max_gap) {
				offset = j;
				max_gap = gaps[j];
			}
		} while (j);

		ma_set_meta(node, mt, offset, end);
	} else {
		mas_leaf_set_meta(mas, node, pivots, mt, end);
	}
}

/*
 * mas_descend_adopt() - Descend through a sub-tree and adopt children.
 * @mas: the maple state with the maple encoded node of the sub-tree.
 *
 * Descend through a sub-tree and adopt children that do not have the correct
 * parents set.  Follow the parents which have the correct parents as they
 * are the new entries which need to be followed to find other incorrectly
 * set parents.
 */
static inline void mas_descend_adopt(struct ma_state *mas)
{
	struct ma_state list[3], next[3];
	int i, n;

	/*
	 * At each level there may be up to 3 correct parent pointers which
	 * indicates the new nodes which need to be walked to find any new
	 * nodes at a lower level.
	 */

	for (i = 0; i < 3; i++) {
		list[i] = *mas;
		list[i].offset = 0;
		next[i].offset = 0;
	}
	next[0] = *mas;

	while (!mte_is_leaf(list[0].node)) {
		n = 0;
		for (i = 0; i < 3; i++) {
			if (mas_is_none(&list[i]))
				continue;

			if (i && list[i-1].node == list[i].node)
				continue;

			while ((n < 3) && (mas_new_child(&list[i], &next[n])))
				n++;

			mas_adopt_children(&list[i], list[i].node);
		}

		while (n < 3)
			next[n++].node = MAS_NONE;

		/* descend by setting the list to the children */
		for (i = 0; i < 3; i++)
			list[i] = next[i];
	}
}

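/*
 * A subtree rebuild produces at most three new nodes per level (left,
 * middle and right), which is why three maple states are walked down in
 * parallel above: each tracks one new node, duplicates pointing at the same
 * node are skipped, and unused states are set to MAS_NONE.
 */
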
2089 * @mas: The maple state 2090 * @end: The maple node end 2091 * @mt: The maple node type 2092 */ 2093 static inline void mas_bulk_rebalance(struct ma_state *mas, unsigned char end, 2094 enum maple_type mt) 2095 { 2096 if (!(mas->mas_flags & MA_STATE_BULK)) 2097 return; 2098 2099 if (mte_is_root(mas->node)) 2100 return; 2101 2102 if (end > mt_min_slots[mt]) { 2103 mas->mas_flags &= ~MA_STATE_REBALANCE; 2104 return; 2105 } 2106 } 2107 2108 /* 2109 * mas_store_b_node() - Store an @entry into the b_node while also copying the 2110 * data from a maple encoded node. 2111 * @wr_mas: the maple write state 2112 * @b_node: the maple_big_node to fill with data 2113 * @offset_end: the offset to end copying 2114 * 2115 * Return: The actual end of the data stored in @b_node 2116 */ 2117 static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas, 2118 struct maple_big_node *b_node, unsigned char offset_end) 2119 { 2120 unsigned char slot; 2121 unsigned char b_end; 2122 /* Possible underflow of piv will wrap back to 0 before use. */ 2123 unsigned long piv; 2124 struct ma_state *mas = wr_mas->mas; 2125 2126 b_node->type = wr_mas->type; 2127 b_end = 0; 2128 slot = mas->offset; 2129 if (slot) { 2130 /* Copy start data up to insert. */ 2131 mas_mab_cp(mas, 0, slot - 1, b_node, 0); 2132 b_end = b_node->b_end; 2133 piv = b_node->pivot[b_end - 1]; 2134 } else 2135 piv = mas->min - 1; 2136 2137 if (piv + 1 < mas->index) { 2138 /* Handle range starting after old range */ 2139 b_node->slot[b_end] = wr_mas->content; 2140 if (!wr_mas->content) 2141 b_node->gap[b_end] = mas->index - 1 - piv; 2142 b_node->pivot[b_end++] = mas->index - 1; 2143 } 2144 2145 /* Store the new entry. */ 2146 mas->offset = b_end; 2147 b_node->slot[b_end] = wr_mas->entry; 2148 b_node->pivot[b_end] = mas->last; 2149 2150 /* Appended. */ 2151 if (mas->last >= mas->max) 2152 goto b_end; 2153 2154 /* Handle new range ending before old range ends */ 2155 piv = mas_logical_pivot(mas, wr_mas->pivots, offset_end, wr_mas->type); 2156 if (piv > mas->last) { 2157 if (piv == ULONG_MAX) 2158 mas_bulk_rebalance(mas, b_node->b_end, wr_mas->type); 2159 2160 if (offset_end != slot) 2161 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, 2162 offset_end); 2163 2164 b_node->slot[++b_end] = wr_mas->content; 2165 if (!wr_mas->content) 2166 b_node->gap[b_end] = piv - mas->last + 1; 2167 b_node->pivot[b_end] = piv; 2168 } 2169 2170 slot = offset_end + 1; 2171 if (slot > wr_mas->node_end) 2172 goto b_end; 2173 2174 /* Copy end data to the end of the node. */ 2175 mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end); 2176 b_node->b_end--; 2177 return; 2178 2179 b_end: 2180 b_node->b_end = b_end; 2181 } 2182 2183 /* 2184 * mas_prev_sibling() - Find the previous node with the same parent. 2185 * @mas: the maple state 2186 * 2187 * Return: True if there is a previous sibling, false otherwise. 2188 */ 2189 static inline bool mas_prev_sibling(struct ma_state *mas) 2190 { 2191 unsigned int p_slot = mte_parent_slot(mas->node); 2192 2193 if (mte_is_root(mas->node)) 2194 return false; 2195 2196 if (!p_slot) 2197 return false; 2198 2199 mas_ascend(mas); 2200 mas->offset = p_slot - 1; 2201 mas_descend(mas); 2202 return true; 2203 } 2204 2205 /* 2206 * mas_next_sibling() - Find the next node with the same parent. 2207 * @mas: the maple state 2208 * 2209 * Return: true if there is a next sibling, false otherwise. 
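 *
 * Illustrative sketch (not from the original source): visiting each
 * sibling to the right of the current node while holding the tree lock,
 * assuming @mas already points at a node:
 *
 *	do {
 *		(operate on mas_mn(mas))
 *	} while (mas_next_sibling(mas));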
2210 */ 2211 static inline bool mas_next_sibling(struct ma_state *mas) 2212 { 2213 MA_STATE(parent, mas->tree, mas->index, mas->last); 2214 2215 if (mte_is_root(mas->node)) 2216 return false; 2217 2218 parent = *mas; 2219 mas_ascend(&parent); 2220 parent.offset = mte_parent_slot(mas->node) + 1; 2221 if (parent.offset > mas_data_end(&parent)) 2222 return false; 2223 2224 *mas = parent; 2225 mas_descend(mas); 2226 return true; 2227 } 2228 2229 /* 2230 * mte_node_or_node() - Return the encoded node or MAS_NONE. 2231 * @enode: The encoded maple node. 2232 * 2233 * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state. 2234 * 2235 * Return: @enode or MAS_NONE 2236 */ 2237 static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode) 2238 { 2239 if (enode) 2240 return enode; 2241 2242 return ma_enode_ptr(MAS_NONE); 2243 } 2244 2245 /* 2246 * mas_wr_node_walk() - Find the correct offset for the index in the @mas. 2247 * @wr_mas: The maple write state 2248 * 2249 * Uses mas_slot_locked() and does not need to worry about dead nodes. 2250 */ 2251 static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas) 2252 { 2253 struct ma_state *mas = wr_mas->mas; 2254 unsigned char count; 2255 unsigned char offset; 2256 unsigned long index, min, max; 2257 2258 if (unlikely(ma_is_dense(wr_mas->type))) { 2259 wr_mas->r_max = wr_mas->r_min = mas->index; 2260 mas->offset = mas->index = mas->min; 2261 return; 2262 } 2263 2264 wr_mas->node = mas_mn(wr_mas->mas); 2265 wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type); 2266 count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type, 2267 wr_mas->pivots, mas->max); 2268 offset = mas->offset; 2269 min = mas_safe_min(mas, wr_mas->pivots, offset); 2270 if (unlikely(offset == count)) 2271 goto max; 2272 2273 max = wr_mas->pivots[offset]; 2274 index = mas->index; 2275 if (unlikely(index <= max)) 2276 goto done; 2277 2278 if (unlikely(!max && offset)) 2279 goto max; 2280 2281 min = max + 1; 2282 while (++offset < count) { 2283 max = wr_mas->pivots[offset]; 2284 if (index <= max) 2285 goto done; 2286 else if (unlikely(!max)) 2287 break; 2288 2289 min = max + 1; 2290 } 2291 2292 max: 2293 max = mas->max; 2294 done: 2295 wr_mas->r_max = max; 2296 wr_mas->r_min = min; 2297 wr_mas->offset_end = mas->offset = offset; 2298 } 2299 2300 /* 2301 * mas_topiary_range() - Add a range of slots to the topiary. 2302 * @mas: The maple state 2303 * @destroy: The topiary to add the slots (usually destroy) 2304 * @start: The starting slot inclusively 2305 * @end: The end slot inclusively 2306 */ 2307 static inline void mas_topiary_range(struct ma_state *mas, 2308 struct ma_topiary *destroy, unsigned char start, unsigned char end) 2309 { 2310 void __rcu **slots; 2311 unsigned char offset; 2312 2313 MT_BUG_ON(mas->tree, mte_is_leaf(mas->node)); 2314 slots = ma_slots(mas_mn(mas), mte_node_type(mas->node)); 2315 for (offset = start; offset <= end; offset++) { 2316 struct maple_enode *enode = mas_slot_locked(mas, slots, offset); 2317 2318 if (mte_dead_node(enode)) 2319 continue; 2320 2321 mat_add(destroy, enode); 2322 } 2323 } 2324 2325 /* 2326 * mast_topiary() - Add the portions of the tree to the removal list; either to 2327 * be freed or discarded (destroy walk). 2328 * @mast: The maple_subtree_state. 
2329 */ 2330 static inline void mast_topiary(struct maple_subtree_state *mast) 2331 { 2332 MA_WR_STATE(wr_mas, mast->orig_l, NULL); 2333 unsigned char r_start, r_end; 2334 unsigned char l_start, l_end; 2335 void __rcu **l_slots, **r_slots; 2336 2337 wr_mas.type = mte_node_type(mast->orig_l->node); 2338 mast->orig_l->index = mast->orig_l->last; 2339 mas_wr_node_walk(&wr_mas); 2340 l_start = mast->orig_l->offset + 1; 2341 l_end = mas_data_end(mast->orig_l); 2342 r_start = 0; 2343 r_end = mast->orig_r->offset; 2344 2345 if (r_end) 2346 r_end--; 2347 2348 l_slots = ma_slots(mas_mn(mast->orig_l), 2349 mte_node_type(mast->orig_l->node)); 2350 2351 r_slots = ma_slots(mas_mn(mast->orig_r), 2352 mte_node_type(mast->orig_r->node)); 2353 2354 if ((l_start < l_end) && 2355 mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_start))) { 2356 l_start++; 2357 } 2358 2359 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_end))) { 2360 if (r_end) 2361 r_end--; 2362 } 2363 2364 if ((l_start > r_end) && (mast->orig_l->node == mast->orig_r->node)) 2365 return; 2366 2367 /* At the node where left and right sides meet, add the parts between */ 2368 if (mast->orig_l->node == mast->orig_r->node) { 2369 return mas_topiary_range(mast->orig_l, mast->destroy, 2370 l_start, r_end); 2371 } 2372 2373 /* mast->orig_r is different and consumed. */ 2374 if (mte_is_leaf(mast->orig_r->node)) 2375 return; 2376 2377 if (mte_dead_node(mas_slot_locked(mast->orig_l, l_slots, l_end))) 2378 l_end--; 2379 2380 2381 if (l_start <= l_end) 2382 mas_topiary_range(mast->orig_l, mast->destroy, l_start, l_end); 2383 2384 if (mte_dead_node(mas_slot_locked(mast->orig_r, r_slots, r_start))) 2385 r_start++; 2386 2387 if (r_start <= r_end) 2388 mas_topiary_range(mast->orig_r, mast->destroy, 0, r_end); 2389 } 2390 2391 /* 2392 * mast_rebalance_next() - Rebalance against the next node 2393 * @mast: The maple subtree state 2394 * @old_r: The encoded maple node to the right (next node). 2395 */ 2396 static inline void mast_rebalance_next(struct maple_subtree_state *mast) 2397 { 2398 unsigned char b_end = mast->bn->b_end; 2399 2400 mas_mab_cp(mast->orig_r, 0, mt_slot_count(mast->orig_r->node), 2401 mast->bn, b_end); 2402 mast->orig_r->last = mast->orig_r->max; 2403 } 2404 2405 /* 2406 * mast_rebalance_prev() - Rebalance against the previous node 2407 * @mast: The maple subtree state 2408 * @old_l: The encoded maple node to the left (previous node) 2409 */ 2410 static inline void mast_rebalance_prev(struct maple_subtree_state *mast) 2411 { 2412 unsigned char end = mas_data_end(mast->orig_l) + 1; 2413 unsigned char b_end = mast->bn->b_end; 2414 2415 mab_shift_right(mast->bn, end); 2416 mas_mab_cp(mast->orig_l, 0, end - 1, mast->bn, 0); 2417 mast->l->min = mast->orig_l->min; 2418 mast->orig_l->index = mast->orig_l->min; 2419 mast->bn->b_end = end + b_end; 2420 mast->l->offset += end; 2421 } 2422 2423 /* 2424 * mast_spanning_rebalance() - Rebalance nodes with nearest neighbour favouring 2425 * the node to the right. Checking the nodes to the right then the left at each 2426 * level upwards until root is reached. Free and destroy as needed. 2427 * Data is copied into the @mast->bn. 2428 * @mast: The maple_subtree_state. 
2429 */ 2430 static inline 2431 bool mast_spanning_rebalance(struct maple_subtree_state *mast) 2432 { 2433 struct ma_state r_tmp = *mast->orig_r; 2434 struct ma_state l_tmp = *mast->orig_l; 2435 struct maple_enode *ancestor = NULL; 2436 unsigned char start, end; 2437 unsigned char depth = 0; 2438 2439 r_tmp = *mast->orig_r; 2440 l_tmp = *mast->orig_l; 2441 do { 2442 mas_ascend(mast->orig_r); 2443 mas_ascend(mast->orig_l); 2444 depth++; 2445 if (!ancestor && 2446 (mast->orig_r->node == mast->orig_l->node)) { 2447 ancestor = mast->orig_r->node; 2448 end = mast->orig_r->offset - 1; 2449 start = mast->orig_l->offset + 1; 2450 } 2451 2452 if (mast->orig_r->offset < mas_data_end(mast->orig_r)) { 2453 if (!ancestor) { 2454 ancestor = mast->orig_r->node; 2455 start = 0; 2456 } 2457 2458 mast->orig_r->offset++; 2459 do { 2460 mas_descend(mast->orig_r); 2461 mast->orig_r->offset = 0; 2462 depth--; 2463 } while (depth); 2464 2465 mast_rebalance_next(mast); 2466 do { 2467 unsigned char l_off = 0; 2468 struct maple_enode *child = r_tmp.node; 2469 2470 mas_ascend(&r_tmp); 2471 if (ancestor == r_tmp.node) 2472 l_off = start; 2473 2474 if (r_tmp.offset) 2475 r_tmp.offset--; 2476 2477 if (l_off < r_tmp.offset) 2478 mas_topiary_range(&r_tmp, mast->destroy, 2479 l_off, r_tmp.offset); 2480 2481 if (l_tmp.node != child) 2482 mat_add(mast->free, child); 2483 2484 } while (r_tmp.node != ancestor); 2485 2486 *mast->orig_l = l_tmp; 2487 return true; 2488 2489 } else if (mast->orig_l->offset != 0) { 2490 if (!ancestor) { 2491 ancestor = mast->orig_l->node; 2492 end = mas_data_end(mast->orig_l); 2493 } 2494 2495 mast->orig_l->offset--; 2496 do { 2497 mas_descend(mast->orig_l); 2498 mast->orig_l->offset = 2499 mas_data_end(mast->orig_l); 2500 depth--; 2501 } while (depth); 2502 2503 mast_rebalance_prev(mast); 2504 do { 2505 unsigned char r_off; 2506 struct maple_enode *child = l_tmp.node; 2507 2508 mas_ascend(&l_tmp); 2509 if (ancestor == l_tmp.node) 2510 r_off = end; 2511 else 2512 r_off = mas_data_end(&l_tmp); 2513 2514 if (l_tmp.offset < r_off) 2515 l_tmp.offset++; 2516 2517 if (l_tmp.offset < r_off) 2518 mas_topiary_range(&l_tmp, mast->destroy, 2519 l_tmp.offset, r_off); 2520 2521 if (r_tmp.node != child) 2522 mat_add(mast->free, child); 2523 2524 } while (l_tmp.node != ancestor); 2525 2526 *mast->orig_r = r_tmp; 2527 return true; 2528 } 2529 } while (!mte_is_root(mast->orig_r->node)); 2530 2531 *mast->orig_r = r_tmp; 2532 *mast->orig_l = l_tmp; 2533 return false; 2534 } 2535 2536 /* 2537 * mast_ascend_free() - Add current original maple state nodes to the free list 2538 * and ascend. 2539 * @mast: the maple subtree state. 2540 * 2541 * Ascend the original left and right sides and add the previous nodes to the 2542 * free list. Set the slots to point to the correct location in the new nodes. 
2543 */ 2544 static inline void 2545 mast_ascend_free(struct maple_subtree_state *mast) 2546 { 2547 MA_WR_STATE(wr_mas, mast->orig_r, NULL); 2548 struct maple_enode *left = mast->orig_l->node; 2549 struct maple_enode *right = mast->orig_r->node; 2550 2551 mas_ascend(mast->orig_l); 2552 mas_ascend(mast->orig_r); 2553 mat_add(mast->free, left); 2554 2555 if (left != right) 2556 mat_add(mast->free, right); 2557 2558 mast->orig_r->offset = 0; 2559 mast->orig_r->index = mast->r->max; 2560 /* last should be larger than or equal to index */ 2561 if (mast->orig_r->last < mast->orig_r->index) 2562 mast->orig_r->last = mast->orig_r->index; 2563 /* 2564 * The node may not contain the value so set slot to ensure all 2565 * of the nodes contents are freed or destroyed. 2566 */ 2567 wr_mas.type = mte_node_type(mast->orig_r->node); 2568 mas_wr_node_walk(&wr_mas); 2569 /* Set up the left side of things */ 2570 mast->orig_l->offset = 0; 2571 mast->orig_l->index = mast->l->min; 2572 wr_mas.mas = mast->orig_l; 2573 wr_mas.type = mte_node_type(mast->orig_l->node); 2574 mas_wr_node_walk(&wr_mas); 2575 2576 mast->bn->type = wr_mas.type; 2577 } 2578 2579 /* 2580 * mas_new_ma_node() - Create and return a new maple node. Helper function. 2581 * @mas: the maple state with the allocations. 2582 * @b_node: the maple_big_node with the type encoding. 2583 * 2584 * Use the node type from the maple_big_node to allocate a new node from the 2585 * ma_state. This function exists mainly for code readability. 2586 * 2587 * Return: A new maple encoded node 2588 */ 2589 static inline struct maple_enode 2590 *mas_new_ma_node(struct ma_state *mas, struct maple_big_node *b_node) 2591 { 2592 return mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), b_node->type); 2593 } 2594 2595 /* 2596 * mas_mab_to_node() - Set up right and middle nodes 2597 * 2598 * @mas: the maple state that contains the allocations. 2599 * @b_node: the node which contains the data. 2600 * @left: The pointer which will have the left node 2601 * @right: The pointer which may have the right node 2602 * @middle: the pointer which may have the middle node (rare) 2603 * @mid_split: the split location for the middle node 2604 * 2605 * Return: the split of left. 2606 */ 2607 static inline unsigned char mas_mab_to_node(struct ma_state *mas, 2608 struct maple_big_node *b_node, struct maple_enode **left, 2609 struct maple_enode **right, struct maple_enode **middle, 2610 unsigned char *mid_split, unsigned long min) 2611 { 2612 unsigned char split = 0; 2613 unsigned char slot_count = mt_slots[b_node->type]; 2614 2615 *left = mas_new_ma_node(mas, b_node); 2616 *right = NULL; 2617 *middle = NULL; 2618 *mid_split = 0; 2619 2620 if (b_node->b_end < slot_count) { 2621 split = b_node->b_end; 2622 } else { 2623 split = mab_calc_split(mas, b_node, mid_split, min); 2624 *right = mas_new_ma_node(mas, b_node); 2625 } 2626 2627 if (*mid_split) 2628 *middle = mas_new_ma_node(mas, b_node); 2629 2630 return split; 2631 2632 } 2633 2634 /* 2635 * mab_set_b_end() - Add entry to b_node at b_node->b_end and increment the end 2636 * pointer. 2637 * @b_node - the big node to add the entry 2638 * @mas - the maple state to get the pivot (mas->max) 2639 * @entry - the entry to add, if NULL nothing happens. 
2640 */ 2641 static inline void mab_set_b_end(struct maple_big_node *b_node, 2642 struct ma_state *mas, 2643 void *entry) 2644 { 2645 if (!entry) 2646 return; 2647 2648 b_node->slot[b_node->b_end] = entry; 2649 if (mt_is_alloc(mas->tree)) 2650 b_node->gap[b_node->b_end] = mas_max_gap(mas); 2651 b_node->pivot[b_node->b_end++] = mas->max; 2652 } 2653 2654 /* 2655 * mas_set_split_parent() - combine_then_separate helper function. Sets the parent 2656 * of @mas->node to either @left or @right, depending on @slot and @split 2657 * 2658 * @mas - the maple state with the node that needs a parent 2659 * @left - possible parent 1 2660 * @right - possible parent 2 2661 * @slot - the slot the mas->node was placed 2662 * @split - the split location between @left and @right 2663 */ 2664 static inline void mas_set_split_parent(struct ma_state *mas, 2665 struct maple_enode *left, 2666 struct maple_enode *right, 2667 unsigned char *slot, unsigned char split) 2668 { 2669 if (mas_is_none(mas)) 2670 return; 2671 2672 if ((*slot) <= split) 2673 mte_set_parent(mas->node, left, *slot); 2674 else if (right) 2675 mte_set_parent(mas->node, right, (*slot) - split - 1); 2676 2677 (*slot)++; 2678 } 2679 2680 /* 2681 * mte_mid_split_check() - Check if the next node passes the mid-split 2682 * @**l: Pointer to left encoded maple node. 2683 * @**m: Pointer to middle encoded maple node. 2684 * @**r: Pointer to right encoded maple node. 2685 * @slot: The offset 2686 * @*split: The split location. 2687 * @mid_split: The middle split. 2688 */ 2689 static inline void mte_mid_split_check(struct maple_enode **l, 2690 struct maple_enode **r, 2691 struct maple_enode *right, 2692 unsigned char slot, 2693 unsigned char *split, 2694 unsigned char mid_split) 2695 { 2696 if (*r == right) 2697 return; 2698 2699 if (slot < mid_split) 2700 return; 2701 2702 *l = *r; 2703 *r = right; 2704 *split = mid_split; 2705 } 2706 2707 /* 2708 * mast_set_split_parents() - Helper function to set three nodes parents. Slot 2709 * is taken from @mast->l. 2710 * @mast - the maple subtree state 2711 * @left - the left node 2712 * @right - the right node 2713 * @split - the split location. 2714 */ 2715 static inline void mast_set_split_parents(struct maple_subtree_state *mast, 2716 struct maple_enode *left, 2717 struct maple_enode *middle, 2718 struct maple_enode *right, 2719 unsigned char split, 2720 unsigned char mid_split) 2721 { 2722 unsigned char slot; 2723 struct maple_enode *l = left; 2724 struct maple_enode *r = right; 2725 2726 if (mas_is_none(mast->l)) 2727 return; 2728 2729 if (middle) 2730 r = middle; 2731 2732 slot = mast->l->offset; 2733 2734 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2735 mas_set_split_parent(mast->l, l, r, &slot, split); 2736 2737 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2738 mas_set_split_parent(mast->m, l, r, &slot, split); 2739 2740 mte_mid_split_check(&l, &r, right, slot, &split, mid_split); 2741 mas_set_split_parent(mast->r, l, r, &slot, split); 2742 } 2743 2744 /* 2745 * mas_wmb_replace() - Write memory barrier and replace 2746 * @mas: The maple state 2747 * @free: the maple topiary list of nodes to free 2748 * @destroy: The maple topiary list of nodes to destroy (walk and free) 2749 * 2750 * Updates gap as necessary. 
2751 */ 2752 static inline void mas_wmb_replace(struct ma_state *mas, 2753 struct ma_topiary *free, 2754 struct ma_topiary *destroy) 2755 { 2756 /* All nodes must see old data as dead prior to replacing that data */ 2757 smp_wmb(); /* Needed for RCU */ 2758 2759 /* Insert the new data in the tree */ 2760 mas_replace(mas, true); 2761 2762 if (!mte_is_leaf(mas->node)) 2763 mas_descend_adopt(mas); 2764 2765 mas_mat_free(mas, free); 2766 2767 if (destroy) 2768 mas_mat_destroy(mas, destroy); 2769 2770 if (mte_is_leaf(mas->node)) 2771 return; 2772 2773 mas_update_gap(mas); 2774 } 2775 2776 /* 2777 * mast_new_root() - Set a new tree root during subtree creation 2778 * @mast: The maple subtree state 2779 * @mas: The maple state 2780 */ 2781 static inline void mast_new_root(struct maple_subtree_state *mast, 2782 struct ma_state *mas) 2783 { 2784 mas_mn(mast->l)->parent = 2785 ma_parent_ptr(((unsigned long)mas->tree | MA_ROOT_PARENT)); 2786 if (!mte_dead_node(mast->orig_l->node) && 2787 !mte_is_root(mast->orig_l->node)) { 2788 do { 2789 mast_ascend_free(mast); 2790 mast_topiary(mast); 2791 } while (!mte_is_root(mast->orig_l->node)); 2792 } 2793 if ((mast->orig_l->node != mas->node) && 2794 (mast->l->depth > mas_mt_height(mas))) { 2795 mat_add(mast->free, mas->node); 2796 } 2797 } 2798 2799 /* 2800 * mast_cp_to_nodes() - Copy data out to nodes. 2801 * @mast: The maple subtree state 2802 * @left: The left encoded maple node 2803 * @middle: The middle encoded maple node 2804 * @right: The right encoded maple node 2805 * @split: The location to split between left and (middle ? middle : right) 2806 * @mid_split: The location to split between middle and right. 2807 */ 2808 static inline void mast_cp_to_nodes(struct maple_subtree_state *mast, 2809 struct maple_enode *left, struct maple_enode *middle, 2810 struct maple_enode *right, unsigned char split, unsigned char mid_split) 2811 { 2812 bool new_lmax = true; 2813 2814 mast->l->node = mte_node_or_none(left); 2815 mast->m->node = mte_node_or_none(middle); 2816 mast->r->node = mte_node_or_none(right); 2817 2818 mast->l->min = mast->orig_l->min; 2819 if (split == mast->bn->b_end) { 2820 mast->l->max = mast->orig_r->max; 2821 new_lmax = false; 2822 } 2823 2824 mab_mas_cp(mast->bn, 0, split, mast->l, new_lmax); 2825 2826 if (middle) { 2827 mab_mas_cp(mast->bn, 1 + split, mid_split, mast->m, true); 2828 mast->m->min = mast->bn->pivot[split] + 1; 2829 split = mid_split; 2830 } 2831 2832 mast->r->max = mast->orig_r->max; 2833 if (right) { 2834 mab_mas_cp(mast->bn, 1 + split, mast->bn->b_end, mast->r, false); 2835 mast->r->min = mast->bn->pivot[split] + 1; 2836 } 2837 } 2838 2839 /* 2840 * mast_combine_cp_left - Copy in the original left side of the tree into the 2841 * combined data set in the maple subtree state big node. 2842 * @mast: The maple subtree state 2843 */ 2844 static inline void mast_combine_cp_left(struct maple_subtree_state *mast) 2845 { 2846 unsigned char l_slot = mast->orig_l->offset; 2847 2848 if (!l_slot) 2849 return; 2850 2851 mas_mab_cp(mast->orig_l, 0, l_slot - 1, mast->bn, 0); 2852 } 2853 2854 /* 2855 * mast_combine_cp_right: Copy in the original right side of the tree into the 2856 * combined data set in the maple subtree state big node. 
2857 * @mast: The maple subtree state 2858 */ 2859 static inline void mast_combine_cp_right(struct maple_subtree_state *mast) 2860 { 2861 if (mast->bn->pivot[mast->bn->b_end - 1] >= mast->orig_r->max) 2862 return; 2863 2864 mas_mab_cp(mast->orig_r, mast->orig_r->offset + 1, 2865 mt_slot_count(mast->orig_r->node), mast->bn, 2866 mast->bn->b_end); 2867 mast->orig_r->last = mast->orig_r->max; 2868 } 2869 2870 /* 2871 * mast_sufficient: Check if the maple subtree state has enough data in the big 2872 * node to create at least one sufficient node 2873 * @mast: the maple subtree state 2874 */ 2875 static inline bool mast_sufficient(struct maple_subtree_state *mast) 2876 { 2877 if (mast->bn->b_end > mt_min_slot_count(mast->orig_l->node)) 2878 return true; 2879 2880 return false; 2881 } 2882 2883 /* 2884 * mast_overflow: Check if there is too much data in the subtree state for a 2885 * single node. 2886 * @mast: The maple subtree state 2887 */ 2888 static inline bool mast_overflow(struct maple_subtree_state *mast) 2889 { 2890 if (mast->bn->b_end >= mt_slot_count(mast->orig_l->node)) 2891 return true; 2892 2893 return false; 2894 } 2895 2896 static inline void *mtree_range_walk(struct ma_state *mas) 2897 { 2898 unsigned long *pivots; 2899 unsigned char offset; 2900 struct maple_node *node; 2901 struct maple_enode *next, *last; 2902 enum maple_type type; 2903 void __rcu **slots; 2904 unsigned char end; 2905 unsigned long max, min; 2906 unsigned long prev_max, prev_min; 2907 2908 next = mas->node; 2909 min = mas->min; 2910 max = mas->max; 2911 do { 2912 offset = 0; 2913 last = next; 2914 node = mte_to_node(next); 2915 type = mte_node_type(next); 2916 pivots = ma_pivots(node, type); 2917 end = ma_data_end(node, type, pivots, max); 2918 if (unlikely(ma_dead_node(node))) 2919 goto dead_node; 2920 2921 if (pivots[offset] >= mas->index) { 2922 prev_max = max; 2923 prev_min = min; 2924 max = pivots[offset]; 2925 goto next; 2926 } 2927 2928 do { 2929 offset++; 2930 } while ((offset < end) && (pivots[offset] < mas->index)); 2931 2932 prev_min = min; 2933 min = pivots[offset - 1] + 1; 2934 prev_max = max; 2935 if (likely(offset < end && pivots[offset])) 2936 max = pivots[offset]; 2937 2938 next: 2939 slots = ma_slots(node, type); 2940 next = mt_slot(mas->tree, slots, offset); 2941 if (unlikely(ma_dead_node(node))) 2942 goto dead_node; 2943 } while (!ma_is_leaf(type)); 2944 2945 mas->offset = offset; 2946 mas->index = min; 2947 mas->last = max; 2948 mas->min = prev_min; 2949 mas->max = prev_max; 2950 mas->node = last; 2951 return (void *)next; 2952 2953 dead_node: 2954 mas_reset(mas); 2955 return NULL; 2956 } 2957 2958 /* 2959 * mas_spanning_rebalance() - Rebalance across two nodes which may not be peers. 2960 * @mas: The starting maple state 2961 * @mast: The maple_subtree_state, keeps track of 4 maple states. 2962 * @count: The estimated count of iterations needed. 2963 * 2964 * Follow the tree upwards from @l_mas and @r_mas for @count, or until the root 2965 * is hit. First @b_node is split into two entries which are inserted into the 2966 * next iteration of the loop. @b_node is returned populated with the final 2967 * iteration. @mas is used to obtain allocations. orig_l_mas keeps track of the 2968 * nodes that will remain active by using orig_l_mas->index and orig_l_mas->last 2969 * to account of what has been copied into the new sub-tree. The update of 2970 * orig_l_mas->last is used in mas_consume to find the slots that will need to 2971 * be either freed or destroyed. 
orig_l_mas->depth keeps track of the height of
2972  * the new sub-tree in case the sub-tree becomes the full tree.
2973  *
2974  * Return: the number of elements in b_node during the last loop.
2975  */
2976 static int mas_spanning_rebalance(struct ma_state *mas,
2977 		struct maple_subtree_state *mast, unsigned char count)
2978 {
2979 	unsigned char split, mid_split;
2980 	unsigned char slot = 0;
2981 	struct maple_enode *left = NULL, *middle = NULL, *right = NULL;
2982 
2983 	MA_STATE(l_mas, mas->tree, mas->index, mas->index);
2984 	MA_STATE(r_mas, mas->tree, mas->index, mas->last);
2985 	MA_STATE(m_mas, mas->tree, mas->index, mas->index);
2986 	MA_TOPIARY(free, mas->tree);
2987 	MA_TOPIARY(destroy, mas->tree);
2988 
2989 	/*
2990 	 * The tree needs to be rebalanced and leaves need to be kept at the same level.
2991 	 * Rebalancing is done by use of the ``struct maple_topiary``.
2992 	 */
2993 	mast->l = &l_mas;
2994 	mast->m = &m_mas;
2995 	mast->r = &r_mas;
2996 	mast->free = &free;
2997 	mast->destroy = &destroy;
2998 	l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
2999 
3000 	/* Rebalance if this is not the whole tree and the data is insufficient. */
3001 	if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
3002 	    unlikely(mast->bn->b_end <= mt_min_slots[mast->bn->type]))
3003 		mast_spanning_rebalance(mast);
3004 
3005 	mast->orig_l->depth = 0;
3006 
3007 	/*
3008 	 * Each level of the tree is examined and balanced, pushing data to the left or
3009 	 * right, or rebalancing against left or right nodes is employed to avoid
3010 	 * rippling up the tree to limit the amount of churn. Once a new sub-section of
3011 	 * the tree is created, there may be a mix of new and old nodes. The old nodes
3012 	 * will have the incorrect parent pointers and currently be in two trees: the
3013 	 * original tree and the partially new tree. To remedy the parent pointers in
3014 	 * the old tree, the new data is swapped into the active tree and a walk down
3015 	 * the tree is performed and the parent pointers are updated.
3016 	 * See mas_descend_adopt() for more information.
3017 	 */
3018 	while (count--) {
3019 		mast->bn->b_end--;
3020 		mast->bn->type = mte_node_type(mast->orig_l->node);
3021 		split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle,
3022 					&mid_split, mast->orig_l->min);
3023 		mast_set_split_parents(mast, left, middle, right, split,
3024 				       mid_split);
3025 		mast_cp_to_nodes(mast, left, middle, right, split, mid_split);
3026 
3027 		/*
3028 		 * Reset mast->bn so it can receive the data from the next
3029 		 * level of the tree in the next iteration.
3030 		 */
3031 		memset(mast->bn, 0, sizeof(struct maple_big_node));
3032 		mast->bn->type = mte_node_type(left);
3033 		mast->orig_l->depth++;
3034 
3035 		/* Root already stored in l->node. */
3036 		if (mas_is_root_limits(mast->l))
3037 			goto new_root;
3038 
3039 		mast_ascend_free(mast);
3040 		mast_combine_cp_left(mast);
3041 		l_mas.offset = mast->bn->b_end;
3042 		mab_set_b_end(mast->bn, &l_mas, left);
3043 		mab_set_b_end(mast->bn, &m_mas, middle);
3044 		mab_set_b_end(mast->bn, &r_mas, right);
3045 
3046 		/* Copy anything necessary out of the right node. */
3047 		mast_combine_cp_right(mast);
3048 		mast_topiary(mast);
3049 		mast->orig_l->last = mast->orig_l->max;
3050 
3051 		if (mast_sufficient(mast))
3052 			continue;
3053 
3054 		if (mast_overflow(mast))
3055 			continue;
3056 
3057 		/* May be a new root stored in mast->bn */
3058 		if (mas_is_root_limits(mast->orig_l))
3059 			break;
3060 
3061 		mast_spanning_rebalance(mast);
3062 
3063 		/* rebalancing from other nodes may require another loop.
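 * (Illustrative note: a donation from a neighbour can leave mast->bn
 * holding more data than a single node fits, so the loop grants itself
 * one extra iteration to split that data.)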
*/ 3064 if (!count) 3065 count++; 3066 } 3067 3068 l_mas.node = mt_mk_node(ma_mnode_ptr(mas_pop_node(mas)), 3069 mte_node_type(mast->orig_l->node)); 3070 mast->orig_l->depth++; 3071 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); 3072 mte_set_parent(left, l_mas.node, slot); 3073 if (middle) 3074 mte_set_parent(middle, l_mas.node, ++slot); 3075 3076 if (right) 3077 mte_set_parent(right, l_mas.node, ++slot); 3078 3079 if (mas_is_root_limits(mast->l)) { 3080 new_root: 3081 mast_new_root(mast, mas); 3082 } else { 3083 mas_mn(&l_mas)->parent = mas_mn(mast->orig_l)->parent; 3084 } 3085 3086 if (!mte_dead_node(mast->orig_l->node)) 3087 mat_add(&free, mast->orig_l->node); 3088 3089 mas->depth = mast->orig_l->depth; 3090 *mast->orig_l = l_mas; 3091 mte_set_node_dead(mas->node); 3092 3093 /* Set up mas for insertion. */ 3094 mast->orig_l->depth = mas->depth; 3095 mast->orig_l->alloc = mas->alloc; 3096 *mas = *mast->orig_l; 3097 mas_wmb_replace(mas, &free, &destroy); 3098 mtree_range_walk(mas); 3099 return mast->bn->b_end; 3100 } 3101 3102 /* 3103 * mas_rebalance() - Rebalance a given node. 3104 * @mas: The maple state 3105 * @b_node: The big maple node. 3106 * 3107 * Rebalance two nodes into a single node or two new nodes that are sufficient. 3108 * Continue upwards until tree is sufficient. 3109 * 3110 * Return: the number of elements in b_node during the last loop. 3111 */ 3112 static inline int mas_rebalance(struct ma_state *mas, 3113 struct maple_big_node *b_node) 3114 { 3115 char empty_count = mas_mt_height(mas); 3116 struct maple_subtree_state mast; 3117 unsigned char shift, b_end = ++b_node->b_end; 3118 3119 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3120 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3121 3122 trace_ma_op(__func__, mas); 3123 3124 /* 3125 * Rebalancing occurs if a node is insufficient. Data is rebalanced 3126 * against the node to the right if it exists, otherwise the node to the 3127 * left of this node is rebalanced against this node. If rebalancing 3128 * causes just one node to be produced instead of two, then the parent 3129 * is also examined and rebalanced if it is insufficient. Every level 3130 * tries to combine the data in the same way. If one node contains the 3131 * entire range of the tree, then that node is used as a new root node. 3132 */ 3133 mas_node_count(mas, 1 + empty_count * 3); 3134 if (mas_is_err(mas)) 3135 return 0; 3136 3137 mast.orig_l = &l_mas; 3138 mast.orig_r = &r_mas; 3139 mast.bn = b_node; 3140 mast.bn->type = mte_node_type(mas->node); 3141 3142 l_mas = r_mas = *mas; 3143 3144 if (mas_next_sibling(&r_mas)) { 3145 mas_mab_cp(&r_mas, 0, mt_slot_count(r_mas.node), b_node, b_end); 3146 r_mas.last = r_mas.index = r_mas.max; 3147 } else { 3148 mas_prev_sibling(&l_mas); 3149 shift = mas_data_end(&l_mas) + 1; 3150 mab_shift_right(b_node, shift); 3151 mas->offset += shift; 3152 mas_mab_cp(&l_mas, 0, shift - 1, b_node, 0); 3153 b_node->b_end = shift + b_end; 3154 l_mas.index = l_mas.last = l_mas.min; 3155 } 3156 3157 return mas_spanning_rebalance(mas, &mast, empty_count); 3158 } 3159 3160 /* 3161 * mas_destroy_rebalance() - Rebalance left-most node while destroying the maple 3162 * state. 3163 * @mas: The maple state 3164 * @end: The end of the left-most node. 3165 * 3166 * During a mass-insert event (such as forking), it may be necessary to 3167 * rebalance the left-most node when it is not sufficient. 
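 *
 * Context sketch (illustrative, using the public bulk API): a bulk store
 * sequence that can leave the left-most node under-filled, repaired by
 * mas_destroy() calling this function:
 *
 *	mas_expected_entries(&mas, count);
 *	(many stores via mas_store_gfp(&mas, entry, GFP_KERNEL))
 *	mas_destroy(&mas);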
3168 */ 3169 static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end) 3170 { 3171 enum maple_type mt = mte_node_type(mas->node); 3172 struct maple_node reuse, *newnode, *parent, *new_left, *left, *node; 3173 struct maple_enode *eparent; 3174 unsigned char offset, tmp, split = mt_slots[mt] / 2; 3175 void __rcu **l_slots, **slots; 3176 unsigned long *l_pivs, *pivs, gap; 3177 bool in_rcu = mt_in_rcu(mas->tree); 3178 3179 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3180 3181 l_mas = *mas; 3182 mas_prev_sibling(&l_mas); 3183 3184 /* set up node. */ 3185 if (in_rcu) { 3186 /* Allocate for both left and right as well as parent. */ 3187 mas_node_count(mas, 3); 3188 if (mas_is_err(mas)) 3189 return; 3190 3191 newnode = mas_pop_node(mas); 3192 } else { 3193 newnode = &reuse; 3194 } 3195 3196 node = mas_mn(mas); 3197 newnode->parent = node->parent; 3198 slots = ma_slots(newnode, mt); 3199 pivs = ma_pivots(newnode, mt); 3200 left = mas_mn(&l_mas); 3201 l_slots = ma_slots(left, mt); 3202 l_pivs = ma_pivots(left, mt); 3203 if (!l_slots[split]) 3204 split++; 3205 tmp = mas_data_end(&l_mas) - split; 3206 3207 memcpy(slots, l_slots + split + 1, sizeof(void *) * tmp); 3208 memcpy(pivs, l_pivs + split + 1, sizeof(unsigned long) * tmp); 3209 pivs[tmp] = l_mas.max; 3210 memcpy(slots + tmp, ma_slots(node, mt), sizeof(void *) * end); 3211 memcpy(pivs + tmp, ma_pivots(node, mt), sizeof(unsigned long) * end); 3212 3213 l_mas.max = l_pivs[split]; 3214 mas->min = l_mas.max + 1; 3215 eparent = mt_mk_node(mte_parent(l_mas.node), 3216 mas_parent_enum(&l_mas, l_mas.node)); 3217 tmp += end; 3218 if (!in_rcu) { 3219 unsigned char max_p = mt_pivots[mt]; 3220 unsigned char max_s = mt_slots[mt]; 3221 3222 if (tmp < max_p) 3223 memset(pivs + tmp, 0, 3224 sizeof(unsigned long *) * (max_p - tmp)); 3225 3226 if (tmp < mt_slots[mt]) 3227 memset(slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3228 3229 memcpy(node, newnode, sizeof(struct maple_node)); 3230 ma_set_meta(node, mt, 0, tmp - 1); 3231 mte_set_pivot(eparent, mte_parent_slot(l_mas.node), 3232 l_pivs[split]); 3233 3234 /* Remove data from l_pivs. */ 3235 tmp = split + 1; 3236 memset(l_pivs + tmp, 0, sizeof(unsigned long) * (max_p - tmp)); 3237 memset(l_slots + tmp, 0, sizeof(void *) * (max_s - tmp)); 3238 ma_set_meta(left, mt, 0, split); 3239 3240 goto done; 3241 } 3242 3243 /* RCU requires replacing both l_mas, mas, and parent. */ 3244 mas->node = mt_mk_node(newnode, mt); 3245 ma_set_meta(newnode, mt, 0, tmp); 3246 3247 new_left = mas_pop_node(mas); 3248 new_left->parent = left->parent; 3249 mt = mte_node_type(l_mas.node); 3250 slots = ma_slots(new_left, mt); 3251 pivs = ma_pivots(new_left, mt); 3252 memcpy(slots, l_slots, sizeof(void *) * split); 3253 memcpy(pivs, l_pivs, sizeof(unsigned long) * split); 3254 ma_set_meta(new_left, mt, 0, split); 3255 l_mas.node = mt_mk_node(new_left, mt); 3256 3257 /* replace parent. 
*/ 3258 offset = mte_parent_slot(mas->node); 3259 mt = mas_parent_enum(&l_mas, l_mas.node); 3260 parent = mas_pop_node(mas); 3261 slots = ma_slots(parent, mt); 3262 pivs = ma_pivots(parent, mt); 3263 memcpy(parent, mte_to_node(eparent), sizeof(struct maple_node)); 3264 rcu_assign_pointer(slots[offset], mas->node); 3265 rcu_assign_pointer(slots[offset - 1], l_mas.node); 3266 pivs[offset - 1] = l_mas.max; 3267 eparent = mt_mk_node(parent, mt); 3268 done: 3269 gap = mas_leaf_max_gap(mas); 3270 mte_set_gap(eparent, mte_parent_slot(mas->node), gap); 3271 gap = mas_leaf_max_gap(&l_mas); 3272 mte_set_gap(eparent, mte_parent_slot(l_mas.node), gap); 3273 mas_ascend(mas); 3274 3275 if (in_rcu) 3276 mas_replace(mas, false); 3277 3278 mas_update_gap(mas); 3279 } 3280 3281 /* 3282 * mas_split_final_node() - Split the final node in a subtree operation. 3283 * @mast: the maple subtree state 3284 * @mas: The maple state 3285 * @height: The height of the tree in case it's a new root. 3286 */ 3287 static inline bool mas_split_final_node(struct maple_subtree_state *mast, 3288 struct ma_state *mas, int height) 3289 { 3290 struct maple_enode *ancestor; 3291 3292 if (mte_is_root(mas->node)) { 3293 if (mt_is_alloc(mas->tree)) 3294 mast->bn->type = maple_arange_64; 3295 else 3296 mast->bn->type = maple_range_64; 3297 mas->depth = height; 3298 } 3299 /* 3300 * Only a single node is used here, could be root. 3301 * The Big_node data should just fit in a single node. 3302 */ 3303 ancestor = mas_new_ma_node(mas, mast->bn); 3304 mte_set_parent(mast->l->node, ancestor, mast->l->offset); 3305 mte_set_parent(mast->r->node, ancestor, mast->r->offset); 3306 mte_to_node(ancestor)->parent = mas_mn(mas)->parent; 3307 3308 mast->l->node = ancestor; 3309 mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true); 3310 mas->offset = mast->bn->b_end - 1; 3311 return true; 3312 } 3313 3314 /* 3315 * mast_fill_bnode() - Copy data into the big node in the subtree state 3316 * @mast: The maple subtree state 3317 * @mas: the maple state 3318 * @skip: The number of entries to skip for new nodes insertion. 3319 */ 3320 static inline void mast_fill_bnode(struct maple_subtree_state *mast, 3321 struct ma_state *mas, 3322 unsigned char skip) 3323 { 3324 bool cp = true; 3325 struct maple_enode *old = mas->node; 3326 unsigned char split; 3327 3328 memset(mast->bn->gap, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->gap)); 3329 memset(mast->bn->slot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->slot)); 3330 memset(mast->bn->pivot, 0, sizeof(unsigned long) * ARRAY_SIZE(mast->bn->pivot)); 3331 mast->bn->b_end = 0; 3332 3333 if (mte_is_root(mas->node)) { 3334 cp = false; 3335 } else { 3336 mas_ascend(mas); 3337 mat_add(mast->free, old); 3338 mas->offset = mte_parent_slot(mas->node); 3339 } 3340 3341 if (cp && mast->l->offset) 3342 mas_mab_cp(mas, 0, mast->l->offset - 1, mast->bn, 0); 3343 3344 split = mast->bn->b_end; 3345 mab_set_b_end(mast->bn, mast->l, mast->l->node); 3346 mast->r->offset = mast->bn->b_end; 3347 mab_set_b_end(mast->bn, mast->r, mast->r->node); 3348 if (mast->bn->pivot[mast->bn->b_end - 1] == mas->max) 3349 cp = false; 3350 3351 if (cp) 3352 mas_mab_cp(mas, split + skip, mt_slot_count(mas->node) - 1, 3353 mast->bn, mast->bn->b_end); 3354 3355 mast->bn->b_end--; 3356 mast->bn->type = mte_node_type(mas->node); 3357 } 3358 3359 /* 3360 * mast_split_data() - Split the data in the subtree state big node into regular 3361 * nodes. 
3362 * @mast: The maple subtree state 3363 * @mas: The maple state 3364 * @split: The location to split the big node 3365 */ 3366 static inline void mast_split_data(struct maple_subtree_state *mast, 3367 struct ma_state *mas, unsigned char split) 3368 { 3369 unsigned char p_slot; 3370 3371 mab_mas_cp(mast->bn, 0, split, mast->l, true); 3372 mte_set_pivot(mast->r->node, 0, mast->r->max); 3373 mab_mas_cp(mast->bn, split + 1, mast->bn->b_end, mast->r, false); 3374 mast->l->offset = mte_parent_slot(mas->node); 3375 mast->l->max = mast->bn->pivot[split]; 3376 mast->r->min = mast->l->max + 1; 3377 if (mte_is_leaf(mas->node)) 3378 return; 3379 3380 p_slot = mast->orig_l->offset; 3381 mas_set_split_parent(mast->orig_l, mast->l->node, mast->r->node, 3382 &p_slot, split); 3383 mas_set_split_parent(mast->orig_r, mast->l->node, mast->r->node, 3384 &p_slot, split); 3385 } 3386 3387 /* 3388 * mas_push_data() - Instead of splitting a node, it is beneficial to push the 3389 * data to the right or left node if there is room. 3390 * @mas: The maple state 3391 * @height: The current height of the maple state 3392 * @mast: The maple subtree state 3393 * @left: Push left or not. 3394 * 3395 * Keeping the height of the tree low means faster lookups. 3396 * 3397 * Return: True if pushed, false otherwise. 3398 */ 3399 static inline bool mas_push_data(struct ma_state *mas, int height, 3400 struct maple_subtree_state *mast, bool left) 3401 { 3402 unsigned char slot_total = mast->bn->b_end; 3403 unsigned char end, space, split; 3404 3405 MA_STATE(tmp_mas, mas->tree, mas->index, mas->last); 3406 tmp_mas = *mas; 3407 tmp_mas.depth = mast->l->depth; 3408 3409 if (left && !mas_prev_sibling(&tmp_mas)) 3410 return false; 3411 else if (!left && !mas_next_sibling(&tmp_mas)) 3412 return false; 3413 3414 end = mas_data_end(&tmp_mas); 3415 slot_total += end; 3416 space = 2 * mt_slot_count(mas->node) - 2; 3417 /* -2 instead of -1 to ensure there isn't a triple split */ 3418 if (ma_is_leaf(mast->bn->type)) 3419 space--; 3420 3421 if (mas->max == ULONG_MAX) 3422 space--; 3423 3424 if (slot_total >= space) 3425 return false; 3426 3427 /* Get the data; Fill mast->bn */ 3428 mast->bn->b_end++; 3429 if (left) { 3430 mab_shift_right(mast->bn, end + 1); 3431 mas_mab_cp(&tmp_mas, 0, end, mast->bn, 0); 3432 mast->bn->b_end = slot_total + 1; 3433 } else { 3434 mas_mab_cp(&tmp_mas, 0, end, mast->bn, mast->bn->b_end); 3435 } 3436 3437 /* Configure mast for splitting of mast->bn */ 3438 split = mt_slots[mast->bn->type] - 2; 3439 if (left) { 3440 /* Switch mas to prev node */ 3441 mat_add(mast->free, mas->node); 3442 *mas = tmp_mas; 3443 /* Start using mast->l for the left side. */ 3444 tmp_mas.node = mast->l->node; 3445 *mast->l = tmp_mas; 3446 } else { 3447 mat_add(mast->free, tmp_mas.node); 3448 tmp_mas.node = mast->r->node; 3449 *mast->r = tmp_mas; 3450 split = slot_total - split; 3451 } 3452 split = mab_no_null_split(mast->bn, split, mt_slots[mast->bn->type]); 3453 /* Update parent slot for split calculation. */ 3454 if (left) 3455 mast->orig_l->offset += end + 1; 3456 3457 mast_split_data(mast, mas, split); 3458 mast_fill_bnode(mast, mas, 2); 3459 mas_split_final_node(mast, mas, height + 1); 3460 return true; 3461 } 3462 3463 /* 3464 * mas_split() - Split data that is too big for one node into two. 3465 * @mas: The maple state 3466 * @b_node: The maple big node 3467 * Return: 1 on success, 0 on failure. 
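 *
 * Shape sketch (illustrative): a full leaf splits into two leaves that
 * both keep spare slots, and the parent gains one more pivot:
 *
 *	before:	[ e0 e1 ... e15 ]
 *	after:	[ e0 ... e7 ] [ e8 ... e15 ]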
3468 */ 3469 static int mas_split(struct ma_state *mas, struct maple_big_node *b_node) 3470 { 3471 struct maple_subtree_state mast; 3472 int height = 0; 3473 unsigned char mid_split, split = 0; 3474 3475 /* 3476 * Splitting is handled differently from any other B-tree; the Maple 3477 * Tree splits upwards. Splitting up means that the split operation 3478 * occurs when the walk of the tree hits the leaves and not on the way 3479 * down. The reason for splitting up is that it is impossible to know 3480 * how much space will be needed until the leaf is (or leaves are) 3481 * reached. Since overwriting data is allowed and a range could 3482 * overwrite more than one range or result in changing one entry into 3 3483 * entries, it is impossible to know if a split is required until the 3484 * data is examined. 3485 * 3486 * Splitting is a balancing act between keeping allocations to a minimum 3487 * and avoiding a 'jitter' event where a tree is expanded to make room 3488 * for an entry followed by a contraction when the entry is removed. To 3489 * accomplish the balance, there are empty slots remaining in both left 3490 * and right nodes after a split. 3491 */ 3492 MA_STATE(l_mas, mas->tree, mas->index, mas->last); 3493 MA_STATE(r_mas, mas->tree, mas->index, mas->last); 3494 MA_STATE(prev_l_mas, mas->tree, mas->index, mas->last); 3495 MA_STATE(prev_r_mas, mas->tree, mas->index, mas->last); 3496 MA_TOPIARY(mat, mas->tree); 3497 3498 trace_ma_op(__func__, mas); 3499 mas->depth = mas_mt_height(mas); 3500 /* Allocation failures will happen early. */ 3501 mas_node_count(mas, 1 + mas->depth * 2); 3502 if (mas_is_err(mas)) 3503 return 0; 3504 3505 mast.l = &l_mas; 3506 mast.r = &r_mas; 3507 mast.orig_l = &prev_l_mas; 3508 mast.orig_r = &prev_r_mas; 3509 mast.free = &mat; 3510 mast.bn = b_node; 3511 3512 while (height++ <= mas->depth) { 3513 if (mt_slots[b_node->type] > b_node->b_end) { 3514 mas_split_final_node(&mast, mas, height); 3515 break; 3516 } 3517 3518 l_mas = r_mas = *mas; 3519 l_mas.node = mas_new_ma_node(mas, b_node); 3520 r_mas.node = mas_new_ma_node(mas, b_node); 3521 /* 3522 * Another way that 'jitter' is avoided is to terminate a split up early if the 3523 * left or right node has space to spare. This is referred to as "pushing left" 3524 * or "pushing right" and is similar to the B* tree, except the nodes left or 3525 * right can rarely be reused due to RCU, but the ripple upwards is halted which 3526 * is a significant savings. 3527 */ 3528 /* Try to push left. */ 3529 if (mas_push_data(mas, height, &mast, true)) 3530 break; 3531 3532 /* Try to push right. */ 3533 if (mas_push_data(mas, height, &mast, false)) 3534 break; 3535 3536 split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); 3537 mast_split_data(&mast, mas, split); 3538 /* 3539 * Usually correct, mab_mas_cp in the above call overwrites 3540 * r->max. 3541 */ 3542 mast.r->max = mas->max; 3543 mast_fill_bnode(&mast, mas, 1); 3544 prev_l_mas = *mast.l; 3545 prev_r_mas = *mast.r; 3546 } 3547 3548 /* Set the original node as dead */ 3549 mat_add(mast.free, mas->node); 3550 mas->node = l_mas.node; 3551 mas_wmb_replace(mas, mast.free, NULL); 3552 mtree_range_walk(mas); 3553 return 1; 3554 } 3555 3556 /* 3557 * mas_reuse_node() - Reuse the node to store the data. 3558 * @wr_mas: The maple write state 3559 * @bn: The maple big node 3560 * @end: The end of the data. 3561 * 3562 * Will always return false in RCU mode. 3563 * 3564 * Return: True if node was reused, false otherwise. 
3565 */ 3566 static inline bool mas_reuse_node(struct ma_wr_state *wr_mas, 3567 struct maple_big_node *bn, unsigned char end) 3568 { 3569 /* Need to be rcu safe. */ 3570 if (mt_in_rcu(wr_mas->mas->tree)) 3571 return false; 3572 3573 if (end > bn->b_end) { 3574 int clear = mt_slots[wr_mas->type] - bn->b_end; 3575 3576 memset(wr_mas->slots + bn->b_end, 0, sizeof(void *) * clear--); 3577 memset(wr_mas->pivots + bn->b_end, 0, sizeof(void *) * clear); 3578 } 3579 mab_mas_cp(bn, 0, bn->b_end, wr_mas->mas, false); 3580 return true; 3581 } 3582 3583 /* 3584 * mas_commit_b_node() - Commit the big node into the tree. 3585 * @wr_mas: The maple write state 3586 * @b_node: The maple big node 3587 * @end: The end of the data. 3588 */ 3589 static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas, 3590 struct maple_big_node *b_node, unsigned char end) 3591 { 3592 struct maple_node *node; 3593 unsigned char b_end = b_node->b_end; 3594 enum maple_type b_type = b_node->type; 3595 3596 if ((b_end < mt_min_slots[b_type]) && 3597 (!mte_is_root(wr_mas->mas->node)) && 3598 (mas_mt_height(wr_mas->mas) > 1)) 3599 return mas_rebalance(wr_mas->mas, b_node); 3600 3601 if (b_end >= mt_slots[b_type]) 3602 return mas_split(wr_mas->mas, b_node); 3603 3604 if (mas_reuse_node(wr_mas, b_node, end)) 3605 goto reuse_node; 3606 3607 mas_node_count(wr_mas->mas, 1); 3608 if (mas_is_err(wr_mas->mas)) 3609 return 0; 3610 3611 node = mas_pop_node(wr_mas->mas); 3612 node->parent = mas_mn(wr_mas->mas)->parent; 3613 wr_mas->mas->node = mt_mk_node(node, b_type); 3614 mab_mas_cp(b_node, 0, b_end, wr_mas->mas, false); 3615 mas_replace(wr_mas->mas, false); 3616 reuse_node: 3617 mas_update_gap(wr_mas->mas); 3618 return 1; 3619 } 3620 3621 /* 3622 * mas_root_expand() - Expand a root to a node 3623 * @mas: The maple state 3624 * @entry: The entry to store into the tree 3625 */ 3626 static inline int mas_root_expand(struct ma_state *mas, void *entry) 3627 { 3628 void *contents = mas_root_locked(mas); 3629 enum maple_type type = maple_leaf_64; 3630 struct maple_node *node; 3631 void __rcu **slots; 3632 unsigned long *pivots; 3633 int slot = 0; 3634 3635 mas_node_count(mas, 1); 3636 if (unlikely(mas_is_err(mas))) 3637 return 0; 3638 3639 node = mas_pop_node(mas); 3640 pivots = ma_pivots(node, type); 3641 slots = ma_slots(node, type); 3642 node->parent = ma_parent_ptr( 3643 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3644 mas->node = mt_mk_node(node, type); 3645 3646 if (mas->index) { 3647 if (contents) { 3648 rcu_assign_pointer(slots[slot], contents); 3649 if (likely(mas->index > 1)) 3650 slot++; 3651 } 3652 pivots[slot++] = mas->index - 1; 3653 } 3654 3655 rcu_assign_pointer(slots[slot], entry); 3656 mas->offset = slot; 3657 pivots[slot] = mas->last; 3658 if (mas->last != ULONG_MAX) 3659 slot++; 3660 mas->depth = 1; 3661 mas_set_height(mas); 3662 3663 /* swap the new root into the tree */ 3664 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3665 ma_set_meta(node, maple_leaf_64, 0, slot); 3666 return slot; 3667 } 3668 3669 static inline void mas_store_root(struct ma_state *mas, void *entry) 3670 { 3671 if (likely((mas->last != 0) || (mas->index != 0))) 3672 mas_root_expand(mas, entry); 3673 else if (((unsigned long) (entry) & 3) == 2) 3674 mas_root_expand(mas, entry); 3675 else { 3676 rcu_assign_pointer(mas->tree->ma_root, entry); 3677 mas->node = MAS_START; 3678 } 3679 } 3680 3681 /* 3682 * mas_is_span_wr() - Check if the write needs to be treated as a write that 3683 * spans the node. 
3684 * @mas: The maple state 3685 * @piv: The pivot value being written 3686 * @type: The maple node type 3687 * @entry: The data to write 3688 * 3689 * Spanning writes are writes that start in one node and end in another OR if 3690 * the write of a %NULL will cause the node to end with a %NULL. 3691 * 3692 * Return: True if this is a spanning write, false otherwise. 3693 */ 3694 static bool mas_is_span_wr(struct ma_wr_state *wr_mas) 3695 { 3696 unsigned long max; 3697 unsigned long last = wr_mas->mas->last; 3698 unsigned long piv = wr_mas->r_max; 3699 enum maple_type type = wr_mas->type; 3700 void *entry = wr_mas->entry; 3701 3702 /* Contained in this pivot */ 3703 if (piv > last) 3704 return false; 3705 3706 max = wr_mas->mas->max; 3707 if (unlikely(ma_is_leaf(type))) { 3708 /* Fits in the node, but may span slots. */ 3709 if (last < max) 3710 return false; 3711 3712 /* Writes to the end of the node but not null. */ 3713 if ((last == max) && entry) 3714 return false; 3715 3716 /* 3717 * Writing ULONG_MAX is not a spanning write regardless of the 3718 * value being written as long as the range fits in the node. 3719 */ 3720 if ((last == ULONG_MAX) && (last == max)) 3721 return false; 3722 } else if (piv == last) { 3723 if (entry) 3724 return false; 3725 3726 /* Detect spanning store wr walk */ 3727 if (last == ULONG_MAX) 3728 return false; 3729 } 3730 3731 trace_ma_write(__func__, wr_mas->mas, piv, entry); 3732 3733 return true; 3734 } 3735 3736 static inline void mas_wr_walk_descend(struct ma_wr_state *wr_mas) 3737 { 3738 wr_mas->type = mte_node_type(wr_mas->mas->node); 3739 mas_wr_node_walk(wr_mas); 3740 wr_mas->slots = ma_slots(wr_mas->node, wr_mas->type); 3741 } 3742 3743 static inline void mas_wr_walk_traverse(struct ma_wr_state *wr_mas) 3744 { 3745 wr_mas->mas->max = wr_mas->r_max; 3746 wr_mas->mas->min = wr_mas->r_min; 3747 wr_mas->mas->node = wr_mas->content; 3748 wr_mas->mas->offset = 0; 3749 wr_mas->mas->depth++; 3750 } 3751 /* 3752 * mas_wr_walk() - Walk the tree for a write. 3753 * @wr_mas: The maple write state 3754 * 3755 * Uses mas_slot_locked() and does not need to worry about dead nodes. 3756 * 3757 * Return: True if it's contained in a node, false on spanning write. 3758 */ 3759 static bool mas_wr_walk(struct ma_wr_state *wr_mas) 3760 { 3761 struct ma_state *mas = wr_mas->mas; 3762 3763 while (true) { 3764 mas_wr_walk_descend(wr_mas); 3765 if (unlikely(mas_is_span_wr(wr_mas))) 3766 return false; 3767 3768 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, 3769 mas->offset); 3770 if (ma_is_leaf(wr_mas->type)) 3771 return true; 3772 3773 mas_wr_walk_traverse(wr_mas); 3774 } 3775 3776 return true; 3777 } 3778 3779 static bool mas_wr_walk_index(struct ma_wr_state *wr_mas) 3780 { 3781 struct ma_state *mas = wr_mas->mas; 3782 3783 while (true) { 3784 mas_wr_walk_descend(wr_mas); 3785 wr_mas->content = mas_slot_locked(mas, wr_mas->slots, 3786 mas->offset); 3787 if (ma_is_leaf(wr_mas->type)) 3788 return true; 3789 mas_wr_walk_traverse(wr_mas); 3790 3791 } 3792 return true; 3793 } 3794 /* 3795 * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs. 
 * @l_wr_mas: The left maple write state
3797  * @r_wr_mas: The right maple write state
3798  */
3799 static inline void mas_extend_spanning_null(struct ma_wr_state *l_wr_mas,
3800 					    struct ma_wr_state *r_wr_mas)
3801 {
3802 	struct ma_state *r_mas = r_wr_mas->mas;
3803 	struct ma_state *l_mas = l_wr_mas->mas;
3804 	unsigned char l_slot;
3805 
3806 	l_slot = l_mas->offset;
3807 	if (!l_wr_mas->content)
3808 		l_mas->index = l_wr_mas->r_min;
3809 
3810 	if ((l_mas->index == l_wr_mas->r_min) &&
3811 	    (l_slot &&
3812 	     !mas_slot_locked(l_mas, l_wr_mas->slots, l_slot - 1))) {
3813 		if (l_slot > 1)
3814 			l_mas->index = l_wr_mas->pivots[l_slot - 2] + 1;
3815 		else
3816 			l_mas->index = l_mas->min;
3817 
3818 		l_mas->offset = l_slot - 1;
3819 	}
3820 
3821 	if (!r_wr_mas->content) {
3822 		if (r_mas->last < r_wr_mas->r_max)
3823 			r_mas->last = r_wr_mas->r_max;
3824 		r_mas->offset++;
3825 	} else if ((r_mas->last == r_wr_mas->r_max) &&
3826 		   (r_mas->last < r_mas->max) &&
3827 		   !mas_slot_locked(r_mas, r_wr_mas->slots, r_mas->offset + 1)) {
3828 		r_mas->last = mas_safe_pivot(r_mas, r_wr_mas->pivots,
3829 					     r_wr_mas->type, r_mas->offset + 1);
3830 		r_mas->offset++;
3831 	}
3832 }
3833 
3834 static inline void *mas_state_walk(struct ma_state *mas)
3835 {
3836 	void *entry;
3837 
3838 	entry = mas_start(mas);
3839 	if (mas_is_none(mas))
3840 		return NULL;
3841 
3842 	if (mas_is_ptr(mas))
3843 		return entry;
3844 
3845 	return mtree_range_walk(mas);
3846 }
3847 
3848 /*
3849  * mtree_lookup_walk() - Internal quick lookup that does not keep maple state up
3850  * to date.
3851  *
3852  * @mas: The maple state.
3853  *
3854  * Note: Leaves @mas in an undesirable state.
3855  * Return: The entry for @mas->index or %NULL on dead node.
3856  */
3857 static inline void *mtree_lookup_walk(struct ma_state *mas)
3858 {
3859 	unsigned long *pivots;
3860 	unsigned char offset;
3861 	struct maple_node *node;
3862 	struct maple_enode *next;
3863 	enum maple_type type;
3864 	void __rcu **slots;
3865 	unsigned char end;
3866 	unsigned long max;
3867 
3868 	next = mas->node;
3869 	max = ULONG_MAX;
3870 	do {
3871 		offset = 0;
3872 		node = mte_to_node(next);
3873 		type = mte_node_type(next);
3874 		pivots = ma_pivots(node, type);
3875 		end = ma_data_end(node, type, pivots, max);
3876 		if (unlikely(ma_dead_node(node)))
3877 			goto dead_node;
3878 
3879 		if (pivots[offset] >= mas->index)
3880 			goto next;
3881 
3882 		do {
3883 			offset++;
3884 		} while ((offset < end) && (pivots[offset] < mas->index));
3885 
3886 		if (likely(offset < end))
3887 			max = pivots[offset];
3888 
3889 next:
3890 		slots = ma_slots(node, type);
3891 		next = mt_slot(mas->tree, slots, offset);
3892 		if (unlikely(ma_dead_node(node)))
3893 			goto dead_node;
3894 	} while (!ma_is_leaf(type));
3895 
3896 	return (void *)next;
3897 
3898 dead_node:
3899 	mas_reset(mas);
3900 	return NULL;
3901 }
3902 
3903 /*
3904  * mas_new_root() - Create a new root node that only contains the entry passed
3905  * in.
3906  * @mas: The maple state
3907  * @entry: The entry to store.
3908  *
3909  * Only valid when the index == 0 and the last == ULONG_MAX
3910  *
3911  * Return: 0 on error, 1 on success.
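 *
 * Illustrative trigger (sketch): a store covering the whole range ends
 * up here and collapses the tree to a single root entry:
 *
 *	mas_set_range(&mas, 0, ULONG_MAX);
 *	mas_store_gfp(&mas, entry, GFP_KERNEL);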
3912 */ 3913 static inline int mas_new_root(struct ma_state *mas, void *entry) 3914 { 3915 struct maple_enode *root = mas_root_locked(mas); 3916 enum maple_type type = maple_leaf_64; 3917 struct maple_node *node; 3918 void __rcu **slots; 3919 unsigned long *pivots; 3920 3921 if (!entry && !mas->index && mas->last == ULONG_MAX) { 3922 mas->depth = 0; 3923 mas_set_height(mas); 3924 rcu_assign_pointer(mas->tree->ma_root, entry); 3925 mas->node = MAS_START; 3926 goto done; 3927 } 3928 3929 mas_node_count(mas, 1); 3930 if (mas_is_err(mas)) 3931 return 0; 3932 3933 node = mas_pop_node(mas); 3934 pivots = ma_pivots(node, type); 3935 slots = ma_slots(node, type); 3936 node->parent = ma_parent_ptr( 3937 ((unsigned long)mas->tree | MA_ROOT_PARENT)); 3938 mas->node = mt_mk_node(node, type); 3939 rcu_assign_pointer(slots[0], entry); 3940 pivots[0] = mas->last; 3941 mas->depth = 1; 3942 mas_set_height(mas); 3943 rcu_assign_pointer(mas->tree->ma_root, mte_mk_root(mas->node)); 3944 3945 done: 3946 if (xa_is_node(root)) 3947 mte_destroy_walk(root, mas->tree); 3948 3949 return 1; 3950 } 3951 /* 3952 * mas_wr_spanning_store() - Create a subtree with the store operation completed 3953 * and new nodes where necessary, then place the sub-tree in the actual tree. 3954 * Note that mas is expected to point to the node which caused the store to 3955 * span. 3956 * @wr_mas: The maple write state 3957 * 3958 * Return: 0 on error, positive on success. 3959 */ 3960 static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) 3961 { 3962 struct maple_subtree_state mast; 3963 struct maple_big_node b_node; 3964 struct ma_state *mas; 3965 unsigned char height; 3966 3967 /* Left and Right side of spanning store */ 3968 MA_STATE(l_mas, NULL, 0, 0); 3969 MA_STATE(r_mas, NULL, 0, 0); 3970 3971 MA_WR_STATE(r_wr_mas, &r_mas, wr_mas->entry); 3972 MA_WR_STATE(l_wr_mas, &l_mas, wr_mas->entry); 3973 3974 /* 3975 * A store operation that spans multiple nodes is called a spanning 3976 * store and is handled early in the store call stack by the function 3977 * mas_is_span_wr(). When a spanning store is identified, the maple 3978 * state is duplicated. The first maple state walks the left tree path 3979 * to ``index``, the duplicate walks the right tree path to ``last``. 3980 * The data in the two nodes are combined into a single node, two nodes, 3981 * or possibly three nodes (see the 3-way split above). A ``NULL`` 3982 * written to the last entry of a node is considered a spanning store as 3983 * a rebalance is required for the operation to complete and an overflow 3984 * of data may happen. 3985 */ 3986 mas = wr_mas->mas; 3987 trace_ma_op(__func__, mas); 3988 3989 if (unlikely(!mas->index && mas->last == ULONG_MAX)) 3990 return mas_new_root(mas, wr_mas->entry); 3991 /* 3992 * Node rebalancing may occur due to this store, so there may be three new 3993 * entries per level plus a new root. 3994 */ 3995 height = mas_mt_height(mas); 3996 mas_node_count(mas, 1 + height * 3); 3997 if (mas_is_err(mas)) 3998 return 0; 3999 4000 /* 4001 * Set up right side. Need to get to the next offset after the spanning 4002 * store to ensure it's not NULL and to combine both the next node and 4003 * the node with the start together. 4004 */ 4005 r_mas = *mas; 4006 /* Avoid overflow, walk to next slot in the tree. */ 4007 if (r_mas.last + 1) 4008 r_mas.last++; 4009 4010 r_mas.index = r_mas.last; 4011 mas_wr_walk_index(&r_wr_mas); 4012 r_mas.last = r_mas.index = mas->last; 4013 4014 /* Set up left side. 
*/ 4015 l_mas = *mas; 4016 mas_wr_walk_index(&l_wr_mas); 4017 4018 if (!wr_mas->entry) { 4019 mas_extend_spanning_null(&l_wr_mas, &r_wr_mas); 4020 mas->offset = l_mas.offset; 4021 mas->index = l_mas.index; 4022 mas->last = l_mas.last = r_mas.last; 4023 } 4024 4025 /* expanding NULLs may make this cover the entire range */ 4026 if (!l_mas.index && r_mas.last == ULONG_MAX) { 4027 mas_set_range(mas, 0, ULONG_MAX); 4028 return mas_new_root(mas, wr_mas->entry); 4029 } 4030 4031 memset(&b_node, 0, sizeof(struct maple_big_node)); 4032 /* Copy l_mas and store the value in b_node. */ 4033 mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end); 4034 /* Copy r_mas into b_node. */ 4035 if (r_mas.offset <= r_wr_mas.node_end) 4036 mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end, 4037 &b_node, b_node.b_end + 1); 4038 else 4039 b_node.b_end++; 4040 4041 /* Stop spanning searches by searching for just index. */ 4042 l_mas.index = l_mas.last = mas->index; 4043 4044 mast.bn = &b_node; 4045 mast.orig_l = &l_mas; 4046 mast.orig_r = &r_mas; 4047 /* Combine l_mas and r_mas and split them up evenly again. */ 4048 return mas_spanning_rebalance(mas, &mast, height + 1); 4049 } 4050 4051 /* 4052 * mas_wr_node_store() - Attempt to store the value in a node 4053 * @wr_mas: The maple write state 4054 * 4055 * Attempts to reuse the node, but may allocate. 4056 * 4057 * Return: True if stored, false otherwise 4058 */ 4059 static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) 4060 { 4061 struct ma_state *mas = wr_mas->mas; 4062 void __rcu **dst_slots; 4063 unsigned long *dst_pivots; 4064 unsigned char dst_offset; 4065 unsigned char new_end = wr_mas->node_end; 4066 unsigned char offset; 4067 unsigned char node_slots = mt_slots[wr_mas->type]; 4068 struct maple_node reuse, *newnode; 4069 unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; 4070 bool in_rcu = mt_in_rcu(mas->tree); 4071 4072 offset = mas->offset; 4073 if (mas->last == wr_mas->r_max) { 4074 /* runs right to the end of the node */ 4075 if (mas->last == mas->max) 4076 new_end = offset; 4077 /* don't copy this offset */ 4078 wr_mas->offset_end++; 4079 } else if (mas->last < wr_mas->r_max) { 4080 /* new range ends in this range */ 4081 if (unlikely(wr_mas->r_max == ULONG_MAX)) 4082 mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); 4083 4084 new_end++; 4085 } else { 4086 if (wr_mas->end_piv == mas->last) 4087 wr_mas->offset_end++; 4088 4089 new_end -= wr_mas->offset_end - offset - 1; 4090 } 4091 4092 /* new range starts within a range */ 4093 if (wr_mas->r_min < mas->index) 4094 new_end++; 4095 4096 /* Not enough room */ 4097 if (new_end >= node_slots) 4098 return false; 4099 4100 /* Not enough data. */ 4101 if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && 4102 !(mas->mas_flags & MA_STATE_BULK)) 4103 return false; 4104 4105 /* set up node. 
*/ 4106 if (in_rcu) { 4107 mas_node_count(mas, 1); 4108 if (mas_is_err(mas)) 4109 return false; 4110 4111 newnode = mas_pop_node(mas); 4112 } else { 4113 memset(&reuse, 0, sizeof(struct maple_node)); 4114 newnode = &reuse; 4115 } 4116 4117 newnode->parent = mas_mn(mas)->parent; 4118 dst_pivots = ma_pivots(newnode, wr_mas->type); 4119 dst_slots = ma_slots(newnode, wr_mas->type); 4120 /* Copy from start to insert point */ 4121 memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); 4122 memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); 4123 dst_offset = offset; 4124 4125 /* Handle insert of new range starting after old range */ 4126 if (wr_mas->r_min < mas->index) { 4127 mas->offset++; 4128 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); 4129 dst_pivots[dst_offset++] = mas->index - 1; 4130 } 4131 4132 /* Store the new entry and range end. */ 4133 if (dst_offset < max_piv) 4134 dst_pivots[dst_offset] = mas->last; 4135 mas->offset = dst_offset; 4136 rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry); 4137 4138 /* 4139 * this range wrote to the end of the node or it overwrote the rest of 4140 * the data 4141 */ 4142 if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { 4143 new_end = dst_offset; 4144 goto done; 4145 } 4146 4147 dst_offset++; 4148 /* Copy to the end of node if necessary. */ 4149 copy_size = wr_mas->node_end - wr_mas->offset_end + 1; 4150 memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, 4151 sizeof(void *) * copy_size); 4152 if (dst_offset < max_piv) { 4153 if (copy_size > max_piv - dst_offset) 4154 copy_size = max_piv - dst_offset; 4155 4156 memcpy(dst_pivots + dst_offset, 4157 wr_mas->pivots + wr_mas->offset_end, 4158 sizeof(unsigned long) * copy_size); 4159 } 4160 4161 if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) 4162 dst_pivots[new_end] = mas->max; 4163 4164 done: 4165 mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end); 4166 if (in_rcu) { 4167 mas->node = mt_mk_node(newnode, wr_mas->type); 4168 mas_replace(mas, false); 4169 } else { 4170 memcpy(wr_mas->node, newnode, sizeof(struct maple_node)); 4171 } 4172 trace_ma_write(__func__, mas, 0, wr_mas->entry); 4173 mas_update_gap(mas); 4174 return true; 4175 } 4176 4177 /* 4178 * mas_wr_slot_store: Attempt to store a value in a slot. 4179 * @wr_mas: the maple write state 4180 * 4181 * Return: True if stored, false otherwise 4182 */ 4183 static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) 4184 { 4185 struct ma_state *mas = wr_mas->mas; 4186 unsigned long lmax; /* Logical max. */ 4187 unsigned char offset = mas->offset; 4188 4189 if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || 4190 (offset != wr_mas->node_end))) 4191 return false; 4192 4193 if (offset == wr_mas->node_end - 1) 4194 lmax = mas->max; 4195 else 4196 lmax = wr_mas->pivots[offset + 1]; 4197 4198 /* going to overwrite too many slots. */ 4199 if (lmax < mas->last) 4200 return false; 4201 4202 if (wr_mas->r_min == mas->index) { 4203 /* overwriting two or more ranges with one. */ 4204 if (lmax == mas->last) 4205 return false; 4206 4207 /* Overwriting all of offset and a portion of offset + 1. */ 4208 rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); 4209 wr_mas->pivots[offset] = mas->last; 4210 goto done; 4211 } 4212 4213 /* Doesn't end on the next range end. 
	 */
4214 	if (lmax != mas->last)
4215 		return false;
4216 
4217 	/* Overwriting a portion of offset and all of offset + 1 */
4218 	if ((offset + 1 < mt_pivots[wr_mas->type]) &&
4219 	    (wr_mas->entry || wr_mas->pivots[offset + 1]))
4220 		wr_mas->pivots[offset + 1] = mas->last;
4221 
4222 	rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry);
4223 	wr_mas->pivots[offset] = mas->index - 1;
4224 	mas->offset++; /* Keep mas accurate. */
4225 
4226 done:
4227 	trace_ma_write(__func__, mas, 0, wr_mas->entry);
4228 	mas_update_gap(mas);
4229 	return true;
4230 }
4231 
4232 static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
4233 {
4234 	while ((wr_mas->mas->last > wr_mas->end_piv) &&
4235 	       (wr_mas->offset_end < wr_mas->node_end))
4236 		wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end];
4237 
4238 	if (wr_mas->mas->last > wr_mas->end_piv)
4239 		wr_mas->end_piv = wr_mas->mas->max;
4240 }
4241 
4242 static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
4243 {
4244 	struct ma_state *mas = wr_mas->mas;
4245 
4246 	if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end])
4247 		mas->last = wr_mas->end_piv;
4248 
4249 	/* Check next slot(s) if we are overwriting the end */
4250 	if ((mas->last == wr_mas->end_piv) &&
4251 	    (wr_mas->node_end != wr_mas->offset_end) &&
4252 	    !wr_mas->slots[wr_mas->offset_end + 1]) {
4253 		wr_mas->offset_end++;
4254 		if (wr_mas->offset_end == wr_mas->node_end)
4255 			mas->last = mas->max;
4256 		else
4257 			mas->last = wr_mas->pivots[wr_mas->offset_end];
4258 		wr_mas->end_piv = mas->last;
4259 	}
4260 
4261 	if (!wr_mas->content) {
4262 		/* If this one is null, the next and prev are not */
4263 		mas->index = wr_mas->r_min;
4264 	} else {
4265 		/* Check prev slot if we are overwriting the start */
4266 		if (mas->index == wr_mas->r_min && mas->offset &&
4267 		    !wr_mas->slots[mas->offset - 1]) {
4268 			mas->offset--;
4269 			wr_mas->r_min = mas->index =
4270 				mas_safe_min(mas, wr_mas->pivots, mas->offset);
4271 			wr_mas->r_max = wr_mas->pivots[mas->offset];
4272 		}
4273 	}
4274 }
4275 
4276 static inline bool mas_wr_append(struct ma_wr_state *wr_mas)
4277 {
4278 	unsigned char end = wr_mas->node_end;
4279 	unsigned char new_end = end + 1;
4280 	struct ma_state *mas = wr_mas->mas;
4281 	unsigned char node_pivots = mt_pivots[wr_mas->type];
4282 
4283 	if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) {
4284 		if (new_end < node_pivots)
4285 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4286 
4287 		if (new_end < node_pivots)
4288 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4289 
4290 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry);
4291 		mas->offset = new_end;
4292 		wr_mas->pivots[end] = mas->index - 1;
4293 
4294 		return true;
4295 	}
4296 
4297 	if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) {
4298 		if (new_end < node_pivots)
4299 			wr_mas->pivots[new_end] = wr_mas->pivots[end];
4300 
4301 		rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content);
4302 		if (new_end < node_pivots)
4303 			ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end);
4304 
4305 		wr_mas->pivots[end] = mas->last;
4306 		rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry);
4307 		return true;
4308 	}
4309 
4310 	return false;
4311 }
4312 
4313 /*
4314  * mas_wr_bnode() - Slow path for a modification.
4315  * @wr_mas: The write maple state
4316  *
4317  * This is where the split and rebalance operations end up.
4318  */
4319 static void mas_wr_bnode(struct ma_wr_state *wr_mas)
4320 {
4321 	struct maple_big_node b_node;
4322 
4323 	trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
4324 	memset(&b_node, 0, sizeof(struct maple_big_node));
4325 	mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
4326 	mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
4327 }
4328 
4329 static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
4330 {
4331 	unsigned char node_slots;
4332 	unsigned char node_size;
4333 	struct ma_state *mas = wr_mas->mas;
4334 
4335 	/* Direct replacement */
4336 	if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) {
4337 		rcu_assign_pointer(wr_mas->slots[mas->offset], wr_mas->entry);
4338 		if (!!wr_mas->entry ^ !!wr_mas->content)
4339 			mas_update_gap(mas);
4340 		return;
4341 	}
4342 
4343 	/* Attempt to append */
4344 	node_slots = mt_slots[wr_mas->type];
4345 	node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2;
4346 	if (mas->max == ULONG_MAX)
4347 		node_size++;
4348 
4349 	/* slot and node store will not fit, go to the slow path */
4350 	if (unlikely(node_size >= node_slots))
4351 		goto slow_path;
4352 
4353 	if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) &&
4354 	    (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) {
4355 		if (!wr_mas->content || !wr_mas->entry)
4356 			mas_update_gap(mas);
4357 		return;
4358 	}
4359 
4360 	if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas))
4361 		return;
4362 	else if (mas_wr_node_store(wr_mas))
4363 		return;
4364 
4365 	if (mas_is_err(mas))
4366 		return;
4367 
4368 slow_path:
4369 	mas_wr_bnode(wr_mas);
4370 }
4371 
4372 /*
4373  * mas_wr_store_entry() - Internal call to store a value
4374  * @wr_mas: The maple write state, which carries the maple state and the
4375  *	entry to store.
4376  *
4377  * Return: The contents that were stored at the index.
4378  */
4379 static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas)
4380 {
4381 	struct ma_state *mas = wr_mas->mas;
4382 
4383 	wr_mas->content = mas_start(mas);
4384 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4385 		mas_store_root(mas, wr_mas->entry);
4386 		return wr_mas->content;
4387 	}
4388 
4389 	if (unlikely(!mas_wr_walk(wr_mas))) {
4390 		mas_wr_spanning_store(wr_mas);
4391 		return wr_mas->content;
4392 	}
4393 
4394 	/* At this point, we are at the leaf node that needs to be altered. */
4395 	wr_mas->end_piv = wr_mas->r_max;
4396 	mas_wr_end_piv(wr_mas);
4397 
4398 	if (!wr_mas->entry)
4399 		mas_wr_extend_null(wr_mas);
4400 
4401 	/* New root for a single pointer */
4402 	if (unlikely(!mas->index && mas->last == ULONG_MAX)) {
4403 		mas_new_root(mas, wr_mas->entry);
4404 		return wr_mas->content;
4405 	}
4406 
4407 	mas_wr_modify(wr_mas);
4408 	return wr_mas->content;
4409 }
4410 
4411 /**
4412  * mas_insert() - Internal call to insert a value
4413  * @mas: The maple state
4414  * @entry: The entry to store
4415  *
4416  * Return: %NULL on success, otherwise the existing contents at the requested
4417  * index. The maple state needs to be checked for error conditions.
4418  */
4419 static inline void *mas_insert(struct ma_state *mas, void *entry)
4420 {
4421 	MA_WR_STATE(wr_mas, mas, entry);
4422 
4423 	/*
4424 	 * Inserting a new range inserts either 0, 1, or 2 pivots within the
4425 	 * tree. If the insert fits exactly into an existing gap with a value
4426 	 * of NULL, then the slot only needs to be written with the new value.
4427 	 * If the range being inserted is adjacent to another range, then only a
4428 	 * single pivot needs to be inserted (as well as writing the entry). If
4429 	 * the new range is within a gap but does not touch any other ranges,
4430 	 * then two pivots need to be inserted: the start - 1, and the end. As
4431 	 * usual, the entry must be written. Most operations require a new node
4432 	 * to be allocated and replace an existing node to ensure RCU safety,
4433 	 * when in RCU mode. The exception to requiring a newly allocated node
4434 	 * is when inserting at the end of a node (appending). When done
4435 	 * carefully, appending can reuse the node in place.
4436 	 */
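	/*
	 * A worked sketch of the pivot counts above (editorial example with
	 * hypothetical leaf contents):
	 *
	 *	existing leaf:	[0-9] = NULL, [10-19] = A
	 *	insert 0-9 = B:	fills the NULL gap exactly, 0 new pivots
	 *	insert 0-4 = B:	adjacent to the gap start, 1 new pivot (4)
	 *	insert 2-4 = B:	inside the gap, 2 new pivots (1 and 4)
	 */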
4437 	wr_mas.content = mas_start(mas);
4438 	if (wr_mas.content)
4439 		goto exists;
4440 
4441 	if (mas_is_none(mas) || mas_is_ptr(mas)) {
4442 		mas_store_root(mas, entry);
4443 		return NULL;
4444 	}
4445 
4446 	/* spanning writes always overwrite something */
4447 	if (!mas_wr_walk(&wr_mas))
4448 		goto exists;
4449 
4450 	/* At this point, we are at the leaf node that needs to be altered. */
4451 	wr_mas.offset_end = mas->offset;
4452 	wr_mas.end_piv = wr_mas.r_max;
4453 
4454 	if (wr_mas.content || (mas->last > wr_mas.r_max))
4455 		goto exists;
4456 
4457 	if (!entry)
4458 		return NULL;
4459 
4460 	mas_wr_modify(&wr_mas);
4461 	return wr_mas.content;
4462 
4463 exists:
4464 	mas_set_err(mas, -EEXIST);
4465 	return wr_mas.content;
4466 
4467 }
4468 
4469 /*
4470  * mas_prev_node() - Find the previous node at the same level in the tree.
4471  * @mas: The maple state
4472  * @min: The lower limit to search
4473  *
4474  * The prev node value will be mas->node[mas->offset] or MAS_NONE.
4475  * Return: 1 if the node is dead, 0 otherwise.
4476  */
4477 
4478 static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
4479 {
4480 	enum maple_type mt;
4481 	int offset, level;
4482 	void __rcu **slots;
4483 	struct maple_node *node;
4484 	struct maple_enode *enode;
4485 	unsigned long *pivots;
4486 
4487 	if (mas_is_none(mas))
4488 		return 0;
4489 
4490 	level = 0;
4491 	do {
4492 		node = mas_mn(mas);
4493 		if (ma_is_root(node))
4494 			goto no_entry;
4495 
4496 		/* Walk up.
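		 * Stop at the first ancestor with a non-zero offset, i.e. one
		 * where a previous sibling slot exists.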
		 */
4497 		if (unlikely(mas_ascend(mas)))
4498 			return 1;
4499 		offset = mas->offset;
4500 		level++;
4501 	} while (!offset);
4502 
4503 	offset--;
4504 	mt = mte_node_type(mas->node);
4505 	node = mas_mn(mas);
4506 	slots = ma_slots(node, mt);
4507 	pivots = ma_pivots(node, mt);
4508 	mas->max = pivots[offset];
4509 	if (offset)
4510 		mas->min = pivots[offset - 1] + 1;
4511 	if (unlikely(ma_dead_node(node)))
4512 		return 1;
4513 
4514 	if (mas->max < min)
4515 		goto no_entry_min;
4516 
4517 	while (level > 1) {
4518 		level--;
4519 		enode = mas_slot(mas, slots, offset);
4520 		if (unlikely(ma_dead_node(node)))
4521 			return 1;
4522 
4523 		mas->node = enode;
4524 		mt = mte_node_type(mas->node);
4525 		node = mas_mn(mas);
4526 		slots = ma_slots(node, mt);
4527 		pivots = ma_pivots(node, mt);
4528 		offset = ma_data_end(node, mt, pivots, mas->max);
4529 		if (offset)
4530 			mas->min = pivots[offset - 1] + 1;
4531 
4532 		if (offset < mt_pivots[mt])
4533 			mas->max = pivots[offset];
4534 
4535 		if (mas->max < min)
4536 			goto no_entry;
4537 	}
4538 
4539 	mas->node = mas_slot(mas, slots, offset);
4540 	if (unlikely(ma_dead_node(node)))
4541 		return 1;
4542 
4543 	mas->offset = mas_data_end(mas);
4544 	if (unlikely(mte_dead_node(mas->node)))
4545 		return 1;
4546 
4547 	return 0;
4548 
4549 no_entry_min:
4550 	mas->offset = offset;
4551 	if (offset)
4552 		mas->min = pivots[offset - 1] + 1;
4553 no_entry:
4554 	if (unlikely(ma_dead_node(node)))
4555 		return 1;
4556 
4557 	mas->node = MAS_NONE;
4558 	return 0;
4559 }
4560 
4561 /*
4562  * mas_next_node() - Get the next node at the same level in the tree.
4563  * @mas: The maple state
 * @node: The maple node (mas->node)
4564  * @max: The maximum pivot value to check.
4565  *
4566  * The next value will be mas->node[mas->offset] or MAS_NONE.
4567  * Return: 1 on dead node, 0 otherwise.
4568  */
4569 static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
4570 		unsigned long max)
4571 {
4572 	unsigned long min, pivot;
4573 	unsigned long *pivots;
4574 	struct maple_enode *enode;
4575 	int level = 0;
4576 	unsigned char offset;
4577 	enum maple_type mt;
4578 	void __rcu **slots;
4579 
4580 	if (mas->max >= max)
4581 		goto no_entry;
4582 
4583 	level = 0;
4584 	do {
4585 		if (ma_is_root(node))
4586 			goto no_entry;
4587 
4588 		min = mas->max + 1;
4589 		if (min > max)
4590 			goto no_entry;
4591 
4592 		if (unlikely(mas_ascend(mas)))
4593 			return 1;
4594 
4595 		offset = mas->offset;
4596 		level++;
4597 		node = mas_mn(mas);
4598 		mt = mte_node_type(mas->node);
4599 		pivots = ma_pivots(node, mt);
4600 	} while (unlikely(offset == ma_data_end(node, mt, pivots, mas->max)));
4601 
4602 	slots = ma_slots(node, mt);
4603 	pivot = mas_safe_pivot(mas, pivots, ++offset, mt);
4604 	while (unlikely(level > 1)) {
4605 		/* Descend, if necessary */
4606 		enode = mas_slot(mas, slots, offset);
4607 		if (unlikely(ma_dead_node(node)))
4608 			return 1;
4609 
4610 		mas->node = enode;
4611 		level--;
4612 		node = mas_mn(mas);
4613 		mt = mte_node_type(mas->node);
4614 		slots = ma_slots(node, mt);
4615 		pivots = ma_pivots(node, mt);
4616 		offset = 0;
4617 		pivot = pivots[0];
4618 	}
4619 
4620 	enode = mas_slot(mas, slots, offset);
4621 	if (unlikely(ma_dead_node(node)))
4622 		return 1;
4623 
4624 	mas->node = enode;
4625 	mas->min = min;
4626 	mas->max = pivot;
4627 	return 0;
4628 
4629 no_entry:
4630 	if (unlikely(ma_dead_node(node)))
4631 		return 1;
4632 
4633 	mas->node = MAS_NONE;
4634 	return 0;
4635 }
4636 
4637 /*
4638  * mas_next_nentry() - Get the next node entry.
4639  * @mas: The maple state
 * @node: The maple node
4640  * @max: The maximum value to check
4641  * @type: The maple node type
4642  *
4643  * Sets @mas->offset to the offset of the next node entry, @mas->last to the
4644  * pivot of the entry.
4645  *
4646  * Return: The next entry, %NULL otherwise
4647  */
4648 static inline void *mas_next_nentry(struct ma_state *mas,
4649 	struct maple_node *node, unsigned long max, enum maple_type type)
4650 {
4651 	unsigned char count;
4652 	unsigned long pivot;
4653 	unsigned long *pivots;
4654 	void __rcu **slots;
4655 	void *entry;
4656 
4657 	if (mas->last == mas->max) {
4658 		mas->index = mas->max;
4659 		return NULL;
4660 	}
4661 
4662 	pivots = ma_pivots(node, type);
4663 	slots = ma_slots(node, type);
4664 	mas->index = mas_safe_min(mas, pivots, mas->offset);
4665 	count = ma_data_end(node, type, pivots, mas->max);
4666 	if (ma_dead_node(node))
4667 		return NULL;
4668 
4669 	if (mas->index > max)
4670 		return NULL;
4671 
4672 	if (mas->offset > count)
4673 		return NULL;
4674 
4675 	while (mas->offset < count) {
4676 		pivot = pivots[mas->offset];
4677 		entry = mas_slot(mas, slots, mas->offset);
4678 		if (ma_dead_node(node))
4679 			return NULL;
4680 
4681 		if (entry)
4682 			goto found;
4683 
4684 		if (pivot >= max)
4685 			return NULL;
4686 
4687 		mas->index = pivot + 1;
4688 		mas->offset++;
4689 	}
4690 
4691 	if (mas->index > mas->max) {
4692 		mas->index = mas->last;
4693 		return NULL;
4694 	}
4695 
4696 	pivot = mas_safe_pivot(mas, pivots, mas->offset, type);
4697 	entry = mas_slot(mas, slots, mas->offset);
4698 	if (ma_dead_node(node))
4699 		return NULL;
4700 
4701 	if (!pivot)
4702 		return NULL;
4703 
4704 	if (!entry)
4705 		return NULL;
4706 
4707 found:
4708 	mas->last = pivot;
4709 	return entry;
4710 }
4711 
4712 static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
4713 {
4714 retry:
4715 	mas_set(mas, index);
4716 	mas_state_walk(mas);
4717 	if (mas_is_start(mas))
4718 		goto retry;
4719 }
4720 
4721 /*
4722  * mas_next_entry() - Internal function to get the next entry.
4723  * @mas: The maple state
4724  * @limit: The maximum range start.
4725  *
4726  * Sets @mas->node to the node of the next entry, @mas->index to the
4727  * beginning of the entry's range, and @mas->last to its pivot. Does not
4728  * check beyond @limit. Sets @mas->index and @mas->last to the limit if it
4729  * is hit. Restarts on dead nodes.
4730  *
4731  * Return: the next entry or %NULL.
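 *
 * For example (editorial sketch with hypothetical entries): given A = [5, 10]
 * and B = [20, 30], a state positioned on A returns B and sets @mas->index to
 * 20 and @mas->last to 30.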
4732 */ 4733 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4734 { 4735 void *entry = NULL; 4736 struct maple_enode *prev_node; 4737 struct maple_node *node; 4738 unsigned char offset; 4739 unsigned long last; 4740 enum maple_type mt; 4741 4742 if (mas->index > limit) { 4743 mas->index = mas->last = limit; 4744 mas_pause(mas); 4745 return NULL; 4746 } 4747 last = mas->last; 4748 retry: 4749 offset = mas->offset; 4750 prev_node = mas->node; 4751 node = mas_mn(mas); 4752 mt = mte_node_type(mas->node); 4753 mas->offset++; 4754 if (unlikely(mas->offset >= mt_slots[mt])) { 4755 mas->offset = mt_slots[mt] - 1; 4756 goto next_node; 4757 } 4758 4759 while (!mas_is_none(mas)) { 4760 entry = mas_next_nentry(mas, node, limit, mt); 4761 if (unlikely(ma_dead_node(node))) { 4762 mas_rewalk(mas, last); 4763 goto retry; 4764 } 4765 4766 if (likely(entry)) 4767 return entry; 4768 4769 if (unlikely((mas->index > limit))) 4770 break; 4771 4772 next_node: 4773 prev_node = mas->node; 4774 offset = mas->offset; 4775 if (unlikely(mas_next_node(mas, node, limit))) { 4776 mas_rewalk(mas, last); 4777 goto retry; 4778 } 4779 mas->offset = 0; 4780 node = mas_mn(mas); 4781 mt = mte_node_type(mas->node); 4782 } 4783 4784 mas->index = mas->last = limit; 4785 mas->offset = offset; 4786 mas->node = prev_node; 4787 return NULL; 4788 } 4789 4790 /* 4791 * mas_prev_nentry() - Get the previous node entry. 4792 * @mas: The maple state. 4793 * @limit: The lower limit to check for a value. 4794 * 4795 * Return: the entry, %NULL otherwise. 4796 */ 4797 static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, 4798 unsigned long index) 4799 { 4800 unsigned long pivot, min; 4801 unsigned char offset; 4802 struct maple_node *mn; 4803 enum maple_type mt; 4804 unsigned long *pivots; 4805 void __rcu **slots; 4806 void *entry; 4807 4808 retry: 4809 if (!mas->offset) 4810 return NULL; 4811 4812 mn = mas_mn(mas); 4813 mt = mte_node_type(mas->node); 4814 offset = mas->offset - 1; 4815 if (offset >= mt_slots[mt]) 4816 offset = mt_slots[mt] - 1; 4817 4818 slots = ma_slots(mn, mt); 4819 pivots = ma_pivots(mn, mt); 4820 if (offset == mt_pivots[mt]) 4821 pivot = mas->max; 4822 else 4823 pivot = pivots[offset]; 4824 4825 if (unlikely(ma_dead_node(mn))) { 4826 mas_rewalk(mas, index); 4827 goto retry; 4828 } 4829 4830 while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || 4831 !pivot)) 4832 pivot = pivots[--offset]; 4833 4834 min = mas_safe_min(mas, pivots, offset); 4835 entry = mas_slot(mas, slots, offset); 4836 if (unlikely(ma_dead_node(mn))) { 4837 mas_rewalk(mas, index); 4838 goto retry; 4839 } 4840 4841 if (likely(entry)) { 4842 mas->offset = offset; 4843 mas->last = pivot; 4844 mas->index = min; 4845 } 4846 return entry; 4847 } 4848 4849 static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) 4850 { 4851 void *entry; 4852 4853 if (mas->index < min) { 4854 mas->index = mas->last = min; 4855 mas->node = MAS_NONE; 4856 return NULL; 4857 } 4858 retry: 4859 while (likely(!mas_is_none(mas))) { 4860 entry = mas_prev_nentry(mas, min, mas->index); 4861 if (unlikely(mas->last < min)) 4862 goto not_found; 4863 4864 if (likely(entry)) 4865 return entry; 4866 4867 if (unlikely(mas_prev_node(mas, min))) { 4868 mas_rewalk(mas, mas->index); 4869 goto retry; 4870 } 4871 4872 mas->offset++; 4873 } 4874 4875 mas->offset--; 4876 not_found: 4877 mas->index = mas->last = min; 4878 return NULL; 4879 } 4880 4881 /* 4882 * mas_rev_awalk() - Internal function. 
Reverse allocation walk. Find the 4883 * highest gap address of a given size in a given node and descend. 4884 * @mas: The maple state 4885 * @size: The needed size. 4886 * 4887 * Return: True if found in a leaf, false otherwise. 4888 * 4889 */ 4890 static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) 4891 { 4892 enum maple_type type = mte_node_type(mas->node); 4893 struct maple_node *node = mas_mn(mas); 4894 unsigned long *pivots, *gaps; 4895 void __rcu **slots; 4896 unsigned long gap = 0; 4897 unsigned long max, min; 4898 unsigned char offset; 4899 4900 if (unlikely(mas_is_err(mas))) 4901 return true; 4902 4903 if (ma_is_dense(type)) { 4904 /* dense nodes. */ 4905 mas->offset = (unsigned char)(mas->index - mas->min); 4906 return true; 4907 } 4908 4909 pivots = ma_pivots(node, type); 4910 slots = ma_slots(node, type); 4911 gaps = ma_gaps(node, type); 4912 offset = mas->offset; 4913 min = mas_safe_min(mas, pivots, offset); 4914 /* Skip out of bounds. */ 4915 while (mas->last < min) 4916 min = mas_safe_min(mas, pivots, --offset); 4917 4918 max = mas_safe_pivot(mas, pivots, offset, type); 4919 while (mas->index <= max) { 4920 gap = 0; 4921 if (gaps) 4922 gap = gaps[offset]; 4923 else if (!mas_slot(mas, slots, offset)) 4924 gap = max - min + 1; 4925 4926 if (gap) { 4927 if ((size <= gap) && (size <= mas->last - min + 1)) 4928 break; 4929 4930 if (!gaps) { 4931 /* Skip the next slot, it cannot be a gap. */ 4932 if (offset < 2) 4933 goto ascend; 4934 4935 offset -= 2; 4936 max = pivots[offset]; 4937 min = mas_safe_min(mas, pivots, offset); 4938 continue; 4939 } 4940 } 4941 4942 if (!offset) 4943 goto ascend; 4944 4945 offset--; 4946 max = min - 1; 4947 min = mas_safe_min(mas, pivots, offset); 4948 } 4949 4950 if (unlikely((mas->index > max) || (size - 1 > max - mas->index))) 4951 goto no_space; 4952 4953 if (unlikely(ma_is_leaf(type))) { 4954 mas->offset = offset; 4955 mas->min = min; 4956 mas->max = min + gap - 1; 4957 return true; 4958 } 4959 4960 /* descend, only happens under lock. 
*/ 4961 mas->node = mas_slot(mas, slots, offset); 4962 mas->min = min; 4963 mas->max = max; 4964 mas->offset = mas_data_end(mas); 4965 return false; 4966 4967 ascend: 4968 if (!mte_is_root(mas->node)) 4969 return false; 4970 4971 no_space: 4972 mas_set_err(mas, -EBUSY); 4973 return false; 4974 } 4975 4976 static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) 4977 { 4978 enum maple_type type = mte_node_type(mas->node); 4979 unsigned long pivot, min, gap = 0; 4980 unsigned char offset; 4981 unsigned long *gaps; 4982 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 4983 void __rcu **slots = ma_slots(mas_mn(mas), type); 4984 bool found = false; 4985 4986 if (ma_is_dense(type)) { 4987 mas->offset = (unsigned char)(mas->index - mas->min); 4988 return true; 4989 } 4990 4991 gaps = ma_gaps(mte_to_node(mas->node), type); 4992 offset = mas->offset; 4993 min = mas_safe_min(mas, pivots, offset); 4994 for (; offset < mt_slots[type]; offset++) { 4995 pivot = mas_safe_pivot(mas, pivots, offset, type); 4996 if (offset && !pivot) 4997 break; 4998 4999 /* Not within lower bounds */ 5000 if (mas->index > pivot) 5001 goto next_slot; 5002 5003 if (gaps) 5004 gap = gaps[offset]; 5005 else if (!mas_slot(mas, slots, offset)) 5006 gap = min(pivot, mas->last) - max(mas->index, min) + 1; 5007 else 5008 goto next_slot; 5009 5010 if (gap >= size) { 5011 if (ma_is_leaf(type)) { 5012 found = true; 5013 goto done; 5014 } 5015 if (mas->index <= pivot) { 5016 mas->node = mas_slot(mas, slots, offset); 5017 mas->min = min; 5018 mas->max = pivot; 5019 offset = 0; 5020 break; 5021 } 5022 } 5023 next_slot: 5024 min = pivot + 1; 5025 if (mas->last <= pivot) { 5026 mas_set_err(mas, -EBUSY); 5027 return true; 5028 } 5029 } 5030 5031 if (mte_is_root(mas->node)) 5032 found = true; 5033 done: 5034 mas->offset = offset; 5035 return found; 5036 } 5037 5038 /** 5039 * mas_walk() - Search for @mas->index in the tree. 5040 * @mas: The maple state. 5041 * 5042 * mas->index and mas->last will be set to the range if there is a value. If 5043 * mas->node is MAS_NONE, reset to MAS_START. 5044 * 5045 * Return: the entry at the location or %NULL. 5046 */ 5047 void *mas_walk(struct ma_state *mas) 5048 { 5049 void *entry; 5050 5051 retry: 5052 entry = mas_state_walk(mas); 5053 if (mas_is_start(mas)) 5054 goto retry; 5055 5056 if (mas_is_ptr(mas)) { 5057 if (!mas->index) { 5058 mas->last = 0; 5059 } else { 5060 mas->index = 1; 5061 mas->last = ULONG_MAX; 5062 } 5063 return entry; 5064 } 5065 5066 if (mas_is_none(mas)) { 5067 mas->index = 0; 5068 mas->last = ULONG_MAX; 5069 } 5070 5071 return entry; 5072 } 5073 EXPORT_SYMBOL_GPL(mas_walk); 5074 5075 static inline bool mas_rewind_node(struct ma_state *mas) 5076 { 5077 unsigned char slot; 5078 5079 do { 5080 if (mte_is_root(mas->node)) { 5081 slot = mas->offset; 5082 if (!slot) 5083 return false; 5084 } else { 5085 mas_ascend(mas); 5086 slot = mas->offset; 5087 } 5088 } while (!slot); 5089 5090 mas->offset = --slot; 5091 return true; 5092 } 5093 5094 /* 5095 * mas_skip_node() - Internal function. Skip over a node. 5096 * @mas: The maple state. 5097 * 5098 * Return: true if there is another node, false otherwise. 
5099 */ 5100 static inline bool mas_skip_node(struct ma_state *mas) 5101 { 5102 if (mas_is_err(mas)) 5103 return false; 5104 5105 do { 5106 if (mte_is_root(mas->node)) { 5107 if (mas->offset >= mas_data_end(mas)) { 5108 mas_set_err(mas, -EBUSY); 5109 return false; 5110 } 5111 } else { 5112 mas_ascend(mas); 5113 } 5114 } while (mas->offset >= mas_data_end(mas)); 5115 5116 mas->offset++; 5117 return true; 5118 } 5119 5120 /* 5121 * mas_awalk() - Allocation walk. Search from low address to high, for a gap of 5122 * @size 5123 * @mas: The maple state 5124 * @size: The size of the gap required 5125 * 5126 * Search between @mas->index and @mas->last for a gap of @size. 5127 */ 5128 static inline void mas_awalk(struct ma_state *mas, unsigned long size) 5129 { 5130 struct maple_enode *last = NULL; 5131 5132 /* 5133 * There are 4 options: 5134 * go to child (descend) 5135 * go back to parent (ascend) 5136 * no gap found. (return, slot == MAPLE_NODE_SLOTS) 5137 * found the gap. (return, slot != MAPLE_NODE_SLOTS) 5138 */ 5139 while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { 5140 if (last == mas->node) 5141 mas_skip_node(mas); 5142 else 5143 last = mas->node; 5144 } 5145 } 5146 5147 /* 5148 * mas_fill_gap() - Fill a located gap with @entry. 5149 * @mas: The maple state 5150 * @entry: The value to store 5151 * @slot: The offset into the node to store the @entry 5152 * @size: The size of the entry 5153 * @index: The start location 5154 */ 5155 static inline void mas_fill_gap(struct ma_state *mas, void *entry, 5156 unsigned char slot, unsigned long size, unsigned long *index) 5157 { 5158 MA_WR_STATE(wr_mas, mas, entry); 5159 unsigned char pslot = mte_parent_slot(mas->node); 5160 struct maple_enode *mn = mas->node; 5161 unsigned long *pivots; 5162 enum maple_type ptype; 5163 /* 5164 * mas->index is the start address for the search 5165 * which may no longer be needed. 5166 * mas->last is the end address for the search 5167 */ 5168 5169 *index = mas->index; 5170 mas->last = mas->index + size - 1; 5171 5172 /* 5173 * It is possible that using mas->max and mas->min to correctly 5174 * calculate the index and last will cause an issue in the gap 5175 * calculation, so fix the ma_state here 5176 */ 5177 mas_ascend(mas); 5178 ptype = mte_node_type(mas->node); 5179 pivots = ma_pivots(mas_mn(mas), ptype); 5180 mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); 5181 mas->min = mas_safe_min(mas, pivots, pslot); 5182 mas->node = mn; 5183 mas->offset = slot; 5184 mas_wr_store_entry(&wr_mas); 5185 } 5186 5187 /* 5188 * mas_sparse_area() - Internal function. Return upper or lower limit when 5189 * searching for a gap in an empty tree. 5190 * @mas: The maple state 5191 * @min: the minimum range 5192 * @max: The maximum range 5193 * @size: The size of the gap 5194 * @fwd: Searching forward or back 5195 */ 5196 static inline void mas_sparse_area(struct ma_state *mas, unsigned long min, 5197 unsigned long max, unsigned long size, bool fwd) 5198 { 5199 unsigned long start = 0; 5200 5201 if (!unlikely(mas_is_none(mas))) 5202 start++; 5203 /* mas_is_ptr */ 5204 5205 if (start < min) 5206 start = min; 5207 5208 if (fwd) { 5209 mas->index = start; 5210 mas->last = start + size - 1; 5211 return; 5212 } 5213 5214 mas->index = max; 5215 } 5216 5217 /* 5218 * mas_empty_area() - Get the lowest address within the range that is 5219 * sufficient for the size requested. 
5220 * @mas: The maple state 5221 * @min: The lowest value of the range 5222 * @max: The highest value of the range 5223 * @size: The size needed 5224 */ 5225 int mas_empty_area(struct ma_state *mas, unsigned long min, 5226 unsigned long max, unsigned long size) 5227 { 5228 unsigned char offset; 5229 unsigned long *pivots; 5230 enum maple_type mt; 5231 5232 if (mas_is_start(mas)) 5233 mas_start(mas); 5234 else if (mas->offset >= 2) 5235 mas->offset -= 2; 5236 else if (!mas_skip_node(mas)) 5237 return -EBUSY; 5238 5239 /* Empty set */ 5240 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5241 mas_sparse_area(mas, min, max, size, true); 5242 return 0; 5243 } 5244 5245 /* The start of the window can only be within these values */ 5246 mas->index = min; 5247 mas->last = max; 5248 mas_awalk(mas, size); 5249 5250 if (unlikely(mas_is_err(mas))) 5251 return xa_err(mas->node); 5252 5253 offset = mas->offset; 5254 if (unlikely(offset == MAPLE_NODE_SLOTS)) 5255 return -EBUSY; 5256 5257 mt = mte_node_type(mas->node); 5258 pivots = ma_pivots(mas_mn(mas), mt); 5259 if (offset) 5260 mas->min = pivots[offset - 1] + 1; 5261 5262 if (offset < mt_pivots[mt]) 5263 mas->max = pivots[offset]; 5264 5265 if (mas->index < mas->min) 5266 mas->index = mas->min; 5267 5268 mas->last = mas->index + size - 1; 5269 return 0; 5270 } 5271 EXPORT_SYMBOL_GPL(mas_empty_area); 5272 5273 /* 5274 * mas_empty_area_rev() - Get the highest address within the range that is 5275 * sufficient for the size requested. 5276 * @mas: The maple state 5277 * @min: The lowest value of the range 5278 * @max: The highest value of the range 5279 * @size: The size needed 5280 */ 5281 int mas_empty_area_rev(struct ma_state *mas, unsigned long min, 5282 unsigned long max, unsigned long size) 5283 { 5284 struct maple_enode *last = mas->node; 5285 5286 if (mas_is_start(mas)) { 5287 mas_start(mas); 5288 mas->offset = mas_data_end(mas); 5289 } else if (mas->offset >= 2) { 5290 mas->offset -= 2; 5291 } else if (!mas_rewind_node(mas)) { 5292 return -EBUSY; 5293 } 5294 5295 /* Empty set. */ 5296 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5297 mas_sparse_area(mas, min, max, size, false); 5298 return 0; 5299 } 5300 5301 /* The start of the window can only be within these values. */ 5302 mas->index = min; 5303 mas->last = max; 5304 5305 while (!mas_rev_awalk(mas, size)) { 5306 if (last == mas->node) { 5307 if (!mas_rewind_node(mas)) 5308 return -EBUSY; 5309 } else { 5310 last = mas->node; 5311 } 5312 } 5313 5314 if (mas_is_err(mas)) 5315 return xa_err(mas->node); 5316 5317 if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) 5318 return -EBUSY; 5319 5320 /* 5321 * mas_rev_awalk() has set mas->min and mas->max to the gap values. If 5322 * the maximum is outside the window we are searching, then use the last 5323 * location in the search. 5324 * mas->max and mas->min is the range of the gap. 5325 * mas->index and mas->last are currently set to the search range. 5326 */ 5327 5328 /* Trim the upper limit to the max. 
*/ 5329 if (mas->max <= mas->last) 5330 mas->last = mas->max; 5331 5332 mas->index = mas->last - size + 1; 5333 return 0; 5334 } 5335 EXPORT_SYMBOL_GPL(mas_empty_area_rev); 5336 5337 static inline int mas_alloc(struct ma_state *mas, void *entry, 5338 unsigned long size, unsigned long *index) 5339 { 5340 unsigned long min; 5341 5342 mas_start(mas); 5343 if (mas_is_none(mas) || mas_is_ptr(mas)) { 5344 mas_root_expand(mas, entry); 5345 if (mas_is_err(mas)) 5346 return xa_err(mas->node); 5347 5348 if (!mas->index) 5349 return mte_pivot(mas->node, 0); 5350 return mte_pivot(mas->node, 1); 5351 } 5352 5353 /* Must be walking a tree. */ 5354 mas_awalk(mas, size); 5355 if (mas_is_err(mas)) 5356 return xa_err(mas->node); 5357 5358 if (mas->offset == MAPLE_NODE_SLOTS) 5359 goto no_gap; 5360 5361 /* 5362 * At this point, mas->node points to the right node and we have an 5363 * offset that has a sufficient gap. 5364 */ 5365 min = mas->min; 5366 if (mas->offset) 5367 min = mte_pivot(mas->node, mas->offset - 1) + 1; 5368 5369 if (mas->index < min) 5370 mas->index = min; 5371 5372 mas_fill_gap(mas, entry, mas->offset, size, index); 5373 return 0; 5374 5375 no_gap: 5376 return -EBUSY; 5377 } 5378 5379 static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, 5380 unsigned long max, void *entry, 5381 unsigned long size, unsigned long *index) 5382 { 5383 int ret = 0; 5384 5385 ret = mas_empty_area_rev(mas, min, max, size); 5386 if (ret) 5387 return ret; 5388 5389 if (mas_is_err(mas)) 5390 return xa_err(mas->node); 5391 5392 if (mas->offset == MAPLE_NODE_SLOTS) 5393 goto no_gap; 5394 5395 mas_fill_gap(mas, entry, mas->offset, size, index); 5396 return 0; 5397 5398 no_gap: 5399 return -EBUSY; 5400 } 5401 5402 /* 5403 * mas_dead_leaves() - Mark all leaves of a node as dead. 5404 * @mas: The maple state 5405 * @slots: Pointer to the slot array 5406 * 5407 * Must hold the write lock. 5408 * 5409 * Return: The number of leaves marked as dead. 
5410 */ 5411 static inline 5412 unsigned char mas_dead_leaves(struct ma_state *mas, void __rcu **slots) 5413 { 5414 struct maple_node *node; 5415 enum maple_type type; 5416 void *entry; 5417 int offset; 5418 5419 for (offset = 0; offset < mt_slot_count(mas->node); offset++) { 5420 entry = mas_slot_locked(mas, slots, offset); 5421 type = mte_node_type(entry); 5422 node = mte_to_node(entry); 5423 /* Use both node and type to catch LE & BE metadata */ 5424 if (!node || !type) 5425 break; 5426 5427 mte_set_node_dead(entry); 5428 smp_wmb(); /* Needed for RCU */ 5429 node->type = type; 5430 rcu_assign_pointer(slots[offset], node); 5431 } 5432 5433 return offset; 5434 } 5435 5436 static void __rcu **mas_dead_walk(struct ma_state *mas, unsigned char offset) 5437 { 5438 struct maple_node *node, *next; 5439 void __rcu **slots = NULL; 5440 5441 next = mas_mn(mas); 5442 do { 5443 mas->node = ma_enode_ptr(next); 5444 node = mas_mn(mas); 5445 slots = ma_slots(node, node->type); 5446 next = mas_slot_locked(mas, slots, offset); 5447 offset = 0; 5448 } while (!ma_is_leaf(next->type)); 5449 5450 return slots; 5451 } 5452 5453 static void mt_free_walk(struct rcu_head *head) 5454 { 5455 void __rcu **slots; 5456 struct maple_node *node, *start; 5457 struct maple_tree mt; 5458 unsigned char offset; 5459 enum maple_type type; 5460 MA_STATE(mas, &mt, 0, 0); 5461 5462 node = container_of(head, struct maple_node, rcu); 5463 5464 if (ma_is_leaf(node->type)) 5465 goto free_leaf; 5466 5467 mt_init_flags(&mt, node->ma_flags); 5468 mas_lock(&mas); 5469 start = node; 5470 mas.node = mt_mk_node(node, node->type); 5471 slots = mas_dead_walk(&mas, 0); 5472 node = mas_mn(&mas); 5473 do { 5474 mt_free_bulk(node->slot_len, slots); 5475 offset = node->parent_slot + 1; 5476 mas.node = node->piv_parent; 5477 if (mas_mn(&mas) == node) 5478 goto start_slots_free; 5479 5480 type = mte_node_type(mas.node); 5481 slots = ma_slots(mte_to_node(mas.node), type); 5482 if ((offset < mt_slots[type]) && (slots[offset])) 5483 slots = mas_dead_walk(&mas, offset); 5484 5485 node = mas_mn(&mas); 5486 } while ((node != start) || (node->slot_len < offset)); 5487 5488 slots = ma_slots(node, node->type); 5489 mt_free_bulk(node->slot_len, slots); 5490 5491 start_slots_free: 5492 mas_unlock(&mas); 5493 free_leaf: 5494 mt_free_rcu(&node->rcu); 5495 } 5496 5497 static inline void __rcu **mas_destroy_descend(struct ma_state *mas, 5498 struct maple_enode *prev, unsigned char offset) 5499 { 5500 struct maple_node *node; 5501 struct maple_enode *next = mas->node; 5502 void __rcu **slots = NULL; 5503 5504 do { 5505 mas->node = next; 5506 node = mas_mn(mas); 5507 slots = ma_slots(node, mte_node_type(mas->node)); 5508 next = mas_slot_locked(mas, slots, 0); 5509 if ((mte_dead_node(next))) 5510 next = mas_slot_locked(mas, slots, 1); 5511 5512 mte_set_node_dead(mas->node); 5513 node->type = mte_node_type(mas->node); 5514 node->piv_parent = prev; 5515 node->parent_slot = offset; 5516 offset = 0; 5517 prev = mas->node; 5518 } while (!mte_is_leaf(next)); 5519 5520 return slots; 5521 } 5522 5523 static void mt_destroy_walk(struct maple_enode *enode, unsigned char ma_flags, 5524 bool free) 5525 { 5526 void __rcu **slots; 5527 struct maple_node *node = mte_to_node(enode); 5528 struct maple_enode *start; 5529 struct maple_tree mt; 5530 5531 MA_STATE(mas, &mt, 0, 0); 5532 5533 if (mte_is_leaf(enode)) 5534 goto free_leaf; 5535 5536 mt_init_flags(&mt, ma_flags); 5537 mas_lock(&mas); 5538 5539 mas.node = start = enode; 5540 slots = mas_destroy_descend(&mas, start, 0); 5541 
node = mas_mn(&mas); 5542 do { 5543 enum maple_type type; 5544 unsigned char offset; 5545 struct maple_enode *parent, *tmp; 5546 5547 node->slot_len = mas_dead_leaves(&mas, slots); 5548 if (free) 5549 mt_free_bulk(node->slot_len, slots); 5550 offset = node->parent_slot + 1; 5551 mas.node = node->piv_parent; 5552 if (mas_mn(&mas) == node) 5553 goto start_slots_free; 5554 5555 type = mte_node_type(mas.node); 5556 slots = ma_slots(mte_to_node(mas.node), type); 5557 if (offset >= mt_slots[type]) 5558 goto next; 5559 5560 tmp = mas_slot_locked(&mas, slots, offset); 5561 if (mte_node_type(tmp) && mte_to_node(tmp)) { 5562 parent = mas.node; 5563 mas.node = tmp; 5564 slots = mas_destroy_descend(&mas, parent, offset); 5565 } 5566 next: 5567 node = mas_mn(&mas); 5568 } while (start != mas.node); 5569 5570 node = mas_mn(&mas); 5571 node->slot_len = mas_dead_leaves(&mas, slots); 5572 if (free) 5573 mt_free_bulk(node->slot_len, slots); 5574 5575 start_slots_free: 5576 mas_unlock(&mas); 5577 5578 free_leaf: 5579 if (free) 5580 mt_free_rcu(&node->rcu); 5581 } 5582 5583 /* 5584 * mte_destroy_walk() - Free a tree or sub-tree. 5585 * @enode: the encoded maple node (maple_enode) to start 5586 * @mt: the tree to free - needed for node types. 5587 * 5588 * Must hold the write lock. 5589 */ 5590 static inline void mte_destroy_walk(struct maple_enode *enode, 5591 struct maple_tree *mt) 5592 { 5593 struct maple_node *node = mte_to_node(enode); 5594 5595 if (mt_in_rcu(mt)) { 5596 mt_destroy_walk(enode, mt->ma_flags, false); 5597 call_rcu(&node->rcu, mt_free_walk); 5598 } else { 5599 mt_destroy_walk(enode, mt->ma_flags, true); 5600 } 5601 } 5602 5603 static void mas_wr_store_setup(struct ma_wr_state *wr_mas) 5604 { 5605 if (unlikely(mas_is_paused(wr_mas->mas))) 5606 mas_reset(wr_mas->mas); 5607 5608 if (!mas_is_start(wr_mas->mas)) { 5609 if (mas_is_none(wr_mas->mas)) { 5610 mas_reset(wr_mas->mas); 5611 } else { 5612 wr_mas->r_max = wr_mas->mas->max; 5613 wr_mas->type = mte_node_type(wr_mas->mas->node); 5614 if (mas_is_span_wr(wr_mas)) 5615 mas_reset(wr_mas->mas); 5616 } 5617 } 5618 } 5619 5620 /* Interface */ 5621 5622 /** 5623 * mas_store() - Store an @entry. 5624 * @mas: The maple state. 5625 * @entry: The entry to store. 5626 * 5627 * The @mas->index and @mas->last is used to set the range for the @entry. 5628 * Note: The @mas should have pre-allocated entries to ensure there is memory to 5629 * store the entry. Please see mas_expected_entries()/mas_destroy() for more details. 5630 * 5631 * Return: the first entry between mas->index and mas->last or %NULL. 5632 */ 5633 void *mas_store(struct ma_state *mas, void *entry) 5634 { 5635 MA_WR_STATE(wr_mas, mas, entry); 5636 5637 trace_ma_write(__func__, mas, 0, entry); 5638 #ifdef CONFIG_DEBUG_MAPLE_TREE 5639 if (mas->index > mas->last) 5640 pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); 5641 MT_BUG_ON(mas->tree, mas->index > mas->last); 5642 if (mas->index > mas->last) { 5643 mas_set_err(mas, -EINVAL); 5644 return NULL; 5645 } 5646 5647 #endif 5648 5649 /* 5650 * Storing is the same operation as insert with the added caveat that it 5651 * can overwrite entries. Although this seems simple enough, one may 5652 * want to examine what happens if a single store operation was to 5653 * overwrite multiple entries within a self-balancing B-Tree. 5654 */ 5655 mas_wr_store_setup(&wr_mas); 5656 mas_wr_store_entry(&wr_mas); 5657 return wr_mas.content; 5658 } 5659 EXPORT_SYMBOL_GPL(mas_store); 5660 5661 /** 5662 * mas_store_gfp() - Store a value into the tree. 
5663 * @mas: The maple state 5664 * @entry: The entry to store 5665 * @gfp: The GFP_FLAGS to use for allocations if necessary. 5666 * 5667 * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not 5668 * be allocated. 5669 */ 5670 int mas_store_gfp(struct ma_state *mas, void *entry, gfp_t gfp) 5671 { 5672 MA_WR_STATE(wr_mas, mas, entry); 5673 5674 mas_wr_store_setup(&wr_mas); 5675 trace_ma_write(__func__, mas, 0, entry); 5676 retry: 5677 mas_wr_store_entry(&wr_mas); 5678 if (unlikely(mas_nomem(mas, gfp))) 5679 goto retry; 5680 5681 if (unlikely(mas_is_err(mas))) 5682 return xa_err(mas->node); 5683 5684 return 0; 5685 } 5686 EXPORT_SYMBOL_GPL(mas_store_gfp); 5687 5688 /** 5689 * mas_store_prealloc() - Store a value into the tree using memory 5690 * preallocated in the maple state. 5691 * @mas: The maple state 5692 * @entry: The entry to store. 5693 */ 5694 void mas_store_prealloc(struct ma_state *mas, void *entry) 5695 { 5696 MA_WR_STATE(wr_mas, mas, entry); 5697 5698 mas_wr_store_setup(&wr_mas); 5699 trace_ma_write(__func__, mas, 0, entry); 5700 mas_wr_store_entry(&wr_mas); 5701 BUG_ON(mas_is_err(mas)); 5702 mas_destroy(mas); 5703 } 5704 EXPORT_SYMBOL_GPL(mas_store_prealloc); 5705 5706 /** 5707 * mas_preallocate() - Preallocate enough nodes for a store operation 5708 * @mas: The maple state 5709 * @gfp: The GFP_FLAGS to use for allocations. 5710 * 5711 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5712 */ 5713 int mas_preallocate(struct ma_state *mas, gfp_t gfp) 5714 { 5715 int ret; 5716 5717 mas_node_count_gfp(mas, 1 + mas_mt_height(mas) * 3, gfp); 5718 mas->mas_flags |= MA_STATE_PREALLOC; 5719 if (likely(!mas_is_err(mas))) 5720 return 0; 5721 5722 mas_set_alloc_req(mas, 0); 5723 ret = xa_err(mas->node); 5724 mas_reset(mas); 5725 mas_destroy(mas); 5726 mas_reset(mas); 5727 return ret; 5728 } 5729 5730 /* 5731 * mas_destroy() - destroy a maple state. 5732 * @mas: The maple state 5733 * 5734 * Upon completion, check the left-most node and rebalance against the node to 5735 * the right if necessary. Frees any allocated nodes associated with this maple 5736 * state. 5737 */ 5738 void mas_destroy(struct ma_state *mas) 5739 { 5740 struct maple_alloc *node; 5741 unsigned long total; 5742 5743 /* 5744 * When using mas_for_each() to insert an expected number of elements, 5745 * it is possible that the number inserted is less than the expected 5746 * number. To fix an invalid final node, a check is performed here to 5747 * rebalance the previous node with the final node. 5748 */ 5749 if (mas->mas_flags & MA_STATE_REBALANCE) { 5750 unsigned char end; 5751 5752 if (mas_is_start(mas)) 5753 mas_start(mas); 5754 5755 mtree_range_walk(mas); 5756 end = mas_data_end(mas) + 1; 5757 if (end < mt_min_slot_count(mas->node) - 1) 5758 mas_destroy_rebalance(mas, end); 5759 5760 mas->mas_flags &= ~MA_STATE_REBALANCE; 5761 } 5762 mas->mas_flags &= ~(MA_STATE_BULK|MA_STATE_PREALLOC); 5763 5764 total = mas_allocated(mas); 5765 while (total) { 5766 node = mas->alloc; 5767 mas->alloc = node->slot[0]; 5768 if (node->node_count > 1) { 5769 size_t count = node->node_count - 1; 5770 5771 mt_free_bulk(count, (void __rcu **)&node->slot[1]); 5772 total -= count; 5773 } 5774 kmem_cache_free(maple_node_cache, node); 5775 total--; 5776 } 5777 5778 mas->alloc = NULL; 5779 } 5780 EXPORT_SYMBOL_GPL(mas_destroy); 5781 5782 /* 5783 * mas_expected_entries() - Set the expected number of entries that will be inserted. 
5784 * @mas: The maple state 5785 * @nr_entries: The number of expected entries. 5786 * 5787 * This will attempt to pre-allocate enough nodes to store the expected number 5788 * of entries. The allocations will occur using the bulk allocator interface 5789 * for speed. Please call mas_destroy() on the @mas after inserting the entries 5790 * to ensure any unused nodes are freed. 5791 * 5792 * Return: 0 on success, -ENOMEM if memory could not be allocated. 5793 */ 5794 int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) 5795 { 5796 int nonleaf_cap = MAPLE_ARANGE64_SLOTS - 2; 5797 struct maple_enode *enode = mas->node; 5798 int nr_nodes; 5799 int ret; 5800 5801 /* 5802 * Sometimes it is necessary to duplicate a tree to a new tree, such as 5803 * forking a process and duplicating the VMAs from one tree to a new 5804 * tree. When such a situation arises, it is known that the new tree is 5805 * not going to be used until the entire tree is populated. For 5806 * performance reasons, it is best to use a bulk load with RCU disabled. 5807 * This allows for optimistic splitting that favours the left and reuse 5808 * of nodes during the operation. 5809 */ 5810 5811 /* Optimize splitting for bulk insert in-order */ 5812 mas->mas_flags |= MA_STATE_BULK; 5813 5814 /* 5815 * Avoid overflow, assume a gap between each entry and a trailing null. 5816 * If this is wrong, it just means allocation can happen during 5817 * insertion of entries. 5818 */ 5819 nr_nodes = max(nr_entries, nr_entries * 2 + 1); 5820 if (!mt_is_alloc(mas->tree)) 5821 nonleaf_cap = MAPLE_RANGE64_SLOTS - 2; 5822 5823 /* Leaves; reduce slots to keep space for expansion */ 5824 nr_nodes = DIV_ROUND_UP(nr_nodes, MAPLE_RANGE64_SLOTS - 2); 5825 /* Internal nodes */ 5826 nr_nodes += DIV_ROUND_UP(nr_nodes, nonleaf_cap); 5827 /* Add working room for split (2 nodes) + new parents */ 5828 mas_node_count(mas, nr_nodes + 3); 5829 5830 /* Detect if allocations run out */ 5831 mas->mas_flags |= MA_STATE_PREALLOC; 5832 5833 if (!mas_is_err(mas)) 5834 return 0; 5835 5836 ret = xa_err(mas->node); 5837 mas->node = enode; 5838 mas_destroy(mas); 5839 return ret; 5840 5841 } 5842 EXPORT_SYMBOL_GPL(mas_expected_entries); 5843 5844 /** 5845 * mas_next() - Get the next entry. 5846 * @mas: The maple state 5847 * @max: The maximum index to check. 5848 * 5849 * Returns the next entry after @mas->index. 5850 * Must hold rcu_read_lock or the write lock. 5851 * Can return the zero entry. 5852 * 5853 * Return: The next entry or %NULL 5854 */ 5855 void *mas_next(struct ma_state *mas, unsigned long max) 5856 { 5857 if (mas_is_none(mas) || mas_is_paused(mas)) 5858 mas->node = MAS_START; 5859 5860 if (mas_is_start(mas)) 5861 mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ 5862 5863 if (mas_is_ptr(mas)) { 5864 if (!mas->index) { 5865 mas->index = 1; 5866 mas->last = ULONG_MAX; 5867 } 5868 return NULL; 5869 } 5870 5871 if (mas->last == ULONG_MAX) 5872 return NULL; 5873 5874 /* Retries on dead nodes handled by mas_next_entry */ 5875 return mas_next_entry(mas, max); 5876 } 5877 EXPORT_SYMBOL_GPL(mas_next); 5878 5879 /** 5880 * mt_next() - get the next value in the maple tree 5881 * @mt: The maple tree 5882 * @index: The start index 5883 * @max: The maximum index to check 5884 * 5885 * Return: The entry at @index or higher, or %NULL if nothing is found. 
5886 */ 5887 void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) 5888 { 5889 void *entry = NULL; 5890 MA_STATE(mas, mt, index, index); 5891 5892 rcu_read_lock(); 5893 entry = mas_next(&mas, max); 5894 rcu_read_unlock(); 5895 return entry; 5896 } 5897 EXPORT_SYMBOL_GPL(mt_next); 5898 5899 /** 5900 * mas_prev() - Get the previous entry 5901 * @mas: The maple state 5902 * @min: The minimum value to check. 5903 * 5904 * Must hold rcu_read_lock or the write lock. 5905 * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not 5906 * searchable nodes. 5907 * 5908 * Return: the previous value or %NULL. 5909 */ 5910 void *mas_prev(struct ma_state *mas, unsigned long min) 5911 { 5912 if (!mas->index) { 5913 /* Nothing comes before 0 */ 5914 mas->last = 0; 5915 mas->node = MAS_NONE; 5916 return NULL; 5917 } 5918 5919 if (unlikely(mas_is_ptr(mas))) 5920 return NULL; 5921 5922 if (mas_is_none(mas) || mas_is_paused(mas)) 5923 mas->node = MAS_START; 5924 5925 if (mas_is_start(mas)) { 5926 mas_walk(mas); 5927 if (!mas->index) 5928 return NULL; 5929 } 5930 5931 if (mas_is_ptr(mas)) { 5932 if (!mas->index) { 5933 mas->last = 0; 5934 return NULL; 5935 } 5936 5937 mas->index = mas->last = 0; 5938 return mas_root_locked(mas); 5939 } 5940 return mas_prev_entry(mas, min); 5941 } 5942 EXPORT_SYMBOL_GPL(mas_prev); 5943 5944 /** 5945 * mt_prev() - get the previous value in the maple tree 5946 * @mt: The maple tree 5947 * @index: The start index 5948 * @min: The minimum index to check 5949 * 5950 * Return: The entry at @index or lower, or %NULL if nothing is found. 5951 */ 5952 void *mt_prev(struct maple_tree *mt, unsigned long index, unsigned long min) 5953 { 5954 void *entry = NULL; 5955 MA_STATE(mas, mt, index, index); 5956 5957 rcu_read_lock(); 5958 entry = mas_prev(&mas, min); 5959 rcu_read_unlock(); 5960 return entry; 5961 } 5962 EXPORT_SYMBOL_GPL(mt_prev); 5963 5964 /** 5965 * mas_pause() - Pause a mas_find/mas_for_each to drop the lock. 5966 * @mas: The maple state to pause 5967 * 5968 * Some users need to pause a walk and drop the lock they're holding in 5969 * order to yield to a higher priority thread or carry out an operation 5970 * on an entry. Those users should call this function before they drop 5971 * the lock. It resets the @mas to be suitable for the next iteration 5972 * of the loop after the user has reacquired the lock. If most entries 5973 * found during a walk require you to call mas_pause(), the mt_for_each() 5974 * iterator may be more appropriate. 5975 * 5976 */ 5977 void mas_pause(struct ma_state *mas) 5978 { 5979 mas->node = MAS_PAUSE; 5980 } 5981 EXPORT_SYMBOL_GPL(mas_pause); 5982 5983 /** 5984 * mas_find() - On the first call, find the entry at or after mas->index up to 5985 * %max. Otherwise, find the entry after mas->index. 5986 * @mas: The maple state 5987 * @max: The maximum value to check. 5988 * 5989 * Must hold rcu_read_lock or the write lock. 5990 * If an entry exists, last and index are updated accordingly. 5991 * May set @mas->node to MAS_NONE. 5992 * 5993 * Return: The entry or %NULL. 
5994  */
5995 void *mas_find(struct ma_state *mas, unsigned long max)
5996 {
5997 	if (unlikely(mas_is_paused(mas))) {
5998 		if (unlikely(mas->last == ULONG_MAX)) {
5999 			mas->node = MAS_NONE;
6000 			return NULL;
6001 		}
6002 		mas->node = MAS_START;
6003 		mas->index = ++mas->last;
6004 	}
6005 
6006 	if (unlikely(mas_is_none(mas)))
6007 		mas->node = MAS_START;
6008 
6009 	if (unlikely(mas_is_start(mas))) {
6010 		/* First run or continue */
6011 		void *entry;
6012 
6013 		if (mas->index > max)
6014 			return NULL;
6015 
6016 		entry = mas_walk(mas);
6017 		if (entry)
6018 			return entry;
6019 	}
6020 
6021 	if (unlikely(!mas_searchable(mas)))
6022 		return NULL;
6023 
6024 	/* Retries on dead nodes handled by mas_next_entry */
6025 	return mas_next_entry(mas, max);
6026 }
6027 EXPORT_SYMBOL_GPL(mas_find);
6028 
6029 /**
6030  * mas_find_rev() - On the first call, find the first non-null entry at or
6031  * below mas->index down to %min. Otherwise, find the first non-null entry
6032  * below mas->index down to %min.
6033  * @mas: The maple state
6034  * @min: The minimum value to check.
6035  *
6036  * Must hold rcu_read_lock or the write lock.
6037  * If an entry exists, last and index are updated accordingly.
6038  * May set @mas->node to MAS_NONE.
6039  *
6040  * Return: The entry or %NULL.
6041  */
6042 void *mas_find_rev(struct ma_state *mas, unsigned long min)
6043 {
6044 	if (unlikely(mas_is_paused(mas))) {
6045 		if (unlikely(mas->last == ULONG_MAX)) {
6046 			mas->node = MAS_NONE;
6047 			return NULL;
6048 		}
6049 		mas->node = MAS_START;
6050 		mas->last = --mas->index;
6051 	}
6052 
6053 	if (unlikely(mas_is_start(mas))) {
6054 		/* First run or continue */
6055 		void *entry;
6056 
6057 		if (mas->index < min)
6058 			return NULL;
6059 
6060 		entry = mas_walk(mas);
6061 		if (entry)
6062 			return entry;
6063 	}
6064 
6065 	if (unlikely(!mas_searchable(mas)))
6066 		return NULL;
6067 
6068 	if (mas->index < min)
6069 		return NULL;
6070 
6071 	/* Retries on dead nodes handled by mas_prev_entry */
6072 	return mas_prev_entry(mas, min);
6073 }
6074 EXPORT_SYMBOL_GPL(mas_find_rev);
6075 
6076 /**
6077  * mas_erase() - Find the range in which index resides and erase the entire
6078  * range.
6079  * @mas: The maple state
6080  *
6081  * Must hold the write lock.
6082  * Searches for @mas->index, sets @mas->index and @mas->last to the range and
6083  * erases that range.
6084  *
6085  * Return: the entry that was erased or %NULL, @mas->index and @mas->last are updated.
6086  */
6087 void *mas_erase(struct ma_state *mas)
6088 {
6089 	void *entry;
6090 	MA_WR_STATE(wr_mas, mas, NULL);
6091 
6092 	if (mas_is_none(mas) || mas_is_paused(mas))
6093 		mas->node = MAS_START;
6094 
6095 	/* Retry unnecessary when holding the write lock. */
6096 	entry = mas_state_walk(mas);
6097 	if (!entry)
6098 		return NULL;
6099 
6100 write_retry:
6101 	/* Must reset to ensure spanning writes of last slot are detected */
6102 	mas_reset(mas);
6103 	mas_wr_store_setup(&wr_mas);
6104 	mas_wr_store_entry(&wr_mas);
6105 	if (mas_nomem(mas, GFP_KERNEL))
6106 		goto write_retry;
6107 
6108 	return entry;
6109 }
6110 EXPORT_SYMBOL_GPL(mas_erase);
6111 
6112 /**
6113  * mas_nomem() - Check if there was an error allocating and do the allocation
6114  * if necessary. If there are allocations, then free them.
6115  * @mas: The maple state
6116  * @gfp: The GFP_FLAGS to use for allocations
6117  * Return: true on allocation, false otherwise.
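 *
 * The canonical caller pattern is the retry loop used by mas_store_gfp()
 * above (sketch):
 *
 * retry:
 *	mas_wr_store_entry(&wr_mas);
 *	if (unlikely(mas_nomem(mas, gfp)))
 *		goto retry;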
6118  */
6119 bool mas_nomem(struct ma_state *mas, gfp_t gfp)
6120 	__must_hold(mas->tree->lock)
6121 {
6122 	if (likely(mas->node != MA_ERROR(-ENOMEM))) {
6123 		mas_destroy(mas);
6124 		return false;
6125 	}
6126 
6127 	if (gfpflags_allow_blocking(gfp) && !mt_external_lock(mas->tree)) {
6128 		mtree_unlock(mas->tree);
6129 		mas_alloc_nodes(mas, gfp);
6130 		mtree_lock(mas->tree);
6131 	} else {
6132 		mas_alloc_nodes(mas, gfp);
6133 	}
6134 
6135 	if (!mas_allocated(mas))
6136 		return false;
6137 
6138 	mas->node = MAS_START;
6139 	return true;
6140 }
6141 
6142 void __init maple_tree_init(void)
6143 {
6144 	maple_node_cache = kmem_cache_create("maple_node",
6145 			sizeof(struct maple_node), sizeof(struct maple_node),
6146 			SLAB_PANIC, NULL);
6147 }
6148 
6149 /**
6150  * mtree_load() - Load a value stored in a maple tree
6151  * @mt: The maple tree
6152  * @index: The index to load
6153  *
6154  * Return: the entry or %NULL
6155  */
6156 void *mtree_load(struct maple_tree *mt, unsigned long index)
6157 {
6158 	MA_STATE(mas, mt, index, index);
6159 	void *entry;
6160 
6161 	trace_ma_read(__func__, &mas);
6162 	rcu_read_lock();
6163 retry:
6164 	entry = mas_start(&mas);
6165 	if (unlikely(mas_is_none(&mas)))
6166 		goto unlock;
6167 
6168 	if (unlikely(mas_is_ptr(&mas))) {
6169 		if (index)
6170 			entry = NULL;
6171 
6172 		goto unlock;
6173 	}
6174 
6175 	entry = mtree_lookup_walk(&mas);
6176 	if (!entry && unlikely(mas_is_start(&mas)))
6177 		goto retry;
6178 unlock:
6179 	rcu_read_unlock();
6180 	if (xa_is_zero(entry))
6181 		return NULL;
6182 
6183 	return entry;
6184 }
6185 EXPORT_SYMBOL(mtree_load);
6186 
6187 /**
6188  * mtree_store_range() - Store an entry at a given range.
6189  * @mt: The maple tree
6190  * @index: The start of the range
6191  * @last: The end of the range
6192  * @entry: The entry to store
6193  * @gfp: The GFP_FLAGS to use for allocations
6194  *
6195  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6196  * be allocated.
6197  */
6198 int mtree_store_range(struct maple_tree *mt, unsigned long index,
6199 		unsigned long last, void *entry, gfp_t gfp)
6200 {
6201 	MA_STATE(mas, mt, index, last);
6202 	MA_WR_STATE(wr_mas, &mas, entry);
6203 
6204 	trace_ma_write(__func__, &mas, 0, entry);
6205 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6206 		return -EINVAL;
6207 
6208 	if (index > last)
6209 		return -EINVAL;
6210 
6211 	mtree_lock(mt);
6212 retry:
6213 	mas_wr_store_entry(&wr_mas);
6214 	if (mas_nomem(&mas, gfp))
6215 		goto retry;
6216 
6217 	mtree_unlock(mt);
6218 	if (mas_is_err(&mas))
6219 		return xa_err(mas.node);
6220 
6221 	return 0;
6222 }
6223 EXPORT_SYMBOL(mtree_store_range);
6224 
6225 /**
6226  * mtree_store() - Store an entry at a given index.
6227  * @mt: The maple tree
6228  * @index: The index to store the value
6229  * @entry: The entry to store
6230  * @gfp: The GFP_FLAGS to use for allocations
6231  *
6232  * Return: 0 on success, -EINVAL on invalid request, -ENOMEM if memory could not
6233  * be allocated.
6234  */
6235 int mtree_store(struct maple_tree *mt, unsigned long index, void *entry,
6236 		 gfp_t gfp)
6237 {
6238 	return mtree_store_range(mt, index, index, entry, gfp);
6239 }
6240 EXPORT_SYMBOL(mtree_store);
6241 
6242 /**
6243  * mtree_insert_range() - Insert an entry at a given range if there is no value.
6244  * @mt: The maple tree
6245  * @first: The start of the range
6246  * @last: The end of the range
6247  * @entry: The entry to store
6248  * @gfp: The GFP_FLAGS to use for allocations.
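 *
 * Unlike mtree_store_range(), this fails instead of overwriting when any
 * part of the range already holds an entry. An illustrative sketch (names
 * are placeholders):
 *
 *	int ret = mtree_insert_range(mt, 10, 19, ptr, GFP_KERNEL);
 *	if (ret == -EEXIST)
 *		pr_debug("some entry already overlaps the range 10-19\n");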
6249  *
6250  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6251  * request, -ENOMEM if memory could not be allocated.
6252  */
6253 int mtree_insert_range(struct maple_tree *mt, unsigned long first,
6254 		unsigned long last, void *entry, gfp_t gfp)
6255 {
6256 	MA_STATE(ms, mt, first, last);
6257 
6258 	if (WARN_ON_ONCE(xa_is_advanced(entry)))
6259 		return -EINVAL;
6260 
6261 	if (first > last)
6262 		return -EINVAL;
6263 
6264 	mtree_lock(mt);
6265 retry:
6266 	mas_insert(&ms, entry);
6267 	if (mas_nomem(&ms, gfp))
6268 		goto retry;
6269 
6270 	mtree_unlock(mt);
6271 	if (mas_is_err(&ms))
6272 		return xa_err(ms.node);
6273 
6274 	return 0;
6275 }
6276 EXPORT_SYMBOL(mtree_insert_range);
6277 
6278 /**
6279  * mtree_insert() - Insert an entry at a given index if there is no value.
6280  * @mt: The maple tree
6281  * @index: The index to store the value
6282  * @entry: The entry to store
6283  * @gfp: The GFP_FLAGS to use for allocations.
6284  *
6285  * Return: 0 on success, -EEXIST if the range is occupied, -EINVAL on invalid
6286  * request, -ENOMEM if memory could not be allocated.
6287  */
6288 int mtree_insert(struct maple_tree *mt, unsigned long index, void *entry,
6289 		 gfp_t gfp)
6290 {
6291 	return mtree_insert_range(mt, index, index, entry, gfp);
6292 }
6293 EXPORT_SYMBOL(mtree_insert);
6294 
6295 int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp,
6296 		void *entry, unsigned long size, unsigned long min,
6297 		unsigned long max, gfp_t gfp)
6298 {
6299 	int ret = 0;
6300 
6301 	MA_STATE(mas, mt, min, max - size);
6302 	if (!mt_is_alloc(mt))
6303 		return -EINVAL;
6304 
6305 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6306 		return -EINVAL;
6307 
6308 	if (min > max)
6309 		return -EINVAL;
6310 
6311 	if (max < size)
6312 		return -EINVAL;
6313 
6314 	if (!size)
6315 		return -EINVAL;
6316 
6317 	mtree_lock(mt);
6318 retry:
6319 	mas.offset = 0;
6320 	mas.index = min;
6321 	mas.last = max - size;
6322 	ret = mas_alloc(&mas, entry, size, startp);
6323 	if (mas_nomem(&mas, gfp))
6324 		goto retry;
6325 
6326 	mtree_unlock(mt);
6327 	return ret;
6328 }
6329 EXPORT_SYMBOL(mtree_alloc_range);
6330 
6331 int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp,
6332 		void *entry, unsigned long size, unsigned long min,
6333 		unsigned long max, gfp_t gfp)
6334 {
6335 	int ret = 0;
6336 
6337 	MA_STATE(mas, mt, min, max - size);
6338 	if (!mt_is_alloc(mt))
6339 		return -EINVAL;
6340 
6341 	if (WARN_ON_ONCE(mt_is_reserved(entry)))
6342 		return -EINVAL;
6343 
6344 	if (min >= max)
6345 		return -EINVAL;
6346 
6347 	if (max < size - 1)
6348 		return -EINVAL;
6349 
6350 	if (!size)
6351 		return -EINVAL;
6352 
6353 	mtree_lock(mt);
6354 retry:
6355 	ret = mas_rev_alloc(&mas, min, max, entry, size, startp);
6356 	if (mas_nomem(&mas, gfp))
6357 		goto retry;
6358 
6359 	mtree_unlock(mt);
6360 	return ret;
6361 }
6362 EXPORT_SYMBOL(mtree_alloc_rrange);
6363 
6364 /**
6365  * mtree_erase() - Find an index and erase the entire range.
6366  * @mt: The maple tree
6367  * @index: The index to erase
6368  *
6369  * Erasing is the same as a walk to an entry then a store of a NULL to that
6370  * ENTIRE range. In fact, it is implemented as such using the advanced API.
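 *
 * For example, if one entry occupies indices 5-10, erasing any index within
 * that range removes the whole entry (an illustrative sketch):
 *
 *	mtree_store_range(mt, 5, 10, ptr, GFP_KERNEL);
 *	entry = mtree_erase(mt, 7);
 *
 * returns ptr in entry and leaves indices 5-10 reading as NULL.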
6371 * 6372 * Return: The entry stored at the @index or %NULL 6373 */ 6374 void *mtree_erase(struct maple_tree *mt, unsigned long index) 6375 { 6376 void *entry = NULL; 6377 6378 MA_STATE(mas, mt, index, index); 6379 trace_ma_op(__func__, &mas); 6380 6381 mtree_lock(mt); 6382 entry = mas_erase(&mas); 6383 mtree_unlock(mt); 6384 6385 return entry; 6386 } 6387 EXPORT_SYMBOL(mtree_erase); 6388 6389 /** 6390 * __mt_destroy() - Walk and free all nodes of a locked maple tree. 6391 * @mt: The maple tree 6392 * 6393 * Note: Does not handle locking. 6394 */ 6395 void __mt_destroy(struct maple_tree *mt) 6396 { 6397 void *root = mt_root_locked(mt); 6398 6399 rcu_assign_pointer(mt->ma_root, NULL); 6400 if (xa_is_node(root)) 6401 mte_destroy_walk(root, mt); 6402 6403 mt->ma_flags = 0; 6404 } 6405 EXPORT_SYMBOL_GPL(__mt_destroy); 6406 6407 /** 6408 * mtree_destroy() - Destroy a maple tree 6409 * @mt: The maple tree 6410 * 6411 * Frees all resources used by the tree. Handles locking. 6412 */ 6413 void mtree_destroy(struct maple_tree *mt) 6414 { 6415 mtree_lock(mt); 6416 __mt_destroy(mt); 6417 mtree_unlock(mt); 6418 } 6419 EXPORT_SYMBOL(mtree_destroy); 6420 6421 /** 6422 * mt_find() - Search from the start up until an entry is found. 6423 * @mt: The maple tree 6424 * @index: Pointer which contains the start location of the search 6425 * @max: The maximum value to check 6426 * 6427 * Handles locking. @index will be incremented to one beyond the range. 6428 * 6429 * Return: The entry at or after the @index or %NULL 6430 */ 6431 void *mt_find(struct maple_tree *mt, unsigned long *index, unsigned long max) 6432 { 6433 MA_STATE(mas, mt, *index, *index); 6434 void *entry; 6435 #ifdef CONFIG_DEBUG_MAPLE_TREE 6436 unsigned long copy = *index; 6437 #endif 6438 6439 trace_ma_read(__func__, &mas); 6440 6441 if ((*index) > max) 6442 return NULL; 6443 6444 rcu_read_lock(); 6445 retry: 6446 entry = mas_state_walk(&mas); 6447 if (mas_is_start(&mas)) 6448 goto retry; 6449 6450 if (unlikely(xa_is_zero(entry))) 6451 entry = NULL; 6452 6453 if (entry) 6454 goto unlock; 6455 6456 while (mas_searchable(&mas) && (mas.index < max)) { 6457 entry = mas_next_entry(&mas, max); 6458 if (likely(entry && !xa_is_zero(entry))) 6459 break; 6460 } 6461 6462 if (unlikely(xa_is_zero(entry))) 6463 entry = NULL; 6464 unlock: 6465 rcu_read_unlock(); 6466 if (likely(entry)) { 6467 *index = mas.last + 1; 6468 #ifdef CONFIG_DEBUG_MAPLE_TREE 6469 if ((*index) && (*index) <= copy) 6470 pr_err("index not increased! %lx <= %lx\n", 6471 *index, copy); 6472 MT_BUG_ON(mt, (*index) && ((*index) <= copy)); 6473 #endif 6474 } 6475 6476 return entry; 6477 } 6478 EXPORT_SYMBOL(mt_find); 6479 6480 /** 6481 * mt_find_after() - Search from the start up until an entry is found. 
6482  * @mt: The maple tree
6483  * @index: Pointer which contains the start location of the search
6484  * @max: The maximum value to check
6485  *
6486  * Handles locking, detects wrapping on index == 0.
6487  *
6488  * Return: The entry at or after the @index or %NULL
6489  */
6490 void *mt_find_after(struct maple_tree *mt, unsigned long *index,
6491 		    unsigned long max)
6492 {
6493 	if (!(*index))
6494 		return NULL;
6495 
6496 	return mt_find(mt, index, max);
6497 }
6498 EXPORT_SYMBOL(mt_find_after);
6499 
6500 #ifdef CONFIG_DEBUG_MAPLE_TREE
6501 atomic_t maple_tree_tests_run;
6502 EXPORT_SYMBOL_GPL(maple_tree_tests_run);
6503 atomic_t maple_tree_tests_passed;
6504 EXPORT_SYMBOL_GPL(maple_tree_tests_passed);
6505 
6506 #ifndef __KERNEL__
6507 extern void kmem_cache_set_non_kernel(struct kmem_cache *, unsigned int);
6508 void mt_set_non_kernel(unsigned int val)
6509 {
6510 	kmem_cache_set_non_kernel(maple_node_cache, val);
6511 }
6512 
6513 extern unsigned long kmem_cache_get_alloc(struct kmem_cache *);
6514 unsigned long mt_get_alloc_size(void)
6515 {
6516 	return kmem_cache_get_alloc(maple_node_cache);
6517 }
6518 
6519 extern void kmem_cache_zero_nr_tallocated(struct kmem_cache *);
6520 void mt_zero_nr_tallocated(void)
6521 {
6522 	kmem_cache_zero_nr_tallocated(maple_node_cache);
6523 }
6524 
6525 extern unsigned int kmem_cache_nr_tallocated(struct kmem_cache *);
6526 unsigned int mt_nr_tallocated(void)
6527 {
6528 	return kmem_cache_nr_tallocated(maple_node_cache);
6529 }
6530 
6531 extern unsigned int kmem_cache_nr_allocated(struct kmem_cache *);
6532 unsigned int mt_nr_allocated(void)
6533 {
6534 	return kmem_cache_nr_allocated(maple_node_cache);
6535 }
6536 
6537 /*
6538  * mas_dead_node() - Check if the maple state is pointing to a dead node.
6539  * @mas: The maple state
6540  * @index: The index to restore in @mas.
6541  *
6542  * Used in test code.
6543  * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
6544  */
6545 static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
6546 {
6547 	if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
6548 		return 0;
6549 
6550 	if (likely(!mte_dead_node(mas->node)))
6551 		return 0;
6552 
6553 	mas_rewalk(mas, index);
6554 	return 1;
6555 }
6556 
6557 void mt_cache_shrink(void)
6558 {
6559 }
6560 #else
6561 /*
6562  * mt_cache_shrink() - For testing, don't use this.
6563  *
6564  * Certain testcases can trigger an OOM when combined with other memory
6565  * debugging configuration options. This function is used to reduce the
6566  * possibility of an out of memory event due to kmem_cache objects remaining
6567  * around for longer than usual.
6568  */
6569 void mt_cache_shrink(void)
6570 {
6571 	kmem_cache_shrink(maple_node_cache);
6572 
6573 }
6574 EXPORT_SYMBOL_GPL(mt_cache_shrink);
6575 
6576 #endif /* not defined __KERNEL__ */
6577 /*
6578  * mas_get_slot() - Get the entry in the maple state node stored at @offset.
6579  * @mas: The maple state
6580  * @offset: The offset into the slot array to fetch.
6581  *
6582  * Return: The entry stored at @offset.
6583  */
6584 static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
6585 		unsigned char offset)
6586 {
6587 	return mas_slot(mas, ma_slots(mas_mn(mas), mte_node_type(mas->node)),
6588 			offset);
6589 }
6590 
6591 
6592 /*
6593  * mas_first_entry() - Go to the first leaf and find the first entry.
6594  * @mas: the maple state.
6595  * @mn: the maple node backing @mas->node.
6596  * @limit: the maximum index to check.
6597  * @mt: the node type of @mas->node.
6598  * Sets mas->offset to the offset of the entry, mas->index to the range minimum.
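 *
 * Note that slot 0 of the first leaf may legitimately be NULL (e.g. when the
 * tree does not start at index 0), so slot 1 is also tried before giving up.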
6599 * 6600 * Return: The first entry or MAS_NONE. 6601 */ 6602 static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, 6603 unsigned long limit, enum maple_type mt) 6604 6605 { 6606 unsigned long max; 6607 unsigned long *pivots; 6608 void __rcu **slots; 6609 void *entry = NULL; 6610 6611 mas->index = mas->min; 6612 if (mas->index > limit) 6613 goto none; 6614 6615 max = mas->max; 6616 mas->offset = 0; 6617 while (likely(!ma_is_leaf(mt))) { 6618 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6619 slots = ma_slots(mn, mt); 6620 pivots = ma_pivots(mn, mt); 6621 max = pivots[0]; 6622 entry = mas_slot(mas, slots, 0); 6623 if (unlikely(ma_dead_node(mn))) 6624 return NULL; 6625 mas->node = entry; 6626 mn = mas_mn(mas); 6627 mt = mte_node_type(mas->node); 6628 } 6629 MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); 6630 6631 mas->max = max; 6632 slots = ma_slots(mn, mt); 6633 entry = mas_slot(mas, slots, 0); 6634 if (unlikely(ma_dead_node(mn))) 6635 return NULL; 6636 6637 /* Slot 0 or 1 must be set */ 6638 if (mas->index > limit) 6639 goto none; 6640 6641 if (likely(entry)) 6642 return entry; 6643 6644 pivots = ma_pivots(mn, mt); 6645 mas->index = pivots[0] + 1; 6646 mas->offset = 1; 6647 entry = mas_slot(mas, slots, 1); 6648 if (unlikely(ma_dead_node(mn))) 6649 return NULL; 6650 6651 if (mas->index > limit) 6652 goto none; 6653 6654 if (likely(entry)) 6655 return entry; 6656 6657 none: 6658 if (likely(!ma_dead_node(mn))) 6659 mas->node = MAS_NONE; 6660 return NULL; 6661 } 6662 6663 /* Depth first search, post-order */ 6664 static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) 6665 { 6666 6667 struct maple_enode *p = MAS_NONE, *mn = mas->node; 6668 unsigned long p_min, p_max; 6669 6670 mas_next_node(mas, mas_mn(mas), max); 6671 if (!mas_is_none(mas)) 6672 return; 6673 6674 if (mte_is_root(mn)) 6675 return; 6676 6677 mas->node = mn; 6678 mas_ascend(mas); 6679 while (mas->node != MAS_NONE) { 6680 p = mas->node; 6681 p_min = mas->min; 6682 p_max = mas->max; 6683 mas_prev_node(mas, 0); 6684 } 6685 6686 if (p == MAS_NONE) 6687 return; 6688 6689 mas->node = p; 6690 mas->max = p_max; 6691 mas->min = p_min; 6692 } 6693 6694 /* Tree validations */ 6695 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6696 unsigned long min, unsigned long max, unsigned int depth); 6697 static void mt_dump_range(unsigned long min, unsigned long max, 6698 unsigned int depth) 6699 { 6700 static const char spaces[] = " "; 6701 6702 if (min == max) 6703 pr_info("%.*s%lu: ", depth * 2, spaces, min); 6704 else 6705 pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 6706 } 6707 6708 static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, 6709 unsigned int depth) 6710 { 6711 mt_dump_range(min, max, depth); 6712 6713 if (xa_is_value(entry)) 6714 pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), 6715 xa_to_value(entry), entry); 6716 else if (xa_is_zero(entry)) 6717 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 6718 else if (mt_is_reserved(entry)) 6719 pr_cont("UNKNOWN ENTRY (%p)\n", entry); 6720 else 6721 pr_cont("%p\n", entry); 6722 } 6723 6724 static void mt_dump_range64(const struct maple_tree *mt, void *entry, 6725 unsigned long min, unsigned long max, unsigned int depth) 6726 { 6727 struct maple_range_64 *node = &mte_to_node(entry)->mr64; 6728 bool leaf = mte_is_leaf(entry); 6729 unsigned long first = min; 6730 int i; 6731 6732 pr_cont(" contents: "); 6733 for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) 6734 pr_cont("%p %lu ", 
node->slot[i], node->pivot[i]); 6735 pr_cont("%p\n", node->slot[i]); 6736 for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { 6737 unsigned long last = max; 6738 6739 if (i < (MAPLE_RANGE64_SLOTS - 1)) 6740 last = node->pivot[i]; 6741 else if (!node->slot[i] && max != mt_node_max(entry)) 6742 break; 6743 if (last == 0 && i > 0) 6744 break; 6745 if (leaf) 6746 mt_dump_entry(mt_slot(mt, node->slot, i), 6747 first, last, depth + 1); 6748 else if (node->slot[i]) 6749 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6750 first, last, depth + 1); 6751 6752 if (last == max) 6753 break; 6754 if (last > max) { 6755 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6756 node, last, max, i); 6757 break; 6758 } 6759 first = last + 1; 6760 } 6761 } 6762 6763 static void mt_dump_arange64(const struct maple_tree *mt, void *entry, 6764 unsigned long min, unsigned long max, unsigned int depth) 6765 { 6766 struct maple_arange_64 *node = &mte_to_node(entry)->ma64; 6767 bool leaf = mte_is_leaf(entry); 6768 unsigned long first = min; 6769 int i; 6770 6771 pr_cont(" contents: "); 6772 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) 6773 pr_cont("%lu ", node->gap[i]); 6774 pr_cont("| %02X %02X| ", node->meta.end, node->meta.gap); 6775 for (i = 0; i < MAPLE_ARANGE64_SLOTS - 1; i++) 6776 pr_cont("%p %lu ", node->slot[i], node->pivot[i]); 6777 pr_cont("%p\n", node->slot[i]); 6778 for (i = 0; i < MAPLE_ARANGE64_SLOTS; i++) { 6779 unsigned long last = max; 6780 6781 if (i < (MAPLE_ARANGE64_SLOTS - 1)) 6782 last = node->pivot[i]; 6783 else if (!node->slot[i]) 6784 break; 6785 if (last == 0 && i > 0) 6786 break; 6787 if (leaf) 6788 mt_dump_entry(mt_slot(mt, node->slot, i), 6789 first, last, depth + 1); 6790 else if (node->slot[i]) 6791 mt_dump_node(mt, mt_slot(mt, node->slot, i), 6792 first, last, depth + 1); 6793 6794 if (last == max) 6795 break; 6796 if (last > max) { 6797 pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", 6798 node, last, max, i); 6799 break; 6800 } 6801 first = last + 1; 6802 } 6803 } 6804 6805 static void mt_dump_node(const struct maple_tree *mt, void *entry, 6806 unsigned long min, unsigned long max, unsigned int depth) 6807 { 6808 struct maple_node *node = mte_to_node(entry); 6809 unsigned int type = mte_node_type(entry); 6810 unsigned int i; 6811 6812 mt_dump_range(min, max, depth); 6813 6814 pr_cont("node %p depth %d type %d parent %p", node, depth, type, 6815 node ? node->parent : NULL); 6816 switch (type) { 6817 case maple_dense: 6818 pr_cont("\n"); 6819 for (i = 0; i < MAPLE_NODE_SLOTS; i++) { 6820 if (min + i > max) 6821 pr_cont("OUT OF RANGE: "); 6822 mt_dump_entry(mt_slot(mt, node->slot, i), 6823 min + i, min + i, depth); 6824 } 6825 break; 6826 case maple_leaf_64: 6827 case maple_range_64: 6828 mt_dump_range64(mt, entry, min, max, depth); 6829 break; 6830 case maple_arange_64: 6831 mt_dump_arange64(mt, entry, min, max, depth); 6832 break; 6833 6834 default: 6835 pr_cont(" UNKNOWN TYPE\n"); 6836 } 6837 } 6838 6839 void mt_dump(const struct maple_tree *mt) 6840 { 6841 void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); 6842 6843 pr_info("maple_tree(%p) flags %X, height %u root %p\n", 6844 mt, mt->ma_flags, mt_height(mt), entry); 6845 if (!xa_is_node(entry)) 6846 mt_dump_entry(entry, 0, 0, 0); 6847 else if (entry) 6848 mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); 6849 } 6850 EXPORT_SYMBOL_GPL(mt_dump); 6851 6852 /* 6853 * Calculate the maximum gap in a node and check if that's what is reported in 6854 * the parent (unless root). 
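 *
 * For example, a leaf covering 0-50 with pivots {10, 20, 30} and only slot 1
 * occupied has gaps of 11 (0-10), 0 (11-20), 10 (21-30) and 20 (31-50); the
 * parent's gap entry for this node must then be the maximum, 20.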
6855 */ 6856 static void mas_validate_gaps(struct ma_state *mas) 6857 { 6858 struct maple_enode *mte = mas->node; 6859 struct maple_node *p_mn; 6860 unsigned long gap = 0, max_gap = 0; 6861 unsigned long p_end, p_start = mas->min; 6862 unsigned char p_slot; 6863 unsigned long *gaps = NULL; 6864 unsigned long *pivots = ma_pivots(mte_to_node(mte), mte_node_type(mte)); 6865 int i; 6866 6867 if (ma_is_dense(mte_node_type(mte))) { 6868 for (i = 0; i < mt_slot_count(mte); i++) { 6869 if (mas_get_slot(mas, i)) { 6870 if (gap > max_gap) 6871 max_gap = gap; 6872 gap = 0; 6873 continue; 6874 } 6875 gap++; 6876 } 6877 goto counted; 6878 } 6879 6880 gaps = ma_gaps(mte_to_node(mte), mte_node_type(mte)); 6881 for (i = 0; i < mt_slot_count(mte); i++) { 6882 p_end = mas_logical_pivot(mas, pivots, i, mte_node_type(mte)); 6883 6884 if (!gaps) { 6885 if (mas_get_slot(mas, i)) { 6886 gap = 0; 6887 goto not_empty; 6888 } 6889 6890 gap += p_end - p_start + 1; 6891 } else { 6892 void *entry = mas_get_slot(mas, i); 6893 6894 gap = gaps[i]; 6895 if (!entry) { 6896 if (gap != p_end - p_start + 1) { 6897 pr_err("%p[%u] -> %p %lu != %lu - %lu + 1\n", 6898 mas_mn(mas), i, 6899 mas_get_slot(mas, i), gap, 6900 p_end, p_start); 6901 mt_dump(mas->tree); 6902 6903 MT_BUG_ON(mas->tree, 6904 gap != p_end - p_start + 1); 6905 } 6906 } else { 6907 if (gap > p_end - p_start + 1) { 6908 pr_err("%p[%u] %lu >= %lu - %lu + 1 (%lu)\n", 6909 mas_mn(mas), i, gap, p_end, p_start, 6910 p_end - p_start + 1); 6911 MT_BUG_ON(mas->tree, 6912 gap > p_end - p_start + 1); 6913 } 6914 } 6915 } 6916 6917 if (gap > max_gap) 6918 max_gap = gap; 6919 not_empty: 6920 p_start = p_end + 1; 6921 if (p_end >= mas->max) 6922 break; 6923 } 6924 6925 counted: 6926 if (mte_is_root(mte)) 6927 return; 6928 6929 p_slot = mte_parent_slot(mas->node); 6930 p_mn = mte_parent(mte); 6931 MT_BUG_ON(mas->tree, max_gap > mas->max); 6932 if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) { 6933 pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); 6934 mt_dump(mas->tree); 6935 } 6936 6937 MT_BUG_ON(mas->tree, 6938 ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap); 6939 } 6940 6941 static void mas_validate_parent_slot(struct ma_state *mas) 6942 { 6943 struct maple_node *parent; 6944 struct maple_enode *node; 6945 enum maple_type p_type = mas_parent_enum(mas, mas->node); 6946 unsigned char p_slot = mte_parent_slot(mas->node); 6947 void __rcu **slots; 6948 int i; 6949 6950 if (mte_is_root(mas->node)) 6951 return; 6952 6953 parent = mte_parent(mas->node); 6954 slots = ma_slots(parent, p_type); 6955 MT_BUG_ON(mas->tree, mas_mn(mas) == parent); 6956 6957 /* Check prev/next parent slot for duplicate node entry */ 6958 6959 for (i = 0; i < mt_slots[p_type]; i++) { 6960 node = mas_slot(mas, slots, i); 6961 if (i == p_slot) { 6962 if (node != mas->node) 6963 pr_err("parent %p[%u] does not have %p\n", 6964 parent, i, mas_mn(mas)); 6965 MT_BUG_ON(mas->tree, node != mas->node); 6966 } else if (node == mas->node) { 6967 pr_err("Invalid child %p at parent %p[%u] p_slot %u\n", 6968 mas_mn(mas), parent, i, p_slot); 6969 MT_BUG_ON(mas->tree, node == mas->node); 6970 } 6971 } 6972 } 6973 6974 static void mas_validate_child_slot(struct ma_state *mas) 6975 { 6976 enum maple_type type = mte_node_type(mas->node); 6977 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 6978 unsigned long *pivots = ma_pivots(mte_to_node(mas->node), type); 6979 struct maple_enode *child; 6980 unsigned char i; 6981 6982 if (mte_is_leaf(mas->node)) 6983 return; 6984 6985 for (i = 
0; i < mt_slots[type]; i++) { 6986 child = mas_slot(mas, slots, i); 6987 if (!pivots[i] || pivots[i] == mas->max) 6988 break; 6989 6990 if (!child) 6991 break; 6992 6993 if (mte_parent_slot(child) != i) { 6994 pr_err("Slot error at %p[%u]: child %p has pslot %u\n", 6995 mas_mn(mas), i, mte_to_node(child), 6996 mte_parent_slot(child)); 6997 MT_BUG_ON(mas->tree, 1); 6998 } 6999 7000 if (mte_parent(child) != mte_to_node(mas->node)) { 7001 pr_err("child %p has parent %p not %p\n", 7002 mte_to_node(child), mte_parent(child), 7003 mte_to_node(mas->node)); 7004 MT_BUG_ON(mas->tree, 1); 7005 } 7006 } 7007 } 7008 7009 /* 7010 * Validate all pivots are within mas->min and mas->max. 7011 */ 7012 static void mas_validate_limits(struct ma_state *mas) 7013 { 7014 int i; 7015 unsigned long prev_piv = 0; 7016 enum maple_type type = mte_node_type(mas->node); 7017 void __rcu **slots = ma_slots(mte_to_node(mas->node), type); 7018 unsigned long *pivots = ma_pivots(mas_mn(mas), type); 7019 7020 /* all limits are fine here. */ 7021 if (mte_is_root(mas->node)) 7022 return; 7023 7024 for (i = 0; i < mt_slots[type]; i++) { 7025 unsigned long piv; 7026 7027 piv = mas_safe_pivot(mas, pivots, i, type); 7028 7029 if (!piv && (i != 0)) 7030 break; 7031 7032 if (!mte_is_leaf(mas->node)) { 7033 void *entry = mas_slot(mas, slots, i); 7034 7035 if (!entry) 7036 pr_err("%p[%u] cannot be null\n", 7037 mas_mn(mas), i); 7038 7039 MT_BUG_ON(mas->tree, !entry); 7040 } 7041 7042 if (prev_piv > piv) { 7043 pr_err("%p[%u] piv %lu < prev_piv %lu\n", 7044 mas_mn(mas), i, piv, prev_piv); 7045 MT_BUG_ON(mas->tree, piv < prev_piv); 7046 } 7047 7048 if (piv < mas->min) { 7049 pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, 7050 piv, mas->min); 7051 MT_BUG_ON(mas->tree, piv < mas->min); 7052 } 7053 if (piv > mas->max) { 7054 pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, 7055 piv, mas->max); 7056 MT_BUG_ON(mas->tree, piv > mas->max); 7057 } 7058 prev_piv = piv; 7059 if (piv == mas->max) 7060 break; 7061 } 7062 for (i += 1; i < mt_slots[type]; i++) { 7063 void *entry = mas_slot(mas, slots, i); 7064 7065 if (entry && (i != mt_slots[type] - 1)) { 7066 pr_err("%p[%u] should not have entry %p\n", mas_mn(mas), 7067 i, entry); 7068 MT_BUG_ON(mas->tree, entry != NULL); 7069 } 7070 7071 if (i < mt_pivots[type]) { 7072 unsigned long piv = pivots[i]; 7073 7074 if (!piv) 7075 continue; 7076 7077 pr_err("%p[%u] should not have piv %lu\n", 7078 mas_mn(mas), i, piv); 7079 MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1); 7080 } 7081 } 7082 } 7083 7084 static void mt_validate_nulls(struct maple_tree *mt) 7085 { 7086 void *entry, *last = (void *)1; 7087 unsigned char offset = 0; 7088 void __rcu **slots; 7089 MA_STATE(mas, mt, 0, 0); 7090 7091 mas_start(&mas); 7092 if (mas_is_none(&mas) || (mas.node == MAS_ROOT)) 7093 return; 7094 7095 while (!mte_is_leaf(mas.node)) 7096 mas_descend(&mas); 7097 7098 slots = ma_slots(mte_to_node(mas.node), mte_node_type(mas.node)); 7099 do { 7100 entry = mas_slot(&mas, slots, offset); 7101 if (!last && !entry) { 7102 pr_err("Sequential nulls end at %p[%u]\n", 7103 mas_mn(&mas), offset); 7104 } 7105 MT_BUG_ON(mt, !last && !entry); 7106 last = entry; 7107 if (offset == mas_data_end(&mas)) { 7108 mas_next_node(&mas, mas_mn(&mas), ULONG_MAX); 7109 if (mas_is_none(&mas)) 7110 return; 7111 offset = 0; 7112 slots = ma_slots(mte_to_node(mas.node), 7113 mte_node_type(mas.node)); 7114 } else { 7115 offset++; 7116 } 7117 7118 } while (!mas_is_none(&mas)); 7119 } 7120 7121 /* 7122 * validate a maple tree by checking: 7123 * 1. 
The limits (pivots are within mas->min to mas->max) 7124 * 2. The gap is correctly set in the parents 7125 */ 7126 void mt_validate(struct maple_tree *mt) 7127 { 7128 unsigned char end; 7129 7130 MA_STATE(mas, mt, 0, 0); 7131 rcu_read_lock(); 7132 mas_start(&mas); 7133 if (!mas_searchable(&mas)) 7134 goto done; 7135 7136 mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); 7137 while (!mas_is_none(&mas)) { 7138 MT_BUG_ON(mas.tree, mte_dead_node(mas.node)); 7139 if (!mte_is_root(mas.node)) { 7140 end = mas_data_end(&mas); 7141 if ((end < mt_min_slot_count(mas.node)) && 7142 (mas.max != ULONG_MAX)) { 7143 pr_err("Invalid size %u of %p\n", end, 7144 mas_mn(&mas)); 7145 MT_BUG_ON(mas.tree, 1); 7146 } 7147 7148 } 7149 mas_validate_parent_slot(&mas); 7150 mas_validate_child_slot(&mas); 7151 mas_validate_limits(&mas); 7152 if (mt_is_alloc(mt)) 7153 mas_validate_gaps(&mas); 7154 mas_dfs_postorder(&mas, ULONG_MAX); 7155 } 7156 mt_validate_nulls(mt); 7157 done: 7158 rcu_read_unlock(); 7159 7160 } 7161 EXPORT_SYMBOL_GPL(mt_validate); 7162 7163 #endif /* CONFIG_DEBUG_MAPLE_TREE */ 7164
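
/*
 * An illustrative end-to-end sketch of the external API implemented above;
 * my_tree and my_data are placeholder names, and return-value checks are
 * omitted for brevity:
 *
 *	DEFINE_MTREE(my_tree);
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	mtree_store_range(&my_tree, 100, 199, my_data, GFP_KERNEL);
 *	entry = mtree_load(&my_tree, 150);		(returns my_data)
 *	entry = mt_find(&my_tree, &index, ULONG_MAX);	(my_data; index becomes 200)
 *	entry = mtree_erase(&my_tree, 120);		(returns my_data; 100-199 now NULL)
 *	mtree_destroy(&my_tree);
 */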