1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * XArray implementation 4 * Copyright (c) 2017 Microsoft Corporation 5 * Author: Matthew Wilcox <willy@infradead.org> 6 */ 7 8 #include <linux/bitmap.h> 9 #include <linux/export.h> 10 #include <linux/list.h> 11 #include <linux/slab.h> 12 #include <linux/xarray.h> 13 14 /* 15 * Coding conventions in this file: 16 * 17 * @xa is used to refer to the entire xarray. 18 * @xas is the 'xarray operation state'. It may be either a pointer to 19 * an xa_state, or an xa_state stored on the stack. This is an unfortunate 20 * ambiguity. 21 * @index is the index of the entry being operated on 22 * @mark is an xa_mark_t; a small number indicating one of the mark bits. 23 * @node refers to an xa_node; usually the primary one being operated on by 24 * this function. 25 * @offset is the index into the slots array inside an xa_node. 26 * @parent refers to the @xa_node closer to the head than @node. 27 * @entry refers to something stored in a slot in the xarray 28 */ 29 30 static inline unsigned int xa_lock_type(const struct xarray *xa) 31 { 32 return (__force unsigned int)xa->xa_flags & 3; 33 } 34 35 static inline void xas_lock_type(struct xa_state *xas, unsigned int lock_type) 36 { 37 if (lock_type == XA_LOCK_IRQ) 38 xas_lock_irq(xas); 39 else if (lock_type == XA_LOCK_BH) 40 xas_lock_bh(xas); 41 else 42 xas_lock(xas); 43 } 44 45 static inline void xas_unlock_type(struct xa_state *xas, unsigned int lock_type) 46 { 47 if (lock_type == XA_LOCK_IRQ) 48 xas_unlock_irq(xas); 49 else if (lock_type == XA_LOCK_BH) 50 xas_unlock_bh(xas); 51 else 52 xas_unlock(xas); 53 } 54 55 static inline bool xa_track_free(const struct xarray *xa) 56 { 57 return xa->xa_flags & XA_FLAGS_TRACK_FREE; 58 } 59 60 static inline bool xa_zero_busy(const struct xarray *xa) 61 { 62 return xa->xa_flags & XA_FLAGS_ZERO_BUSY; 63 } 64 65 static inline void xa_mark_set(struct xarray *xa, xa_mark_t mark) 66 { 67 if (!(xa->xa_flags & XA_FLAGS_MARK(mark))) 68 xa->xa_flags |= XA_FLAGS_MARK(mark); 69 } 70 71 static inline void xa_mark_clear(struct xarray *xa, xa_mark_t mark) 72 { 73 if (xa->xa_flags & XA_FLAGS_MARK(mark)) 74 xa->xa_flags &= ~(XA_FLAGS_MARK(mark)); 75 } 76 77 static inline unsigned long *node_marks(struct xa_node *node, xa_mark_t mark) 78 { 79 return node->marks[(__force unsigned)mark]; 80 } 81 82 static inline bool node_get_mark(struct xa_node *node, 83 unsigned int offset, xa_mark_t mark) 84 { 85 return test_bit(offset, node_marks(node, mark)); 86 } 87 88 /* returns true if the bit was set */ 89 static inline bool node_set_mark(struct xa_node *node, unsigned int offset, 90 xa_mark_t mark) 91 { 92 return __test_and_set_bit(offset, node_marks(node, mark)); 93 } 94 95 /* returns true if the bit was set */ 96 static inline bool node_clear_mark(struct xa_node *node, unsigned int offset, 97 xa_mark_t mark) 98 { 99 return __test_and_clear_bit(offset, node_marks(node, mark)); 100 } 101 102 static inline bool node_any_mark(struct xa_node *node, xa_mark_t mark) 103 { 104 return !bitmap_empty(node_marks(node, mark), XA_CHUNK_SIZE); 105 } 106 107 static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) 108 { 109 bitmap_fill(node_marks(node, mark), XA_CHUNK_SIZE); 110 } 111 112 #define mark_inc(mark) do { \ 113 mark = (__force xa_mark_t)((__force unsigned)(mark) + 1); \ 114 } while (0) 115 116 /* 117 * xas_squash_marks() - Merge all marks to the first entry 118 * @xas: Array operation state. 119 * 120 * Set a mark on the first entry if any entry has it set. 
Clear marks on 121 * all sibling entries. 122 */ 123 static void xas_squash_marks(const struct xa_state *xas) 124 { 125 unsigned int mark = 0; 126 unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; 127 128 if (!xas->xa_sibs) 129 return; 130 131 do { 132 unsigned long *marks = xas->xa_node->marks[mark]; 133 if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) 134 continue; 135 __set_bit(xas->xa_offset, marks); 136 bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); 137 } while (mark++ != (__force unsigned)XA_MARK_MAX); 138 } 139 140 /* extracts the offset within this node from the index */ 141 static unsigned int get_offset(unsigned long index, struct xa_node *node) 142 { 143 return (index >> node->shift) & XA_CHUNK_MASK; 144 } 145 146 static void xas_set_offset(struct xa_state *xas) 147 { 148 xas->xa_offset = get_offset(xas->xa_index, xas->xa_node); 149 } 150 151 /* move the index either forwards (find) or backwards (sibling slot) */ 152 static void xas_move_index(struct xa_state *xas, unsigned long offset) 153 { 154 unsigned int shift = xas->xa_node->shift; 155 xas->xa_index &= ~XA_CHUNK_MASK << shift; 156 xas->xa_index += offset << shift; 157 } 158 159 static void xas_advance(struct xa_state *xas) 160 { 161 xas->xa_offset++; 162 xas_move_index(xas, xas->xa_offset); 163 } 164 165 static void *set_bounds(struct xa_state *xas) 166 { 167 xas->xa_node = XAS_BOUNDS; 168 return NULL; 169 } 170 171 /* 172 * Starts a walk. If the @xas is already valid, we assume that it's on 173 * the right path and just return where we've got to. If we're in an 174 * error state, return NULL. If the index is outside the current scope 175 * of the xarray, return NULL without changing @xas->xa_node. Otherwise 176 * set @xas->xa_node to NULL and return the current head of the array. 177 */ 178 static void *xas_start(struct xa_state *xas) 179 { 180 void *entry; 181 182 if (xas_valid(xas)) 183 return xas_reload(xas); 184 if (xas_error(xas)) 185 return NULL; 186 187 entry = xa_head(xas->xa); 188 if (!xa_is_node(entry)) { 189 if (xas->xa_index) 190 return set_bounds(xas); 191 } else { 192 if ((xas->xa_index >> xa_to_node(entry)->shift) > XA_CHUNK_MASK) 193 return set_bounds(xas); 194 } 195 196 xas->xa_node = NULL; 197 return entry; 198 } 199 200 static void *xas_descend(struct xa_state *xas, struct xa_node *node) 201 { 202 unsigned int offset = get_offset(xas->xa_index, node); 203 void *entry = xa_entry(xas->xa, node, offset); 204 205 xas->xa_node = node; 206 if (xa_is_sibling(entry)) { 207 offset = xa_to_sibling(entry); 208 entry = xa_entry(xas->xa, node, offset); 209 } 210 211 xas->xa_offset = offset; 212 return entry; 213 } 214 215 /** 216 * xas_load() - Load an entry from the XArray (advanced). 217 * @xas: XArray operation state. 218 * 219 * Usually walks the @xas to the appropriate state to load the entry 220 * stored at xa_index. However, it will do nothing and return %NULL if 221 * @xas is in an error state. xas_load() will never expand the tree. 222 * 223 * If the xa_state is set up to operate on a multi-index entry, xas_load() 224 * may return %NULL or an internal entry, even if there are entries 225 * present within the range specified by @xas. 226 * 227 * Context: Any context. The caller should hold the xa_lock or the RCU lock. 228 * Return: Usually an entry in the XArray, but see description for exceptions. 
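 *
 * A minimal lookup sketch using the advanced API (illustrative only;
 * "my_xa" and "index" are hypothetical, and the plain xa_load() wrapper
 * later in this file is usually more convenient):
 *
 *	XA_STATE(xas, &my_xa, index);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	entry = xas_load(&xas);
 *	rcu_read_unlock();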
229 */ 230 void *xas_load(struct xa_state *xas) 231 { 232 void *entry = xas_start(xas); 233 234 while (xa_is_node(entry)) { 235 struct xa_node *node = xa_to_node(entry); 236 237 if (xas->xa_shift > node->shift) 238 break; 239 entry = xas_descend(xas, node); 240 if (node->shift == 0) 241 break; 242 } 243 return entry; 244 } 245 EXPORT_SYMBOL_GPL(xas_load); 246 247 /* Move the radix tree node cache here */ 248 extern struct kmem_cache *radix_tree_node_cachep; 249 extern void radix_tree_node_rcu_free(struct rcu_head *head); 250 251 #define XA_RCU_FREE ((struct xarray *)1) 252 253 static void xa_node_free(struct xa_node *node) 254 { 255 XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); 256 node->array = XA_RCU_FREE; 257 call_rcu(&node->rcu_head, radix_tree_node_rcu_free); 258 } 259 260 /* 261 * xas_destroy() - Free any resources allocated during the XArray operation. 262 * @xas: XArray operation state. 263 * 264 * This function is now internal-only. 265 */ 266 static void xas_destroy(struct xa_state *xas) 267 { 268 struct xa_node *node = xas->xa_alloc; 269 270 if (!node) 271 return; 272 XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); 273 kmem_cache_free(radix_tree_node_cachep, node); 274 xas->xa_alloc = NULL; 275 } 276 277 /** 278 * xas_nomem() - Allocate memory if needed. 279 * @xas: XArray operation state. 280 * @gfp: Memory allocation flags. 281 * 282 * If we need to add new nodes to the XArray, we try to allocate memory 283 * with GFP_NOWAIT while holding the lock, which will usually succeed. 284 * If it fails, @xas is flagged as needing memory to continue. The caller 285 * should drop the lock and call xas_nomem(). If xas_nomem() succeeds, 286 * the caller should retry the operation. 287 * 288 * Forward progress is guaranteed as one node is allocated here and 289 * stored in the xa_state where it will be found by xas_alloc(). More 290 * nodes will likely be found in the slab allocator, but we do not tie 291 * them up here. 292 * 293 * Return: true if memory was needed, and was successfully allocated. 294 */ 295 bool xas_nomem(struct xa_state *xas, gfp_t gfp) 296 { 297 if (xas->xa_node != XA_ERROR(-ENOMEM)) { 298 xas_destroy(xas); 299 return false; 300 } 301 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 302 if (!xas->xa_alloc) 303 return false; 304 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); 305 xas->xa_node = XAS_RESTART; 306 return true; 307 } 308 EXPORT_SYMBOL_GPL(xas_nomem); 309 310 /* 311 * __xas_nomem() - Drop locks and allocate memory if needed. 312 * @xas: XArray operation state. 313 * @gfp: Memory allocation flags. 314 * 315 * Internal variant of xas_nomem(). 316 * 317 * Return: true if memory was needed, and was successfully allocated. 
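 *
 * The locked entry points later in this file use it as the condition of a
 * store-retry loop, for example (sketch of the pattern in __xa_store()):
 *
 *	do {
 *		curr = xas_store(&xas, entry);
 *	} while (__xas_nomem(&xas, gfp));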
318 */ 319 static bool __xas_nomem(struct xa_state *xas, gfp_t gfp) 320 __must_hold(xas->xa->xa_lock) 321 { 322 unsigned int lock_type = xa_lock_type(xas->xa); 323 324 if (xas->xa_node != XA_ERROR(-ENOMEM)) { 325 xas_destroy(xas); 326 return false; 327 } 328 if (gfpflags_allow_blocking(gfp)) { 329 xas_unlock_type(xas, lock_type); 330 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 331 xas_lock_type(xas, lock_type); 332 } else { 333 xas->xa_alloc = kmem_cache_alloc(radix_tree_node_cachep, gfp); 334 } 335 if (!xas->xa_alloc) 336 return false; 337 XA_NODE_BUG_ON(xas->xa_alloc, !list_empty(&xas->xa_alloc->private_list)); 338 xas->xa_node = XAS_RESTART; 339 return true; 340 } 341 342 static void xas_update(struct xa_state *xas, struct xa_node *node) 343 { 344 if (xas->xa_update) 345 xas->xa_update(node); 346 else 347 XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); 348 } 349 350 static void *xas_alloc(struct xa_state *xas, unsigned int shift) 351 { 352 struct xa_node *parent = xas->xa_node; 353 struct xa_node *node = xas->xa_alloc; 354 355 if (xas_invalid(xas)) 356 return NULL; 357 358 if (node) { 359 xas->xa_alloc = NULL; 360 } else { 361 node = kmem_cache_alloc(radix_tree_node_cachep, 362 GFP_NOWAIT | __GFP_NOWARN); 363 if (!node) { 364 xas_set_err(xas, -ENOMEM); 365 return NULL; 366 } 367 } 368 369 if (parent) { 370 node->offset = xas->xa_offset; 371 parent->count++; 372 XA_NODE_BUG_ON(node, parent->count > XA_CHUNK_SIZE); 373 xas_update(xas, parent); 374 } 375 XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); 376 XA_NODE_BUG_ON(node, !list_empty(&node->private_list)); 377 node->shift = shift; 378 node->count = 0; 379 node->nr_values = 0; 380 RCU_INIT_POINTER(node->parent, xas->xa_node); 381 node->array = xas->xa; 382 383 return node; 384 } 385 386 #ifdef CONFIG_XARRAY_MULTI 387 /* Returns the number of indices covered by a given xa_state */ 388 static unsigned long xas_size(const struct xa_state *xas) 389 { 390 return (xas->xa_sibs + 1UL) << xas->xa_shift; 391 } 392 #endif 393 394 /* 395 * Use this to calculate the maximum index that will need to be created 396 * in order to add the entry described by @xas. Because we cannot store a 397 * multiple-index entry at index 0, the calculation is a little more complex 398 * than you might expect. 
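 *
 * For example (assuming XA_CHUNK_SHIFT is 6): an order-6 entry at index 0
 * has xa_shift = 6 and xa_sibs = 0, so the mask is 63 and max starts out
 * as 63.  Because mask == max, max is bumped to 64, which forces
 * xas_expand() to allocate a node, so the multi-index entry ends up in a
 * node slot rather than in the root pointer.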
399 */ 400 static unsigned long xas_max(struct xa_state *xas) 401 { 402 unsigned long max = xas->xa_index; 403 404 #ifdef CONFIG_XARRAY_MULTI 405 if (xas->xa_shift || xas->xa_sibs) { 406 unsigned long mask = xas_size(xas) - 1; 407 max |= mask; 408 if (mask == max) 409 max++; 410 } 411 #endif 412 413 return max; 414 } 415 416 /* The maximum index that can be contained in the array without expanding it */ 417 static unsigned long max_index(void *entry) 418 { 419 if (!xa_is_node(entry)) 420 return 0; 421 return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; 422 } 423 424 static void xas_shrink(struct xa_state *xas) 425 { 426 struct xarray *xa = xas->xa; 427 struct xa_node *node = xas->xa_node; 428 429 for (;;) { 430 void *entry; 431 432 XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); 433 if (node->count != 1) 434 break; 435 entry = xa_entry_locked(xa, node, 0); 436 if (!entry) 437 break; 438 if (!xa_is_node(entry) && node->shift) 439 break; 440 if (xa_is_zero(entry) && xa_zero_busy(xa)) 441 entry = NULL; 442 xas->xa_node = XAS_BOUNDS; 443 444 RCU_INIT_POINTER(xa->xa_head, entry); 445 if (xa_track_free(xa) && !node_get_mark(node, 0, XA_FREE_MARK)) 446 xa_mark_clear(xa, XA_FREE_MARK); 447 448 node->count = 0; 449 node->nr_values = 0; 450 if (!xa_is_node(entry)) 451 RCU_INIT_POINTER(node->slots[0], XA_RETRY_ENTRY); 452 xas_update(xas, node); 453 xa_node_free(node); 454 if (!xa_is_node(entry)) 455 break; 456 node = xa_to_node(entry); 457 node->parent = NULL; 458 } 459 } 460 461 /* 462 * xas_delete_node() - Attempt to delete an xa_node 463 * @xas: Array operation state. 464 * 465 * Attempts to delete the @xas->xa_node. This will fail if xa->node has 466 * a non-zero reference count. 467 */ 468 static void xas_delete_node(struct xa_state *xas) 469 { 470 struct xa_node *node = xas->xa_node; 471 472 for (;;) { 473 struct xa_node *parent; 474 475 XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); 476 if (node->count) 477 break; 478 479 parent = xa_parent_locked(xas->xa, node); 480 xas->xa_node = parent; 481 xas->xa_offset = node->offset; 482 xa_node_free(node); 483 484 if (!parent) { 485 xas->xa->xa_head = NULL; 486 xas->xa_node = XAS_BOUNDS; 487 return; 488 } 489 490 parent->slots[xas->xa_offset] = NULL; 491 parent->count--; 492 XA_NODE_BUG_ON(parent, parent->count > XA_CHUNK_SIZE); 493 node = parent; 494 xas_update(xas, node); 495 } 496 497 if (!node->parent) 498 xas_shrink(xas); 499 } 500 501 /** 502 * xas_free_nodes() - Free this node and all nodes that it references 503 * @xas: Array operation state. 504 * @top: Node to free 505 * 506 * This node has been removed from the tree. We must now free it and all 507 * of its subnodes. There may be RCU walkers with references into the tree, 508 * so we must replace all entries with retry markers. 
509 */ 510 static void xas_free_nodes(struct xa_state *xas, struct xa_node *top) 511 { 512 unsigned int offset = 0; 513 struct xa_node *node = top; 514 515 for (;;) { 516 void *entry = xa_entry_locked(xas->xa, node, offset); 517 518 if (node->shift && xa_is_node(entry)) { 519 node = xa_to_node(entry); 520 offset = 0; 521 continue; 522 } 523 if (entry) 524 RCU_INIT_POINTER(node->slots[offset], XA_RETRY_ENTRY); 525 offset++; 526 while (offset == XA_CHUNK_SIZE) { 527 struct xa_node *parent; 528 529 parent = xa_parent_locked(xas->xa, node); 530 offset = node->offset + 1; 531 node->count = 0; 532 node->nr_values = 0; 533 xas_update(xas, node); 534 xa_node_free(node); 535 if (node == top) 536 return; 537 node = parent; 538 } 539 } 540 } 541 542 /* 543 * xas_expand adds nodes to the head of the tree until it has reached 544 * sufficient height to be able to contain @xas->xa_index 545 */ 546 static int xas_expand(struct xa_state *xas, void *head) 547 { 548 struct xarray *xa = xas->xa; 549 struct xa_node *node = NULL; 550 unsigned int shift = 0; 551 unsigned long max = xas_max(xas); 552 553 if (!head) { 554 if (max == 0) 555 return 0; 556 while ((max >> shift) >= XA_CHUNK_SIZE) 557 shift += XA_CHUNK_SHIFT; 558 return shift + XA_CHUNK_SHIFT; 559 } else if (xa_is_node(head)) { 560 node = xa_to_node(head); 561 shift = node->shift + XA_CHUNK_SHIFT; 562 } 563 xas->xa_node = NULL; 564 565 while (max > max_index(head)) { 566 xa_mark_t mark = 0; 567 568 XA_NODE_BUG_ON(node, shift > BITS_PER_LONG); 569 node = xas_alloc(xas, shift); 570 if (!node) 571 return -ENOMEM; 572 573 node->count = 1; 574 if (xa_is_value(head)) 575 node->nr_values = 1; 576 RCU_INIT_POINTER(node->slots[0], head); 577 578 /* Propagate the aggregated mark info to the new child */ 579 for (;;) { 580 if (xa_track_free(xa) && mark == XA_FREE_MARK) { 581 node_mark_all(node, XA_FREE_MARK); 582 if (!xa_marked(xa, XA_FREE_MARK)) { 583 node_clear_mark(node, 0, XA_FREE_MARK); 584 xa_mark_set(xa, XA_FREE_MARK); 585 } 586 } else if (xa_marked(xa, mark)) { 587 node_set_mark(node, 0, mark); 588 } 589 if (mark == XA_MARK_MAX) 590 break; 591 mark_inc(mark); 592 } 593 594 /* 595 * Now that the new node is fully initialised, we can add 596 * it to the tree 597 */ 598 if (xa_is_node(head)) { 599 xa_to_node(head)->offset = 0; 600 rcu_assign_pointer(xa_to_node(head)->parent, node); 601 } 602 head = xa_mk_node(node); 603 rcu_assign_pointer(xa->xa_head, head); 604 xas_update(xas, node); 605 606 shift += XA_CHUNK_SHIFT; 607 } 608 609 xas->xa_node = node; 610 return shift; 611 } 612 613 /* 614 * xas_create() - Create a slot to store an entry in. 615 * @xas: XArray operation state. 616 * @allow_root: %true if we can store the entry in the root directly 617 * 618 * Most users will not need to call this function directly, as it is called 619 * by xas_store(). It is useful for doing conditional store operations 620 * (see the xa_cmpxchg() implementation for an example). 621 * 622 * Return: If the slot already existed, returns the contents of this slot. 623 * If the slot was newly created, returns %NULL. If it failed to create the 624 * slot, returns %NULL and indicates the error in @xas. 
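 *
 * A rough sketch of a conditional store built on this function
 * (hypothetical caller; compare __xa_insert() below, which obtains the
 * same effect via xas_load() and xas_store()):
 *
 *	curr = xas_create(&xas, true);
 *	if (!xas_error(&xas) && !curr)
 *		xas_store(&xas, entry);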
625 */ 626 static void *xas_create(struct xa_state *xas, bool allow_root) 627 { 628 struct xarray *xa = xas->xa; 629 void *entry; 630 void __rcu **slot; 631 struct xa_node *node = xas->xa_node; 632 int shift; 633 unsigned int order = xas->xa_shift; 634 635 if (xas_top(node)) { 636 entry = xa_head_locked(xa); 637 xas->xa_node = NULL; 638 if (!entry && xa_zero_busy(xa)) 639 entry = XA_ZERO_ENTRY; 640 shift = xas_expand(xas, entry); 641 if (shift < 0) 642 return NULL; 643 if (!shift && !allow_root) 644 shift = XA_CHUNK_SHIFT; 645 entry = xa_head_locked(xa); 646 slot = &xa->xa_head; 647 } else if (xas_error(xas)) { 648 return NULL; 649 } else if (node) { 650 unsigned int offset = xas->xa_offset; 651 652 shift = node->shift; 653 entry = xa_entry_locked(xa, node, offset); 654 slot = &node->slots[offset]; 655 } else { 656 shift = 0; 657 entry = xa_head_locked(xa); 658 slot = &xa->xa_head; 659 } 660 661 while (shift > order) { 662 shift -= XA_CHUNK_SHIFT; 663 if (!entry) { 664 node = xas_alloc(xas, shift); 665 if (!node) 666 break; 667 if (xa_track_free(xa)) 668 node_mark_all(node, XA_FREE_MARK); 669 rcu_assign_pointer(*slot, xa_mk_node(node)); 670 } else if (xa_is_node(entry)) { 671 node = xa_to_node(entry); 672 } else { 673 break; 674 } 675 entry = xas_descend(xas, node); 676 slot = &node->slots[xas->xa_offset]; 677 } 678 679 return entry; 680 } 681 682 /** 683 * xas_create_range() - Ensure that stores to this range will succeed 684 * @xas: XArray operation state. 685 * 686 * Creates all of the slots in the range covered by @xas. Sets @xas to 687 * create single-index entries and positions it at the beginning of the 688 * range. This is for the benefit of users which have not yet been 689 * converted to use multi-index entries. 690 */ 691 void xas_create_range(struct xa_state *xas) 692 { 693 unsigned long index = xas->xa_index; 694 unsigned char shift = xas->xa_shift; 695 unsigned char sibs = xas->xa_sibs; 696 697 xas->xa_index |= ((sibs + 1) << shift) - 1; 698 if (xas_is_node(xas) && xas->xa_node->shift == xas->xa_shift) 699 xas->xa_offset |= sibs; 700 xas->xa_shift = 0; 701 xas->xa_sibs = 0; 702 703 for (;;) { 704 xas_create(xas, true); 705 if (xas_error(xas)) 706 goto restore; 707 if (xas->xa_index <= (index | XA_CHUNK_MASK)) 708 goto success; 709 xas->xa_index -= XA_CHUNK_SIZE; 710 711 for (;;) { 712 struct xa_node *node = xas->xa_node; 713 xas->xa_node = xa_parent_locked(xas->xa, node); 714 xas->xa_offset = node->offset - 1; 715 if (node->offset != 0) 716 break; 717 } 718 } 719 720 restore: 721 xas->xa_shift = shift; 722 xas->xa_sibs = sibs; 723 xas->xa_index = index; 724 return; 725 success: 726 xas->xa_index = index; 727 if (xas->xa_node) 728 xas_set_offset(xas); 729 } 730 EXPORT_SYMBOL_GPL(xas_create_range); 731 732 static void update_node(struct xa_state *xas, struct xa_node *node, 733 int count, int values) 734 { 735 if (!node || (!count && !values)) 736 return; 737 738 node->count += count; 739 node->nr_values += values; 740 XA_NODE_BUG_ON(node, node->count > XA_CHUNK_SIZE); 741 XA_NODE_BUG_ON(node, node->nr_values > XA_CHUNK_SIZE); 742 xas_update(xas, node); 743 if (count < 0) 744 xas_delete_node(xas); 745 } 746 747 /** 748 * xas_store() - Store this entry in the XArray. 749 * @xas: XArray operation state. 750 * @entry: New entry. 
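 *
 * A typical calling pattern under the advanced API is a store-retry loop
 * (illustrative only; "my_xa", "index" and "item" are hypothetical):
 *
 *	XA_STATE(xas, &my_xa, index);
 *
 *	do {
 *		xas_lock(&xas);
 *		xas_store(&xas, item);
 *		xas_unlock(&xas);
 *	} while (xas_nomem(&xas, GFP_KERNEL));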
751 * 752 * If @xas is operating on a multi-index entry, the entry returned by this 753 * function is essentially meaningless (it may be an internal entry or it 754 * may be %NULL, even if there are non-NULL entries at some of the indices 755 * covered by the range). This is not a problem for any current users, 756 * and can be changed if needed. 757 * 758 * Return: The old entry at this index. 759 */ 760 void *xas_store(struct xa_state *xas, void *entry) 761 { 762 struct xa_node *node; 763 void __rcu **slot = &xas->xa->xa_head; 764 unsigned int offset, max; 765 int count = 0; 766 int values = 0; 767 void *first, *next; 768 bool value = xa_is_value(entry); 769 770 if (entry) { 771 bool allow_root = !xa_is_node(entry) && !xa_is_zero(entry); 772 first = xas_create(xas, allow_root); 773 } else { 774 first = xas_load(xas); 775 } 776 777 if (xas_invalid(xas)) 778 return first; 779 node = xas->xa_node; 780 if (node && (xas->xa_shift < node->shift)) 781 xas->xa_sibs = 0; 782 if ((first == entry) && !xas->xa_sibs) 783 return first; 784 785 next = first; 786 offset = xas->xa_offset; 787 max = xas->xa_offset + xas->xa_sibs; 788 if (node) { 789 slot = &node->slots[offset]; 790 if (xas->xa_sibs) 791 xas_squash_marks(xas); 792 } 793 if (!entry) 794 xas_init_marks(xas); 795 796 for (;;) { 797 /* 798 * Must clear the marks before setting the entry to NULL, 799 * otherwise xas_for_each_marked may find a NULL entry and 800 * stop early. rcu_assign_pointer contains a release barrier 801 * so the mark clearing will appear to happen before the 802 * entry is set to NULL. 803 */ 804 rcu_assign_pointer(*slot, entry); 805 if (xa_is_node(next) && (!node || node->shift)) 806 xas_free_nodes(xas, xa_to_node(next)); 807 if (!node) 808 break; 809 count += !next - !entry; 810 values += !xa_is_value(first) - !value; 811 if (entry) { 812 if (offset == max) 813 break; 814 if (!xa_is_sibling(entry)) 815 entry = xa_mk_sibling(xas->xa_offset); 816 } else { 817 if (offset == XA_CHUNK_MASK) 818 break; 819 } 820 next = xa_entry_locked(xas->xa, node, ++offset); 821 if (!xa_is_sibling(next)) { 822 if (!entry && (offset > max)) 823 break; 824 first = next; 825 } 826 slot++; 827 } 828 829 update_node(xas, node, count, values); 830 return first; 831 } 832 EXPORT_SYMBOL_GPL(xas_store); 833 834 /** 835 * xas_get_mark() - Returns the state of this mark. 836 * @xas: XArray operation state. 837 * @mark: Mark number. 838 * 839 * Return: true if the mark is set, false if the mark is clear or @xas 840 * is in an error state. 841 */ 842 bool xas_get_mark(const struct xa_state *xas, xa_mark_t mark) 843 { 844 if (xas_invalid(xas)) 845 return false; 846 if (!xas->xa_node) 847 return xa_marked(xas->xa, mark); 848 return node_get_mark(xas->xa_node, xas->xa_offset, mark); 849 } 850 EXPORT_SYMBOL_GPL(xas_get_mark); 851 852 /** 853 * xas_set_mark() - Sets the mark on this entry and its parents. 854 * @xas: XArray operation state. 855 * @mark: Mark number. 856 * 857 * Sets the specified mark on this entry, and walks up the tree setting it 858 * on all the ancestor entries. Does nothing if @xas has not been walked to 859 * an entry, or is in an error state. 
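 *
 * A minimal sketch (illustrative; mirrors what __xa_set_mark() later in
 * this file does):
 *
 *	entry = xas_load(&xas);
 *	if (entry)
 *		xas_set_mark(&xas, XA_MARK_0);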
860 */ 861 void xas_set_mark(const struct xa_state *xas, xa_mark_t mark) 862 { 863 struct xa_node *node = xas->xa_node; 864 unsigned int offset = xas->xa_offset; 865 866 if (xas_invalid(xas)) 867 return; 868 869 while (node) { 870 if (node_set_mark(node, offset, mark)) 871 return; 872 offset = node->offset; 873 node = xa_parent_locked(xas->xa, node); 874 } 875 876 if (!xa_marked(xas->xa, mark)) 877 xa_mark_set(xas->xa, mark); 878 } 879 EXPORT_SYMBOL_GPL(xas_set_mark); 880 881 /** 882 * xas_clear_mark() - Clears the mark on this entry and its parents. 883 * @xas: XArray operation state. 884 * @mark: Mark number. 885 * 886 * Clears the specified mark on this entry, and walks back to the head 887 * attempting to clear it on all the ancestor entries. Does nothing if 888 * @xas has not been walked to an entry, or is in an error state. 889 */ 890 void xas_clear_mark(const struct xa_state *xas, xa_mark_t mark) 891 { 892 struct xa_node *node = xas->xa_node; 893 unsigned int offset = xas->xa_offset; 894 895 if (xas_invalid(xas)) 896 return; 897 898 while (node) { 899 if (!node_clear_mark(node, offset, mark)) 900 return; 901 if (node_any_mark(node, mark)) 902 return; 903 904 offset = node->offset; 905 node = xa_parent_locked(xas->xa, node); 906 } 907 908 if (xa_marked(xas->xa, mark)) 909 xa_mark_clear(xas->xa, mark); 910 } 911 EXPORT_SYMBOL_GPL(xas_clear_mark); 912 913 /** 914 * xas_init_marks() - Initialise all marks for the entry 915 * @xas: Array operations state. 916 * 917 * Initialise all marks for the entry specified by @xas. If we're tracking 918 * free entries with a mark, we need to set it on all entries. All other 919 * marks are cleared. 920 * 921 * This implementation is not as efficient as it could be; we may walk 922 * up the tree multiple times. 923 */ 924 void xas_init_marks(const struct xa_state *xas) 925 { 926 xa_mark_t mark = 0; 927 928 for (;;) { 929 if (xa_track_free(xas->xa) && mark == XA_FREE_MARK) 930 xas_set_mark(xas, mark); 931 else 932 xas_clear_mark(xas, mark); 933 if (mark == XA_MARK_MAX) 934 break; 935 mark_inc(mark); 936 } 937 } 938 EXPORT_SYMBOL_GPL(xas_init_marks); 939 940 /** 941 * xas_pause() - Pause a walk to drop a lock. 942 * @xas: XArray operation state. 943 * 944 * Some users need to pause a walk and drop the lock they're holding in 945 * order to yield to a higher priority thread or carry out an operation 946 * on an entry. Those users should call this function before they drop 947 * the lock. It resets the @xas to be suitable for the next iteration 948 * of the loop after the user has reacquired the lock. If most entries 949 * found during a walk require you to call xas_pause(), the xa_for_each() 950 * iterator may be more appropriate. 951 * 952 * Note that xas_pause() only works for forward iteration. If a user needs 953 * to pause a reverse iteration, we will need a xas_pause_rev(). 954 */ 955 void xas_pause(struct xa_state *xas) 956 { 957 struct xa_node *node = xas->xa_node; 958 959 if (xas_invalid(xas)) 960 return; 961 962 if (node) { 963 unsigned int offset = xas->xa_offset; 964 while (++offset < XA_CHUNK_SIZE) { 965 if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) 966 break; 967 } 968 xas->xa_index += (offset - xas->xa_offset) << node->shift; 969 } else { 970 xas->xa_index++; 971 } 972 xas->xa_node = XAS_RESTART; 973 } 974 EXPORT_SYMBOL_GPL(xas_pause); 975 976 /* 977 * __xas_prev() - Find the previous entry in the XArray. 978 * @xas: XArray operation state. 
979 * 980 * Helper function for xas_prev() which handles all the complex cases 981 * out of line. 982 */ 983 void *__xas_prev(struct xa_state *xas) 984 { 985 void *entry; 986 987 if (!xas_frozen(xas->xa_node)) 988 xas->xa_index--; 989 if (xas_not_node(xas->xa_node)) 990 return xas_load(xas); 991 992 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) 993 xas->xa_offset--; 994 995 while (xas->xa_offset == 255) { 996 xas->xa_offset = xas->xa_node->offset - 1; 997 xas->xa_node = xa_parent(xas->xa, xas->xa_node); 998 if (!xas->xa_node) 999 return set_bounds(xas); 1000 } 1001 1002 for (;;) { 1003 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); 1004 if (!xa_is_node(entry)) 1005 return entry; 1006 1007 xas->xa_node = xa_to_node(entry); 1008 xas_set_offset(xas); 1009 } 1010 } 1011 EXPORT_SYMBOL_GPL(__xas_prev); 1012 1013 /* 1014 * __xas_next() - Find the next entry in the XArray. 1015 * @xas: XArray operation state. 1016 * 1017 * Helper function for xas_next() which handles all the complex cases 1018 * out of line. 1019 */ 1020 void *__xas_next(struct xa_state *xas) 1021 { 1022 void *entry; 1023 1024 if (!xas_frozen(xas->xa_node)) 1025 xas->xa_index++; 1026 if (xas_not_node(xas->xa_node)) 1027 return xas_load(xas); 1028 1029 if (xas->xa_offset != get_offset(xas->xa_index, xas->xa_node)) 1030 xas->xa_offset++; 1031 1032 while (xas->xa_offset == XA_CHUNK_SIZE) { 1033 xas->xa_offset = xas->xa_node->offset + 1; 1034 xas->xa_node = xa_parent(xas->xa, xas->xa_node); 1035 if (!xas->xa_node) 1036 return set_bounds(xas); 1037 } 1038 1039 for (;;) { 1040 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); 1041 if (!xa_is_node(entry)) 1042 return entry; 1043 1044 xas->xa_node = xa_to_node(entry); 1045 xas_set_offset(xas); 1046 } 1047 } 1048 EXPORT_SYMBOL_GPL(__xas_next); 1049 1050 /** 1051 * xas_find() - Find the next present entry in the XArray. 1052 * @xas: XArray operation state. 1053 * @max: Highest index to return. 1054 * 1055 * If the @xas has not yet been walked to an entry, return the entry 1056 * which has an index >= xas.xa_index. If it has been walked, the entry 1057 * currently being pointed at has been processed, and so we move to the 1058 * next entry. 1059 * 1060 * If no entry is found and the array is smaller than @max, the iterator 1061 * is set to the smallest index not yet in the array. This allows @xas 1062 * to be immediately passed to xas_store(). 1063 * 1064 * Return: The entry, if found, otherwise %NULL. 
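 *
 * Most users will iterate with xas_for_each(), which begins with this
 * function.  A sketch (illustrative; "my_xa" and "process" are
 * hypothetical):
 *
 *	XA_STATE(xas, &my_xa, 0);
 *	void *entry;
 *
 *	rcu_read_lock();
 *	xas_for_each(&xas, entry, ULONG_MAX) {
 *		if (xas_retry(&xas, entry))
 *			continue;
 *		process(entry);
 *	}
 *	rcu_read_unlock();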
1065 */ 1066 void *xas_find(struct xa_state *xas, unsigned long max) 1067 { 1068 void *entry; 1069 1070 if (xas_error(xas)) 1071 return NULL; 1072 1073 if (!xas->xa_node) { 1074 xas->xa_index = 1; 1075 return set_bounds(xas); 1076 } else if (xas_top(xas->xa_node)) { 1077 entry = xas_load(xas); 1078 if (entry || xas_not_node(xas->xa_node)) 1079 return entry; 1080 } else if (!xas->xa_node->shift && 1081 xas->xa_offset != (xas->xa_index & XA_CHUNK_MASK)) { 1082 xas->xa_offset = ((xas->xa_index - 1) & XA_CHUNK_MASK) + 1; 1083 } 1084 1085 xas_advance(xas); 1086 1087 while (xas->xa_node && (xas->xa_index <= max)) { 1088 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { 1089 xas->xa_offset = xas->xa_node->offset + 1; 1090 xas->xa_node = xa_parent(xas->xa, xas->xa_node); 1091 continue; 1092 } 1093 1094 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); 1095 if (xa_is_node(entry)) { 1096 xas->xa_node = xa_to_node(entry); 1097 xas->xa_offset = 0; 1098 continue; 1099 } 1100 if (entry && !xa_is_sibling(entry)) 1101 return entry; 1102 1103 xas_advance(xas); 1104 } 1105 1106 if (!xas->xa_node) 1107 xas->xa_node = XAS_BOUNDS; 1108 return NULL; 1109 } 1110 EXPORT_SYMBOL_GPL(xas_find); 1111 1112 /** 1113 * xas_find_marked() - Find the next marked entry in the XArray. 1114 * @xas: XArray operation state. 1115 * @max: Highest index to return. 1116 * @mark: Mark number to search for. 1117 * 1118 * If the @xas has not yet been walked to an entry, return the marked entry 1119 * which has an index >= xas.xa_index. If it has been walked, the entry 1120 * currently being pointed at has been processed, and so we return the 1121 * first marked entry with an index > xas.xa_index. 1122 * 1123 * If no marked entry is found and the array is smaller than @max, @xas is 1124 * set to the bounds state and xas->xa_index is set to the smallest index 1125 * not yet in the array. This allows @xas to be immediately passed to 1126 * xas_store(). 1127 * 1128 * If no entry is found before @max is reached, @xas is set to the restart 1129 * state. 1130 * 1131 * Return: The entry, if found, otherwise %NULL. 
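 *
 * For example, __xa_alloc() later in this file uses it with %XA_FREE_MARK
 * to locate a free index (excerpt):
 *
 *	xas.xa_index = limit.min;
 *	xas_find_marked(&xas, limit.max, XA_FREE_MARK);
 *	if (xas.xa_node == XAS_RESTART)
 *		xas_set_err(&xas, -EBUSY);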
1132 */ 1133 void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) 1134 { 1135 bool advance = true; 1136 unsigned int offset; 1137 void *entry; 1138 1139 if (xas_error(xas)) 1140 return NULL; 1141 1142 if (!xas->xa_node) { 1143 xas->xa_index = 1; 1144 goto out; 1145 } else if (xas_top(xas->xa_node)) { 1146 advance = false; 1147 entry = xa_head(xas->xa); 1148 xas->xa_node = NULL; 1149 if (xas->xa_index > max_index(entry)) 1150 goto out; 1151 if (!xa_is_node(entry)) { 1152 if (xa_marked(xas->xa, mark)) 1153 return entry; 1154 xas->xa_index = 1; 1155 goto out; 1156 } 1157 xas->xa_node = xa_to_node(entry); 1158 xas->xa_offset = xas->xa_index >> xas->xa_node->shift; 1159 } 1160 1161 while (xas->xa_index <= max) { 1162 if (unlikely(xas->xa_offset == XA_CHUNK_SIZE)) { 1163 xas->xa_offset = xas->xa_node->offset + 1; 1164 xas->xa_node = xa_parent(xas->xa, xas->xa_node); 1165 if (!xas->xa_node) 1166 break; 1167 advance = false; 1168 continue; 1169 } 1170 1171 if (!advance) { 1172 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); 1173 if (xa_is_sibling(entry)) { 1174 xas->xa_offset = xa_to_sibling(entry); 1175 xas_move_index(xas, xas->xa_offset); 1176 } 1177 } 1178 1179 offset = xas_find_chunk(xas, advance, mark); 1180 if (offset > xas->xa_offset) { 1181 advance = false; 1182 xas_move_index(xas, offset); 1183 /* Mind the wrap */ 1184 if ((xas->xa_index - 1) >= max) 1185 goto max; 1186 xas->xa_offset = offset; 1187 if (offset == XA_CHUNK_SIZE) 1188 continue; 1189 } 1190 1191 entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); 1192 if (!xa_is_node(entry)) 1193 return entry; 1194 xas->xa_node = xa_to_node(entry); 1195 xas_set_offset(xas); 1196 } 1197 1198 out: 1199 if (xas->xa_index > max) 1200 goto max; 1201 return set_bounds(xas); 1202 max: 1203 xas->xa_node = XAS_RESTART; 1204 return NULL; 1205 } 1206 EXPORT_SYMBOL_GPL(xas_find_marked); 1207 1208 /** 1209 * xas_find_conflict() - Find the next present entry in a range. 1210 * @xas: XArray operation state. 1211 * 1212 * The @xas describes both a range and a position within that range. 1213 * 1214 * Context: Any context. Expects xa_lock to be held. 1215 * Return: The next entry in the range covered by @xas or %NULL. 
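 *
 * A rough sketch of checking that a multi-index range is empty before
 * storing into it (illustrative only; "my_xa", "index", "order" and "item"
 * are hypothetical, and real callers usually iterate with
 * xas_for_each_conflict()):
 *
 *	XA_STATE_ORDER(xas, &my_xa, index, order);
 *
 *	xas_lock(&xas);
 *	if (!xas_find_conflict(&xas))
 *		xas_store(&xas, item);
 *	xas_unlock(&xas);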
1216 */ 1217 void *xas_find_conflict(struct xa_state *xas) 1218 { 1219 void *curr; 1220 1221 if (xas_error(xas)) 1222 return NULL; 1223 1224 if (!xas->xa_node) 1225 return NULL; 1226 1227 if (xas_top(xas->xa_node)) { 1228 curr = xas_start(xas); 1229 if (!curr) 1230 return NULL; 1231 while (xa_is_node(curr)) { 1232 struct xa_node *node = xa_to_node(curr); 1233 curr = xas_descend(xas, node); 1234 } 1235 if (curr) 1236 return curr; 1237 } 1238 1239 if (xas->xa_node->shift > xas->xa_shift) 1240 return NULL; 1241 1242 for (;;) { 1243 if (xas->xa_node->shift == xas->xa_shift) { 1244 if ((xas->xa_offset & xas->xa_sibs) == xas->xa_sibs) 1245 break; 1246 } else if (xas->xa_offset == XA_CHUNK_MASK) { 1247 xas->xa_offset = xas->xa_node->offset; 1248 xas->xa_node = xa_parent_locked(xas->xa, xas->xa_node); 1249 if (!xas->xa_node) 1250 break; 1251 continue; 1252 } 1253 curr = xa_entry_locked(xas->xa, xas->xa_node, ++xas->xa_offset); 1254 if (xa_is_sibling(curr)) 1255 continue; 1256 while (xa_is_node(curr)) { 1257 xas->xa_node = xa_to_node(curr); 1258 xas->xa_offset = 0; 1259 curr = xa_entry_locked(xas->xa, xas->xa_node, 0); 1260 } 1261 if (curr) 1262 return curr; 1263 } 1264 xas->xa_offset -= xas->xa_sibs; 1265 return NULL; 1266 } 1267 EXPORT_SYMBOL_GPL(xas_find_conflict); 1268 1269 /** 1270 * xa_load() - Load an entry from an XArray. 1271 * @xa: XArray. 1272 * @index: index into array. 1273 * 1274 * Context: Any context. Takes and releases the RCU lock. 1275 * Return: The entry at @index in @xa. 1276 */ 1277 void *xa_load(struct xarray *xa, unsigned long index) 1278 { 1279 XA_STATE(xas, xa, index); 1280 void *entry; 1281 1282 rcu_read_lock(); 1283 do { 1284 entry = xas_load(&xas); 1285 if (xa_is_zero(entry)) 1286 entry = NULL; 1287 } while (xas_retry(&xas, entry)); 1288 rcu_read_unlock(); 1289 1290 return entry; 1291 } 1292 EXPORT_SYMBOL(xa_load); 1293 1294 static void *xas_result(struct xa_state *xas, void *curr) 1295 { 1296 if (xa_is_zero(curr)) 1297 return NULL; 1298 if (xas_error(xas)) 1299 curr = xas->xa_node; 1300 return curr; 1301 } 1302 1303 /** 1304 * __xa_erase() - Erase this entry from the XArray while locked. 1305 * @xa: XArray. 1306 * @index: Index into array. 1307 * 1308 * After this function returns, loading from @index will return %NULL. 1309 * If the index is part of a multi-index entry, all indices will be erased 1310 * and none of the entries will be part of a multi-index entry. 1311 * 1312 * Context: Any context. Expects xa_lock to be held on entry. 1313 * Return: The entry which used to be at this index. 1314 */ 1315 void *__xa_erase(struct xarray *xa, unsigned long index) 1316 { 1317 XA_STATE(xas, xa, index); 1318 return xas_result(&xas, xas_store(&xas, NULL)); 1319 } 1320 EXPORT_SYMBOL(__xa_erase); 1321 1322 /** 1323 * xa_erase() - Erase this entry from the XArray. 1324 * @xa: XArray. 1325 * @index: Index of entry. 1326 * 1327 * After this function returns, loading from @index will return %NULL. 1328 * If the index is part of a multi-index entry, all indices will be erased 1329 * and none of the entries will be part of a multi-index entry. 1330 * 1331 * Context: Any context. Takes and releases the xa_lock. 1332 * Return: The entry which used to be at this index. 1333 */ 1334 void *xa_erase(struct xarray *xa, unsigned long index) 1335 { 1336 void *entry; 1337 1338 xa_lock(xa); 1339 entry = __xa_erase(xa, index); 1340 xa_unlock(xa); 1341 1342 return entry; 1343 } 1344 EXPORT_SYMBOL(xa_erase); 1345 1346 /** 1347 * __xa_store() - Store this entry in the XArray. 1348 * @xa: XArray. 
1349 * @index: Index into array. 1350 * @entry: New entry. 1351 * @gfp: Memory allocation flags. 1352 * 1353 * You must already be holding the xa_lock when calling this function. 1354 * It will drop the lock if needed to allocate memory, and then reacquire 1355 * it afterwards. 1356 * 1357 * Context: Any context. Expects xa_lock to be held on entry. May 1358 * release and reacquire xa_lock if @gfp flags permit. 1359 * Return: The old entry at this index or xa_err() if an error happened. 1360 */ 1361 void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1362 { 1363 XA_STATE(xas, xa, index); 1364 void *curr; 1365 1366 if (WARN_ON_ONCE(xa_is_advanced(entry))) 1367 return XA_ERROR(-EINVAL); 1368 if (xa_track_free(xa) && !entry) 1369 entry = XA_ZERO_ENTRY; 1370 1371 do { 1372 curr = xas_store(&xas, entry); 1373 if (xa_track_free(xa)) 1374 xas_clear_mark(&xas, XA_FREE_MARK); 1375 } while (__xas_nomem(&xas, gfp)); 1376 1377 return xas_result(&xas, curr); 1378 } 1379 EXPORT_SYMBOL(__xa_store); 1380 1381 /** 1382 * xa_store() - Store this entry in the XArray. 1383 * @xa: XArray. 1384 * @index: Index into array. 1385 * @entry: New entry. 1386 * @gfp: Memory allocation flags. 1387 * 1388 * After this function returns, loads from this index will return @entry. 1389 * Storing into an existing multislot entry updates the entry of every index. 1390 * The marks associated with @index are unaffected unless @entry is %NULL. 1391 * 1392 * Context: Any context. Takes and releases the xa_lock. 1393 * May sleep if the @gfp flags permit. 1394 * Return: The old entry at this index on success, xa_err(-EINVAL) if @entry 1395 * cannot be stored in an XArray, or xa_err(-ENOMEM) if memory allocation 1396 * failed. 1397 */ 1398 void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1399 { 1400 void *curr; 1401 1402 xa_lock(xa); 1403 curr = __xa_store(xa, index, entry, gfp); 1404 xa_unlock(xa); 1405 1406 return curr; 1407 } 1408 EXPORT_SYMBOL(xa_store); 1409 1410 /** 1411 * __xa_cmpxchg() - Store this entry in the XArray. 1412 * @xa: XArray. 1413 * @index: Index into array. 1414 * @old: Old value to test against. 1415 * @entry: New entry. 1416 * @gfp: Memory allocation flags. 1417 * 1418 * You must already be holding the xa_lock when calling this function. 1419 * It will drop the lock if needed to allocate memory, and then reacquire 1420 * it afterwards. 1421 * 1422 * Context: Any context. Expects xa_lock to be held on entry. May 1423 * release and reacquire xa_lock if @gfp flags permit. 1424 * Return: The old entry at this index or xa_err() if an error happened. 1425 */ 1426 void *__xa_cmpxchg(struct xarray *xa, unsigned long index, 1427 void *old, void *entry, gfp_t gfp) 1428 { 1429 XA_STATE(xas, xa, index); 1430 void *curr; 1431 1432 if (WARN_ON_ONCE(xa_is_advanced(entry))) 1433 return XA_ERROR(-EINVAL); 1434 1435 do { 1436 curr = xas_load(&xas); 1437 if (curr == old) { 1438 xas_store(&xas, entry); 1439 if (xa_track_free(xa) && entry && !curr) 1440 xas_clear_mark(&xas, XA_FREE_MARK); 1441 } 1442 } while (__xas_nomem(&xas, gfp)); 1443 1444 return xas_result(&xas, curr); 1445 } 1446 EXPORT_SYMBOL(__xa_cmpxchg); 1447 1448 /** 1449 * __xa_insert() - Store this entry in the XArray if no entry is present. 1450 * @xa: XArray. 1451 * @index: Index into array. 1452 * @entry: New entry. 1453 * @gfp: Memory allocation flags. 1454 * 1455 * Inserting a NULL entry will store a reserved entry (like xa_reserve()) 1456 * if no entry is present. 
Inserting will fail if a reserved entry is 1457 * present, even though loading from this index will return NULL. 1458 * 1459 * Context: Any context. Expects xa_lock to be held on entry. May 1460 * release and reacquire xa_lock if @gfp flags permit. 1461 * Return: 0 if the store succeeded. -EBUSY if another entry was present. 1462 * -ENOMEM if memory could not be allocated. 1463 */ 1464 int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) 1465 { 1466 XA_STATE(xas, xa, index); 1467 void *curr; 1468 1469 if (WARN_ON_ONCE(xa_is_advanced(entry))) 1470 return -EINVAL; 1471 if (!entry) 1472 entry = XA_ZERO_ENTRY; 1473 1474 do { 1475 curr = xas_load(&xas); 1476 if (!curr) { 1477 xas_store(&xas, entry); 1478 if (xa_track_free(xa)) 1479 xas_clear_mark(&xas, XA_FREE_MARK); 1480 } else { 1481 xas_set_err(&xas, -EBUSY); 1482 } 1483 } while (__xas_nomem(&xas, gfp)); 1484 1485 return xas_error(&xas); 1486 } 1487 EXPORT_SYMBOL(__xa_insert); 1488 1489 #ifdef CONFIG_XARRAY_MULTI 1490 static void xas_set_range(struct xa_state *xas, unsigned long first, 1491 unsigned long last) 1492 { 1493 unsigned int shift = 0; 1494 unsigned long sibs = last - first; 1495 unsigned int offset = XA_CHUNK_MASK; 1496 1497 xas_set(xas, first); 1498 1499 while ((first & XA_CHUNK_MASK) == 0) { 1500 if (sibs < XA_CHUNK_MASK) 1501 break; 1502 if ((sibs == XA_CHUNK_MASK) && (offset < XA_CHUNK_MASK)) 1503 break; 1504 shift += XA_CHUNK_SHIFT; 1505 if (offset == XA_CHUNK_MASK) 1506 offset = sibs & XA_CHUNK_MASK; 1507 sibs >>= XA_CHUNK_SHIFT; 1508 first >>= XA_CHUNK_SHIFT; 1509 } 1510 1511 offset = first & XA_CHUNK_MASK; 1512 if (offset + sibs > XA_CHUNK_MASK) 1513 sibs = XA_CHUNK_MASK - offset; 1514 if ((((first + sibs + 1) << shift) - 1) > last) 1515 sibs -= 1; 1516 1517 xas->xa_shift = shift; 1518 xas->xa_sibs = sibs; 1519 } 1520 1521 /** 1522 * xa_store_range() - Store this entry at a range of indices in the XArray. 1523 * @xa: XArray. 1524 * @first: First index to affect. 1525 * @last: Last index to affect. 1526 * @entry: New entry. 1527 * @gfp: Memory allocation flags. 1528 * 1529 * After this function returns, loads from any index between @first and @last, 1530 * inclusive will return @entry. 1531 * Storing into an existing multislot entry updates the entry of every index. 1532 * The marks associated with @index are unaffected unless @entry is %NULL. 1533 * 1534 * Context: Process context. Takes and releases the xa_lock. May sleep 1535 * if the @gfp flags permit. 1536 * Return: %NULL on success, xa_err(-EINVAL) if @entry cannot be stored in 1537 * an XArray, or xa_err(-ENOMEM) if memory allocation failed. 
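 *
 * A minimal sketch (illustrative; "my_xa" and "item" are hypothetical):
 *
 *	void *ret = xa_store_range(&my_xa, 64, 127, item, GFP_KERNEL);
 *
 *	if (xa_err(ret))
 *		return xa_err(ret);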
1538 */ 1539 void *xa_store_range(struct xarray *xa, unsigned long first, 1540 unsigned long last, void *entry, gfp_t gfp) 1541 { 1542 XA_STATE(xas, xa, 0); 1543 1544 if (WARN_ON_ONCE(xa_is_internal(entry))) 1545 return XA_ERROR(-EINVAL); 1546 if (last < first) 1547 return XA_ERROR(-EINVAL); 1548 1549 do { 1550 xas_lock(&xas); 1551 if (entry) { 1552 unsigned int order = BITS_PER_LONG; 1553 if (last + 1) 1554 order = __ffs(last + 1); 1555 xas_set_order(&xas, last, order); 1556 xas_create(&xas, true); 1557 if (xas_error(&xas)) 1558 goto unlock; 1559 } 1560 do { 1561 xas_set_range(&xas, first, last); 1562 xas_store(&xas, entry); 1563 if (xas_error(&xas)) 1564 goto unlock; 1565 first += xas_size(&xas); 1566 } while (first <= last); 1567 unlock: 1568 xas_unlock(&xas); 1569 } while (xas_nomem(&xas, gfp)); 1570 1571 return xas_result(&xas, NULL); 1572 } 1573 EXPORT_SYMBOL(xa_store_range); 1574 #endif /* CONFIG_XARRAY_MULTI */ 1575 1576 /** 1577 * __xa_alloc() - Find somewhere to store this entry in the XArray. 1578 * @xa: XArray. 1579 * @id: Pointer to ID. 1580 * @limit: Range for allocated ID. 1581 * @entry: New entry. 1582 * @gfp: Memory allocation flags. 1583 * 1584 * Finds an empty entry in @xa between @limit.min and @limit.max, 1585 * stores the index into the @id pointer, then stores the entry at 1586 * that index. A concurrent lookup will not see an uninitialised @id. 1587 * 1588 * Context: Any context. Expects xa_lock to be held on entry. May 1589 * release and reacquire xa_lock if @gfp flags permit. 1590 * Return: 0 on success, -ENOMEM if memory could not be allocated or 1591 * -EBUSY if there are no free entries in @limit. 1592 */ 1593 int __xa_alloc(struct xarray *xa, u32 *id, void *entry, 1594 struct xa_limit limit, gfp_t gfp) 1595 { 1596 XA_STATE(xas, xa, 0); 1597 1598 if (WARN_ON_ONCE(xa_is_advanced(entry))) 1599 return -EINVAL; 1600 if (WARN_ON_ONCE(!xa_track_free(xa))) 1601 return -EINVAL; 1602 1603 if (!entry) 1604 entry = XA_ZERO_ENTRY; 1605 1606 do { 1607 xas.xa_index = limit.min; 1608 xas_find_marked(&xas, limit.max, XA_FREE_MARK); 1609 if (xas.xa_node == XAS_RESTART) 1610 xas_set_err(&xas, -EBUSY); 1611 else 1612 *id = xas.xa_index; 1613 xas_store(&xas, entry); 1614 xas_clear_mark(&xas, XA_FREE_MARK); 1615 } while (__xas_nomem(&xas, gfp)); 1616 1617 return xas_error(&xas); 1618 } 1619 EXPORT_SYMBOL(__xa_alloc); 1620 1621 /** 1622 * __xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. 1623 * @xa: XArray. 1624 * @id: Pointer to ID. 1625 * @entry: New entry. 1626 * @limit: Range of allocated ID. 1627 * @next: Pointer to next ID to allocate. 1628 * @gfp: Memory allocation flags. 1629 * 1630 * Finds an empty entry in @xa between @limit.min and @limit.max, 1631 * stores the index into the @id pointer, then stores the entry at 1632 * that index. A concurrent lookup will not see an uninitialised @id. 1633 * The search for an empty entry will start at @next and will wrap 1634 * around if necessary. 1635 * 1636 * Context: Any context. Expects xa_lock to be held on entry. May 1637 * release and reacquire xa_lock if @gfp flags permit. 1638 * Return: 0 if the allocation succeeded without wrapping. 1 if the 1639 * allocation succeeded after wrapping, -ENOMEM if memory could not be 1640 * allocated or -EBUSY if there are no free entries in @limit. 
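 *
 * A minimal sketch (illustrative; assumes "my_xa" was defined with
 * DEFINE_XARRAY_ALLOC() and that "item" is hypothetical):
 *
 *	static u32 next;
 *	u32 id;
 *	int err;
 *
 *	xa_lock(&my_xa);
 *	err = __xa_alloc_cyclic(&my_xa, &id, item, xa_limit_32b, &next,
 *				GFP_KERNEL);
 *	xa_unlock(&my_xa);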
1641 */ 1642 int __xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, 1643 struct xa_limit limit, u32 *next, gfp_t gfp) 1644 { 1645 u32 min = limit.min; 1646 int ret; 1647 1648 limit.min = max(min, *next); 1649 ret = __xa_alloc(xa, id, entry, limit, gfp); 1650 if ((xa->xa_flags & XA_FLAGS_ALLOC_WRAPPED) && ret == 0) { 1651 xa->xa_flags &= ~XA_FLAGS_ALLOC_WRAPPED; 1652 ret = 1; 1653 } 1654 1655 if (ret < 0 && limit.min > min) { 1656 limit.min = min; 1657 ret = __xa_alloc(xa, id, entry, limit, gfp); 1658 if (ret == 0) 1659 ret = 1; 1660 } 1661 1662 if (ret >= 0) { 1663 *next = *id + 1; 1664 if (*next == 0) 1665 xa->xa_flags |= XA_FLAGS_ALLOC_WRAPPED; 1666 } 1667 return ret; 1668 } 1669 EXPORT_SYMBOL(__xa_alloc_cyclic); 1670 1671 /** 1672 * __xa_set_mark() - Set this mark on this entry while locked. 1673 * @xa: XArray. 1674 * @index: Index of entry. 1675 * @mark: Mark number. 1676 * 1677 * Attempting to set a mark on a %NULL entry does not succeed. 1678 * 1679 * Context: Any context. Expects xa_lock to be held on entry. 1680 */ 1681 void __xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) 1682 { 1683 XA_STATE(xas, xa, index); 1684 void *entry = xas_load(&xas); 1685 1686 if (entry) 1687 xas_set_mark(&xas, mark); 1688 } 1689 EXPORT_SYMBOL(__xa_set_mark); 1690 1691 /** 1692 * __xa_clear_mark() - Clear this mark on this entry while locked. 1693 * @xa: XArray. 1694 * @index: Index of entry. 1695 * @mark: Mark number. 1696 * 1697 * Context: Any context. Expects xa_lock to be held on entry. 1698 */ 1699 void __xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) 1700 { 1701 XA_STATE(xas, xa, index); 1702 void *entry = xas_load(&xas); 1703 1704 if (entry) 1705 xas_clear_mark(&xas, mark); 1706 } 1707 EXPORT_SYMBOL(__xa_clear_mark); 1708 1709 /** 1710 * xa_get_mark() - Inquire whether this mark is set on this entry. 1711 * @xa: XArray. 1712 * @index: Index of entry. 1713 * @mark: Mark number. 1714 * 1715 * This function uses the RCU read lock, so the result may be out of date 1716 * by the time it returns. If you need the result to be stable, use a lock. 1717 * 1718 * Context: Any context. Takes and releases the RCU lock. 1719 * Return: True if the entry at @index has this mark set, false if it doesn't. 1720 */ 1721 bool xa_get_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) 1722 { 1723 XA_STATE(xas, xa, index); 1724 void *entry; 1725 1726 rcu_read_lock(); 1727 entry = xas_start(&xas); 1728 while (xas_get_mark(&xas, mark)) { 1729 if (!xa_is_node(entry)) 1730 goto found; 1731 entry = xas_descend(&xas, xa_to_node(entry)); 1732 } 1733 rcu_read_unlock(); 1734 return false; 1735 found: 1736 rcu_read_unlock(); 1737 return true; 1738 } 1739 EXPORT_SYMBOL(xa_get_mark); 1740 1741 /** 1742 * xa_set_mark() - Set this mark on this entry. 1743 * @xa: XArray. 1744 * @index: Index of entry. 1745 * @mark: Mark number. 1746 * 1747 * Attempting to set a mark on a %NULL entry does not succeed. 1748 * 1749 * Context: Process context. Takes and releases the xa_lock. 1750 */ 1751 void xa_set_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) 1752 { 1753 xa_lock(xa); 1754 __xa_set_mark(xa, index, mark); 1755 xa_unlock(xa); 1756 } 1757 EXPORT_SYMBOL(xa_set_mark); 1758 1759 /** 1760 * xa_clear_mark() - Clear this mark on this entry. 1761 * @xa: XArray. 1762 * @index: Index of entry. 1763 * @mark: Mark number. 1764 * 1765 * Clearing a mark always succeeds. 1766 * 1767 * Context: Process context. Takes and releases the xa_lock. 
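 *
 * A minimal sketch of the mark API (illustrative; "my_xa" and "index" are
 * hypothetical):
 *
 *	xa_set_mark(&my_xa, index, XA_MARK_0);
 *	if (xa_get_mark(&my_xa, index, XA_MARK_0))
 *		xa_clear_mark(&my_xa, index, XA_MARK_0);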
1768 */ 1769 void xa_clear_mark(struct xarray *xa, unsigned long index, xa_mark_t mark) 1770 { 1771 xa_lock(xa); 1772 __xa_clear_mark(xa, index, mark); 1773 xa_unlock(xa); 1774 } 1775 EXPORT_SYMBOL(xa_clear_mark); 1776 1777 /** 1778 * xa_find() - Search the XArray for an entry. 1779 * @xa: XArray. 1780 * @indexp: Pointer to an index. 1781 * @max: Maximum index to search to. 1782 * @filter: Selection criterion. 1783 * 1784 * Finds the entry in @xa which matches the @filter, and has the lowest 1785 * index that is at least @indexp and no more than @max. 1786 * If an entry is found, @indexp is updated to be the index of the entry. 1787 * This function is protected by the RCU read lock, so it may not find 1788 * entries which are being simultaneously added. It will not return an 1789 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). 1790 * 1791 * Context: Any context. Takes and releases the RCU lock. 1792 * Return: The entry, if found, otherwise %NULL. 1793 */ 1794 void *xa_find(struct xarray *xa, unsigned long *indexp, 1795 unsigned long max, xa_mark_t filter) 1796 { 1797 XA_STATE(xas, xa, *indexp); 1798 void *entry; 1799 1800 rcu_read_lock(); 1801 do { 1802 if ((__force unsigned int)filter < XA_MAX_MARKS) 1803 entry = xas_find_marked(&xas, max, filter); 1804 else 1805 entry = xas_find(&xas, max); 1806 } while (xas_retry(&xas, entry)); 1807 rcu_read_unlock(); 1808 1809 if (entry) 1810 *indexp = xas.xa_index; 1811 return entry; 1812 } 1813 EXPORT_SYMBOL(xa_find); 1814 1815 /** 1816 * xa_find_after() - Search the XArray for a present entry. 1817 * @xa: XArray. 1818 * @indexp: Pointer to an index. 1819 * @max: Maximum index to search to. 1820 * @filter: Selection criterion. 1821 * 1822 * Finds the entry in @xa which matches the @filter and has the lowest 1823 * index that is above @indexp and no more than @max. 1824 * If an entry is found, @indexp is updated to be the index of the entry. 1825 * This function is protected by the RCU read lock, so it may miss entries 1826 * which are being simultaneously added. It will not return an 1827 * %XA_RETRY_ENTRY; if you need to see retry entries, use xas_find(). 1828 * 1829 * Context: Any context. Takes and releases the RCU lock. 1830 * Return: The pointer, if found, otherwise %NULL. 
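 *
 * Together with xa_find(), this is roughly what the xa_for_each() iterator
 * expands to.  An open-coded sketch (illustrative; "my_xa" and "process"
 * are hypothetical):
 *
 *	unsigned long index = 0;
 *	void *entry;
 *
 *	entry = xa_find(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	while (entry) {
 *		process(index, entry);
 *		entry = xa_find_after(&my_xa, &index, ULONG_MAX, XA_PRESENT);
 *	}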
1831 */ 1832 void *xa_find_after(struct xarray *xa, unsigned long *indexp, 1833 unsigned long max, xa_mark_t filter) 1834 { 1835 XA_STATE(xas, xa, *indexp + 1); 1836 void *entry; 1837 1838 rcu_read_lock(); 1839 for (;;) { 1840 if ((__force unsigned int)filter < XA_MAX_MARKS) 1841 entry = xas_find_marked(&xas, max, filter); 1842 else 1843 entry = xas_find(&xas, max); 1844 if (xas.xa_node == XAS_BOUNDS) 1845 break; 1846 if (xas.xa_shift) { 1847 if (xas.xa_index & ((1UL << xas.xa_shift) - 1)) 1848 continue; 1849 } else { 1850 if (xas.xa_offset < (xas.xa_index & XA_CHUNK_MASK)) 1851 continue; 1852 } 1853 if (!xas_retry(&xas, entry)) 1854 break; 1855 } 1856 rcu_read_unlock(); 1857 1858 if (entry) 1859 *indexp = xas.xa_index; 1860 return entry; 1861 } 1862 EXPORT_SYMBOL(xa_find_after); 1863 1864 static unsigned int xas_extract_present(struct xa_state *xas, void **dst, 1865 unsigned long max, unsigned int n) 1866 { 1867 void *entry; 1868 unsigned int i = 0; 1869 1870 rcu_read_lock(); 1871 xas_for_each(xas, entry, max) { 1872 if (xas_retry(xas, entry)) 1873 continue; 1874 dst[i++] = entry; 1875 if (i == n) 1876 break; 1877 } 1878 rcu_read_unlock(); 1879 1880 return i; 1881 } 1882 1883 static unsigned int xas_extract_marked(struct xa_state *xas, void **dst, 1884 unsigned long max, unsigned int n, xa_mark_t mark) 1885 { 1886 void *entry; 1887 unsigned int i = 0; 1888 1889 rcu_read_lock(); 1890 xas_for_each_marked(xas, entry, max, mark) { 1891 if (xas_retry(xas, entry)) 1892 continue; 1893 dst[i++] = entry; 1894 if (i == n) 1895 break; 1896 } 1897 rcu_read_unlock(); 1898 1899 return i; 1900 } 1901 1902 /** 1903 * xa_extract() - Copy selected entries from the XArray into a normal array. 1904 * @xa: The source XArray to copy from. 1905 * @dst: The buffer to copy entries into. 1906 * @start: The first index in the XArray eligible to be selected. 1907 * @max: The last index in the XArray eligible to be selected. 1908 * @n: The maximum number of entries to copy. 1909 * @filter: Selection criterion. 1910 * 1911 * Copies up to @n entries that match @filter from the XArray. The 1912 * copied entries will have indices between @start and @max, inclusive. 1913 * 1914 * The @filter may be an XArray mark value, in which case entries which are 1915 * marked with that mark will be copied. It may also be %XA_PRESENT, in 1916 * which case all entries which are not %NULL will be copied. 1917 * 1918 * The entries returned may not represent a snapshot of the XArray at a 1919 * moment in time. For example, if another thread stores to index 5, then 1920 * index 10, calling xa_extract() may return the old contents of index 5 1921 * and the new contents of index 10. Indices not modified while this 1922 * function is running will not be skipped. 1923 * 1924 * If you need stronger guarantees, holding the xa_lock across calls to this 1925 * function will prevent concurrent modification. 1926 * 1927 * Context: Any context. Takes and releases the RCU lock. 1928 * Return: The number of entries copied. 1929 */ 1930 unsigned int xa_extract(struct xarray *xa, void **dst, unsigned long start, 1931 unsigned long max, unsigned int n, xa_mark_t filter) 1932 { 1933 XA_STATE(xas, xa, start); 1934 1935 if (!n) 1936 return 0; 1937 1938 if ((__force unsigned int)filter < XA_MAX_MARKS) 1939 return xas_extract_marked(&xas, dst, max, n, filter); 1940 return xas_extract_present(&xas, dst, max, n); 1941 } 1942 EXPORT_SYMBOL(xa_extract); 1943 1944 /** 1945 * xa_destroy() - Free all internal data structures. 1946 * @xa: XArray. 
1947 * 1948 * After calling this function, the XArray is empty and has freed all memory 1949 * allocated for its internal data structures. You are responsible for 1950 * freeing the objects referenced by the XArray. 1951 * 1952 * Context: Any context. Takes and releases the xa_lock, interrupt-safe. 1953 */ 1954 void xa_destroy(struct xarray *xa) 1955 { 1956 XA_STATE(xas, xa, 0); 1957 unsigned long flags; 1958 void *entry; 1959 1960 xas.xa_node = NULL; 1961 xas_lock_irqsave(&xas, flags); 1962 entry = xa_head_locked(xa); 1963 RCU_INIT_POINTER(xa->xa_head, NULL); 1964 xas_init_marks(&xas); 1965 if (xa_zero_busy(xa)) 1966 xa_mark_clear(xa, XA_FREE_MARK); 1967 /* lockdep checks we're still holding the lock in xas_free_nodes() */ 1968 if (xa_is_node(entry)) 1969 xas_free_nodes(&xas, xa_to_node(entry)); 1970 xas_unlock_irqrestore(&xas, flags); 1971 } 1972 EXPORT_SYMBOL(xa_destroy); 1973 1974 #ifdef XA_DEBUG 1975 void xa_dump_node(const struct xa_node *node) 1976 { 1977 unsigned i, j; 1978 1979 if (!node) 1980 return; 1981 if ((unsigned long)node & 3) { 1982 pr_cont("node %px\n", node); 1983 return; 1984 } 1985 1986 pr_cont("node %px %s %d parent %px shift %d count %d values %d " 1987 "array %px list %px %px marks", 1988 node, node->parent ? "offset" : "max", node->offset, 1989 node->parent, node->shift, node->count, node->nr_values, 1990 node->array, node->private_list.prev, node->private_list.next); 1991 for (i = 0; i < XA_MAX_MARKS; i++) 1992 for (j = 0; j < XA_MARK_LONGS; j++) 1993 pr_cont(" %lx", node->marks[i][j]); 1994 pr_cont("\n"); 1995 } 1996 1997 void xa_dump_index(unsigned long index, unsigned int shift) 1998 { 1999 if (!shift) 2000 pr_info("%lu: ", index); 2001 else if (shift >= BITS_PER_LONG) 2002 pr_info("0-%lu: ", ~0UL); 2003 else 2004 pr_info("%lu-%lu: ", index, index | ((1UL << shift) - 1)); 2005 } 2006 2007 void xa_dump_entry(const void *entry, unsigned long index, unsigned long shift) 2008 { 2009 if (!entry) 2010 return; 2011 2012 xa_dump_index(index, shift); 2013 2014 if (xa_is_node(entry)) { 2015 if (shift == 0) { 2016 pr_cont("%px\n", entry); 2017 } else { 2018 unsigned long i; 2019 struct xa_node *node = xa_to_node(entry); 2020 xa_dump_node(node); 2021 for (i = 0; i < XA_CHUNK_SIZE; i++) 2022 xa_dump_entry(node->slots[i], 2023 index + (i << node->shift), node->shift); 2024 } 2025 } else if (xa_is_value(entry)) 2026 pr_cont("value %ld (0x%lx) [%px]\n", xa_to_value(entry), 2027 xa_to_value(entry), entry); 2028 else if (!xa_is_internal(entry)) 2029 pr_cont("%px\n", entry); 2030 else if (xa_is_retry(entry)) 2031 pr_cont("retry (%ld)\n", xa_to_internal(entry)); 2032 else if (xa_is_sibling(entry)) 2033 pr_cont("sibling (slot %ld)\n", xa_to_sibling(entry)); 2034 else if (xa_is_zero(entry)) 2035 pr_cont("zero (%ld)\n", xa_to_internal(entry)); 2036 else 2037 pr_cont("UNKNOWN ENTRY (%px)\n", entry); 2038 } 2039 2040 void xa_dump(const struct xarray *xa) 2041 { 2042 void *entry = xa->xa_head; 2043 unsigned int shift = 0; 2044 2045 pr_info("xarray: %px head %px flags %x marks %d %d %d\n", xa, entry, 2046 xa->xa_flags, xa_marked(xa, XA_MARK_0), 2047 xa_marked(xa, XA_MARK_1), xa_marked(xa, XA_MARK_2)); 2048 if (xa_is_node(entry)) 2049 shift = xa_to_node(entry)->shift + XA_CHUNK_SHIFT; 2050 xa_dump_entry(entry, 0, shift); 2051 } 2052 #endif 2053