// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Momchil Velikov
 * Portions Copyright (C) 2001 Christoph Hellwig
 * Copyright (C) 2005 SGI, Christoph Lameter
 * Copyright (C) 2006 Nick Piggin
 * Copyright (C) 2012 Konstantin Khlebnikov
 * Copyright (C) 2016 Intel, Matthew Wilcox
 * Copyright (C) 2016 Intel, Ross Zwisler
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>
#include <linux/preempt.h>		/* in_interrupt() */
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/xarray.h>

#include "radix-tree.h"

/*
 * Radix tree node cache.
 */
struct kmem_cache *radix_tree_node_cachep;

/*
 * The radix tree is variable-height, so an insert operation not only has
 * to build the branch to its corresponding item, it also has to build the
 * branch to existing items if the size has to be increased (by
 * radix_tree_extend).
 *
 * The worst case is a zero height tree with just a single item at index 0,
 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
 * (With RADIX_TREE_MAP_SHIFT == 6 on a 64-bit machine, for example,
 * RADIX_TREE_MAX_PATH is 11, so 21 nodes are preloaded.) Hence:
 */
#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)

/*
 * The IDR does not have to be as high as the radix tree since it uses
 * signed integers, not unsigned longs.
 */
#define IDR_INDEX_BITS		(8 /* CHAR_BIT */ * sizeof(int) - 1)
#define IDR_MAX_PATH		(DIV_ROUND_UP(IDR_INDEX_BITS, \
						RADIX_TREE_MAP_SHIFT))
#define IDR_PRELOAD_SIZE	(IDR_MAX_PATH * 2 - 1)

/*
 * Per-cpu pool of preloaded nodes
 */
DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
EXPORT_PER_CPU_SYMBOL_GPL(radix_tree_preloads);

static inline struct radix_tree_node *entry_to_node(void *ptr)
{
	return (void *)((unsigned long)ptr & ~RADIX_TREE_INTERNAL_NODE);
}

static inline void *node_to_entry(void *ptr)
{
	return (void *)((unsigned long)ptr | RADIX_TREE_INTERNAL_NODE);
}

#define RADIX_TREE_RETRY	XA_RETRY_ENTRY

static inline unsigned long
get_slot_offset(const struct radix_tree_node *parent, void __rcu **slot)
{
	return parent ? slot - parent->slots : 0;
}
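
/*
 * Illustration (not used by the code): how radix_tree_descend() below
 * picks a slot.  Assuming RADIX_TREE_MAP_SHIFT == 6 (64 slots per node),
 * index 0x3041 descends via offset (0x3041 >> 12) & 0x3f == 3 in a node
 * with shift 12, offset (0x3041 >> 6) & 0x3f == 1 at shift 6, and
 * offset 0x3041 & 0x3f == 1 at shift 0.
 */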
static unsigned int radix_tree_descend(const struct radix_tree_node *parent,
			struct radix_tree_node **nodep, unsigned long index)
{
	unsigned int offset = (index >> parent->shift) & RADIX_TREE_MAP_MASK;
	void __rcu **entry = rcu_dereference_raw(parent->slots[offset]);

	*nodep = (void *)entry;
	return offset;
}

static inline gfp_t root_gfp_mask(const struct radix_tree_root *root)
{
	return root->xa_flags & (__GFP_BITS_MASK & ~GFP_ZONEMASK);
}

static inline void tag_set(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__set_bit(offset, node->tags[tag]);
}

static inline void tag_clear(struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	__clear_bit(offset, node->tags[tag]);
}

static inline int tag_get(const struct radix_tree_node *node, unsigned int tag,
		int offset)
{
	return test_bit(offset, node->tags[tag]);
}

static inline void root_tag_set(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags |= (__force gfp_t)(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear(struct radix_tree_root *root, unsigned tag)
{
	root->xa_flags &= (__force gfp_t)~(1 << (tag + ROOT_TAG_SHIFT));
}

static inline void root_tag_clear_all(struct radix_tree_root *root)
{
	root->xa_flags &= (__force gfp_t)((1 << ROOT_TAG_SHIFT) - 1);
}

static inline int root_tag_get(const struct radix_tree_root *root, unsigned tag)
{
	return (__force int)root->xa_flags & (1 << (tag + ROOT_TAG_SHIFT));
}

static inline unsigned root_tags_get(const struct radix_tree_root *root)
{
	return (__force unsigned)root->xa_flags >> ROOT_TAG_SHIFT;
}

static inline bool is_idr(const struct radix_tree_root *root)
{
	return !!(root->xa_flags & ROOT_IS_IDR);
}

/*
 * Returns 1 if any slot in the node has this tag set.
 * Otherwise returns 0.
 */
static inline int any_tag_set(const struct radix_tree_node *node,
							unsigned int tag)
{
	unsigned idx;

	for (idx = 0; idx < RADIX_TREE_TAG_LONGS; idx++) {
		if (node->tags[tag][idx])
			return 1;
	}
	return 0;
}

static inline void all_tag_set(struct radix_tree_node *node, unsigned int tag)
{
	bitmap_fill(node->tags[tag], RADIX_TREE_MAP_SIZE);
}
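
/*
 * Illustration (a sketch, not exercised by this file): tags are kept as
 * one bitmap per tag per node, with summary bits for the whole tree in
 * the high bits of root->xa_flags.  With 64-slot nodes, after
 * radix_tree_tag_set(root, 0x41, 0) on a two-level tree, bit 1 is set
 * in tags[0] of the root node and in tags[0] of the child covering
 * indices 0x40-0x7f, and the root summary bit for tag 0 is set.
 */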
/**
 * radix_tree_find_next_bit - find the next set bit in a memory region
 *
 * @node: where to begin the search
 * @tag: the tag index
 * @offset: the bitnumber to start searching at
 *
 * Unrollable variant of find_next_bit() for constant size arrays.
 * Tail bits starting from size to roundup(size, BITS_PER_LONG) must be zero.
 * Returns next bit offset, or size if nothing found.
 */
static __always_inline unsigned long
radix_tree_find_next_bit(struct radix_tree_node *node, unsigned int tag,
			 unsigned long offset)
{
	const unsigned long *addr = node->tags[tag];

	if (offset < RADIX_TREE_MAP_SIZE) {
		unsigned long tmp;

		addr += offset / BITS_PER_LONG;
		tmp = *addr >> (offset % BITS_PER_LONG);
		if (tmp)
			return __ffs(tmp) + offset;
		offset = (offset + BITS_PER_LONG) & ~(BITS_PER_LONG - 1);
		while (offset < RADIX_TREE_MAP_SIZE) {
			tmp = *++addr;
			if (tmp)
				return __ffs(tmp) + offset;
			offset += BITS_PER_LONG;
		}
	}
	return RADIX_TREE_MAP_SIZE;
}

static unsigned int iter_offset(const struct radix_tree_iter *iter)
{
	return iter->index & RADIX_TREE_MAP_MASK;
}

/*
 * The maximum index which can be stored in a radix tree
 */
static inline unsigned long shift_maxindex(unsigned int shift)
{
	return (RADIX_TREE_MAP_SIZE << shift) - 1;
}

static inline unsigned long node_maxindex(const struct radix_tree_node *node)
{
	return shift_maxindex(node->shift);
}

static unsigned long next_index(unsigned long index,
				const struct radix_tree_node *node,
				unsigned long offset)
{
	return (index & ~node_maxindex(node)) + (offset << node->shift);
}

/*
 * This assumes that the caller has performed appropriate preallocation, and
 * that the caller has pinned this thread of control to the current CPU.
 */
static struct radix_tree_node *
radix_tree_node_alloc(gfp_t gfp_mask, struct radix_tree_node *parent,
			struct radix_tree_root *root,
			unsigned int shift, unsigned int offset,
			unsigned int count, unsigned int nr_values)
{
	struct radix_tree_node *ret = NULL;

	/*
	 * Preload code isn't irq safe and it doesn't make sense to use
	 * preloading during an interrupt anyway as all the allocations have
	 * to be atomic. So just do normal allocation when in interrupt.
	 */
	if (!gfpflags_allow_blocking(gfp_mask) && !in_interrupt()) {
		struct radix_tree_preload *rtp;

		/*
		 * Even if the caller has preloaded, try to allocate from the
		 * cache first for the new node to get accounted to the memory
		 * cgroup.
		 */
		ret = kmem_cache_alloc(radix_tree_node_cachep,
				       gfp_mask | __GFP_NOWARN);
		if (ret)
			goto out;

		/*
		 * Provided the caller has preloaded here, we will always
		 * succeed in getting a node here (and never reach
		 * kmem_cache_alloc)
		 */
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr) {
			ret = rtp->nodes;
			rtp->nodes = ret->parent;
			rtp->nr--;
		}
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(ret);
		goto out;
	}
	ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
out:
	BUG_ON(radix_tree_is_internal_node(ret));
	if (ret) {
		ret->shift = shift;
		ret->offset = offset;
		ret->count = count;
		ret->nr_values = nr_values;
		ret->parent = parent;
		ret->array = root;
	}
	return ret;
}
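
/*
 * Illustration (a sketch): preloaded nodes are kept on a per-CPU list
 * chained through their ->parent pointers, so after two successful
 * preload allocations the pool looks like
 *	rtp->nodes -> node A -> node B -> NULL, with rtp->nr == 2.
 * radix_tree_node_alloc() above pops from the head of that list.
 */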
void radix_tree_node_rcu_free(struct rcu_head *head)
{
	struct radix_tree_node *node =
			container_of(head, struct radix_tree_node, rcu_head);

	/*
	 * Must only free zeroed nodes into the slab. We can be left with
	 * non-NULL entries by radix_tree_free_nodes, so clear the entries
	 * and tags here.
	 */
	memset(node->slots, 0, sizeof(node->slots));
	memset(node->tags, 0, sizeof(node->tags));
	INIT_LIST_HEAD(&node->private_list);

	kmem_cache_free(radix_tree_node_cachep, node);
}

static inline void
radix_tree_node_free(struct radix_tree_node *node)
{
	call_rcu(&node->rcu_head, radix_tree_node_rcu_free);
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
static __must_check int __radix_tree_preload(gfp_t gfp_mask, unsigned nr)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;
	int ret = -ENOMEM;

	/*
	 * Nodes preloaded by one cgroup can be used by another cgroup, so
	 * they should never be accounted to any particular memory cgroup.
	 */
	gfp_mask &= ~__GFP_ACCOUNT;

	local_lock(&radix_tree_preloads.lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	while (rtp->nr < nr) {
		local_unlock(&radix_tree_preloads.lock);
		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
		if (node == NULL)
			goto out;
		local_lock(&radix_tree_preloads.lock);
		rtp = this_cpu_ptr(&radix_tree_preloads);
		if (rtp->nr < nr) {
			node->parent = rtp->nodes;
			rtp->nodes = node;
			rtp->nr++;
		} else {
			kmem_cache_free(radix_tree_node_cachep, node);
		}
	}
	ret = 0;
out:
	return ret;
}

/*
 * Load up this CPU's radix_tree_node buffer with sufficient objects to
 * ensure that the addition of a single element in the tree cannot fail. On
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 *
 * To make use of this facility, the radix tree must be initialised without
 * __GFP_DIRECT_RECLAIM being passed to INIT_RADIX_TREE().
 */
int radix_tree_preload(gfp_t gfp_mask)
{
	/* Warn on nonsensical use... */
	WARN_ON_ONCE(!gfpflags_allow_blocking(gfp_mask));
	return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
}
EXPORT_SYMBOL(radix_tree_preload);

/*
 * The same as above function, except we don't guarantee preloading happens.
 * We do it if we decide it helps. On success, return zero with preemption
 * disabled. On error, return -ENOMEM with preemption not disabled.
 */
int radix_tree_maybe_preload(gfp_t gfp_mask)
{
	if (gfpflags_allow_blocking(gfp_mask))
		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
	/* Preloading doesn't help anything with this gfp mask, skip it */
	local_lock(&radix_tree_preloads.lock);
	return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
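
/*
 * Typical usage of the preload API, as a sketch ("mylock", "mytree" and
 * "item" are illustrative, not part of this file).  Both preload
 * variants return with the per-CPU pool locked on success, so they must
 * always be paired with radix_tree_preload_end():
 *
 *	if (radix_tree_preload(GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&mylock);
 *	error = radix_tree_insert(&mytree, index, item);
 *	spin_unlock(&mylock);
 *	radix_tree_preload_end();
 */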
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
		struct radix_tree_node **nodep, unsigned long *maxindex)
{
	struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);

	*nodep = node;

	if (likely(radix_tree_is_internal_node(node))) {
		node = entry_to_node(node);
		*maxindex = node_maxindex(node);
		return node->shift + RADIX_TREE_MAP_SHIFT;
	}

	*maxindex = 0;
	return 0;
}

/*
 * Extend a radix tree so it can store key @index.
 */
static int radix_tree_extend(struct radix_tree_root *root, gfp_t gfp,
				unsigned long index, unsigned int shift)
{
	void *entry;
	unsigned int maxshift;
	int tag;

	/* Figure out what the shift should be. */
	maxshift = shift;
	while (index > shift_maxindex(maxshift))
		maxshift += RADIX_TREE_MAP_SHIFT;

	entry = rcu_dereference_raw(root->xa_head);
	if (!entry && (!is_idr(root) || root_tag_get(root, IDR_FREE)))
		goto out;

	do {
		struct radix_tree_node *node = radix_tree_node_alloc(gfp, NULL,
							root, shift, 0, 1, 0);
		if (!node)
			return -ENOMEM;

		if (is_idr(root)) {
			all_tag_set(node, IDR_FREE);
			if (!root_tag_get(root, IDR_FREE)) {
				tag_clear(node, IDR_FREE, 0);
				root_tag_set(root, IDR_FREE);
			}
		} else {
			/* Propagate the aggregated tag info to the new child */
			for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
				if (root_tag_get(root, tag))
					tag_set(node, tag, 0);
			}
		}

		BUG_ON(shift > BITS_PER_LONG);
		if (radix_tree_is_internal_node(entry)) {
			entry_to_node(entry)->parent = node;
		} else if (xa_is_value(entry)) {
			/* Moving a value entry root->xa_head to a node */
			node->nr_values = 1;
		}
		/*
		 * entry was already in the radix tree, so we do not need
		 * rcu_assign_pointer here
		 */
		node->slots[0] = (void __rcu *)entry;
		entry = node_to_entry(node);
		rcu_assign_pointer(root->xa_head, entry);
		shift += RADIX_TREE_MAP_SHIFT;
	} while (shift <= maxshift);
out:
	return maxshift + RADIX_TREE_MAP_SHIFT;
}
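
/*
 * Growth illustration (a sketch, assuming RADIX_TREE_MAP_SHIFT == 6 on
 * a 64-bit machine): a tree holding a single entry at index 0 needs no
 * nodes at all, root->xa_head points straight at the item.  Inserting
 * at index ULONG_MAX then makes radix_tree_extend() stack eleven new
 * levels (shifts 0, 6, ..., 60), each new node taking the old head as
 * its slot 0.
 */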
/**
 * radix_tree_shrink - shrink radix tree to minimum height
 * @root: radix tree root
 */
static inline bool radix_tree_shrink(struct radix_tree_root *root)
{
	bool shrunk = false;

	for (;;) {
		struct radix_tree_node *node = rcu_dereference_raw(root->xa_head);
		struct radix_tree_node *child;

		if (!radix_tree_is_internal_node(node))
			break;
		node = entry_to_node(node);

		/*
		 * If the candidate node has more than one child, or its
		 * child is not at the leftmost slot, we cannot shrink.
		 */
		if (node->count != 1)
			break;
		child = rcu_dereference_raw(node->slots[0]);
		if (!child)
			break;

		/*
		 * For an IDR, we must not shrink entry 0 into the root in
		 * case somebody calls idr_replace() with a pointer that
		 * appears to be an internal entry.
		 */
		if (!node->shift && is_idr(root))
			break;

		if (radix_tree_is_internal_node(child))
			entry_to_node(child)->parent = NULL;

		/*
		 * We don't need rcu_assign_pointer(), since we are simply
		 * moving the node from one part of the tree to another: if it
		 * was safe to dereference the old pointer to it
		 * (node->slots[0]), it will be safe to dereference the new
		 * one (root->xa_head) as far as dependent read barriers go.
		 */
		root->xa_head = (void __rcu *)child;
		if (is_idr(root) && !tag_get(node, IDR_FREE, 0))
			root_tag_clear(root, IDR_FREE);

		/*
		 * We have a dilemma here. The node's slot[0] must not be
		 * NULLed in case there are concurrent lookups expecting to
		 * find the item. However if this was a bottom-level node,
		 * then it may be subject to the slot pointer being visible
		 * to callers dereferencing it. If the item corresponding to
		 * slot[0] is subsequently deleted, these callers would expect
		 * their slot to become empty sooner or later.
		 *
		 * For example, lockless pagecache will look up a slot, deref
		 * the page pointer, and if the page has 0 refcount it means it
		 * was concurrently deleted from pagecache so try the deref
		 * again. Fortunately there is already a requirement for logic
		 * to retry the entire slot lookup -- the indirect pointer
		 * problem (replacing a direct root node with an indirect
		 * pointer also results in a stale slot). So tag the slot as
		 * indirect to force callers to retry.
		 */
		node->count = 0;
		if (!radix_tree_is_internal_node(child))
			node->slots[0] = (void __rcu *)RADIX_TREE_RETRY;

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		shrunk = true;
	}

	return shrunk;
}
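
/*
 * Shrink illustration (a sketch): if a two-level tree holds a single
 * entry at index 0, i.e.
 *	root -> node(shift 6) -> node(shift 0) -> item
 * then two passes of the loop above leave root->xa_head pointing at the
 * item itself; the last node freed has its slot 0 set to
 * RADIX_TREE_RETRY so that concurrent lookups restart.
 */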
static bool delete_node(struct radix_tree_root *root,
			struct radix_tree_node *node)
{
	bool deleted = false;

	do {
		struct radix_tree_node *parent;

		if (node->count) {
			if (node_to_entry(node) ==
					rcu_dereference_raw(root->xa_head))
				deleted |= radix_tree_shrink(root);
			return deleted;
		}

		parent = node->parent;
		if (parent) {
			parent->slots[node->offset] = NULL;
			parent->count--;
		} else {
			/*
			 * Shouldn't the tags already have all been cleared
			 * by the caller?
			 */
			if (!is_idr(root))
				root_tag_clear_all(root);
			root->xa_head = NULL;
		}

		WARN_ON_ONCE(!list_empty(&node->private_list));
		radix_tree_node_free(node);
		deleted = true;

		node = parent;
	} while (node);

	return deleted;
}

/**
 * __radix_tree_create - create a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Create, if necessary, and return the node and slot for an item
 * at position @index in the radix tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->xa_head is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 *
 * Returns -ENOMEM, or 0 for success.
 */
static int __radix_tree_create(struct radix_tree_root *root,
		unsigned long index, struct radix_tree_node **nodep,
		void __rcu ***slotp)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex;
	unsigned int shift, offset = 0;
	unsigned long max = index;
	gfp_t gfp = root_gfp_mask(root);

	shift = radix_tree_load_root(root, &child, &maxindex);

	/* Make sure the tree is high enough. */
	if (max > maxindex) {
		int error = radix_tree_extend(root, gfp, max, shift);

		if (error < 0)
			return error;
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}

	while (shift > 0) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return -ENOMEM;
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		/* Go a level down */
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);
		slot = &node->slots[offset];
	}

	if (nodep)
		*nodep = node;
	if (slotp)
		*slotp = slot;
	return 0;
}

/*
 * Free any nodes below this node.  The tree is presumed to not need
 * shrinking, and any user data in the tree is presumed to not need a
 * destructor called on it.  If we need to add a destructor, we can
 * add that functionality later.  Note that we may not clear tags or
 * slots from the tree as an RCU walker may still have a pointer into
 * this subtree.  We could replace the entries with RADIX_TREE_RETRY,
 * but we'll still have to clear those in rcu_free.
 */
static void radix_tree_free_nodes(struct radix_tree_node *node)
{
	unsigned offset = 0;
	struct radix_tree_node *child = entry_to_node(node);

	for (;;) {
		void *entry = rcu_dereference_raw(child->slots[offset]);

		if (xa_is_node(entry) && child->shift) {
			child = entry_to_node(entry);
			offset = 0;
			continue;
		}
		offset++;
		while (offset == RADIX_TREE_MAP_SIZE) {
			struct radix_tree_node *old = child;

			offset = child->offset + 1;
			child = child->parent;
			WARN_ON_ONCE(!list_empty(&old->private_list));
			radix_tree_node_free(old);
			if (old == entry_to_node(node))
				return;
		}
	}
}

static inline int insert_entries(struct radix_tree_node *node,
		void __rcu **slot, void *item)
{
	if (*slot)
		return -EEXIST;
	rcu_assign_pointer(*slot, item);
	if (node) {
		node->count++;
		if (xa_is_value(item))
			node->nr_values++;
	}
	return 1;
}

/**
 * radix_tree_insert - insert into a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: item to insert
 *
 * Insert an item into the radix tree at position @index.
 *
 * Returns 0 on success, -EEXIST if an entry is already present at
 * @index, or -ENOMEM if a node could not be allocated.
 */
int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
			void *item)
{
	struct radix_tree_node *node;
	void __rcu **slot;
	int error;

	BUG_ON(radix_tree_is_internal_node(item));

	error = __radix_tree_create(root, index, &node, &slot);
	if (error)
		return error;

	error = insert_entries(node, slot, item);
	if (error < 0)
		return error;

	if (node) {
		unsigned offset = get_slot_offset(node, slot);

		BUG_ON(tag_get(node, 0, offset));
		BUG_ON(tag_get(node, 1, offset));
		BUG_ON(tag_get(node, 2, offset));
	} else {
		BUG_ON(root_tags_get(root));
	}

	return 0;
}
EXPORT_SYMBOL(radix_tree_insert);
/**
 * __radix_tree_lookup - lookup an item in a radix tree
 * @root: radix tree root
 * @index: index key
 * @nodep: returns node
 * @slotp: returns slot
 *
 * Lookup and return the item at position @index in the radix
 * tree @root.
 *
 * Until there is more than one item in the tree, no nodes are
 * allocated and @root->xa_head is used as a direct slot instead of
 * pointing to a node, in which case *@nodep will be NULL.
 */
void *__radix_tree_lookup(const struct radix_tree_root *root,
			  unsigned long index, struct radix_tree_node **nodep,
			  void __rcu ***slotp)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	void __rcu **slot;

restart:
	parent = NULL;
	slot = (void __rcu **)&root->xa_head;
	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		slot = parent->slots + offset;
		if (node == RADIX_TREE_RETRY)
			goto restart;
		if (parent->shift == 0)
			break;
	}

	if (nodep)
		*nodep = parent;
	if (slotp)
		*slotp = slot;
	return node;
}

/**
 * radix_tree_lookup_slot - lookup a slot in a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Returns: the slot corresponding to the position @index in the
 * radix tree @root. This is useful for update-if-exists operations.
 *
 * This function can be called under rcu_read_lock iff the slot is not
 * modified by radix_tree_replace_slot, otherwise it must be called
 * exclusive from other writers. Any dereference of the slot must be done
 * using radix_tree_deref_slot.
 */
void __rcu **radix_tree_lookup_slot(const struct radix_tree_root *root,
				unsigned long index)
{
	void __rcu **slot;

	if (!__radix_tree_lookup(root, index, NULL, &slot))
		return NULL;
	return slot;
}
EXPORT_SYMBOL(radix_tree_lookup_slot);

/**
 * radix_tree_lookup - perform lookup operation on a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Lookup the item at the position @index in the radix tree @root.
 *
 * This function can be called under rcu_read_lock, however the caller
 * must manage lifetimes of leaf nodes (eg. RCU may also be used to free
 * them safely). No RCU barriers are required to access or modify the
 * returned item, however.
 */
void *radix_tree_lookup(const struct radix_tree_root *root, unsigned long index)
{
	return __radix_tree_lookup(root, index, NULL, NULL);
}
EXPORT_SYMBOL(radix_tree_lookup);
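
/*
 * Lockless read-side sketch (illustrative; "mytree" is not part of this
 * file).  A slot obtained under rcu_read_lock() must be dereferenced
 * with radix_tree_deref_slot(), and the lookup restarted if the tree
 * was grown or shrunk underneath us:
 *
 *	void __rcu **slot;
 *	void *entry;
 *
 *	rcu_read_lock();
 * again:
 *	slot = radix_tree_lookup_slot(&mytree, index);
 *	entry = slot ? radix_tree_deref_slot(slot) : NULL;
 *	if (radix_tree_deref_retry(entry))
 *		goto again;
 *	rcu_read_unlock();
 */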
static void replace_slot(void __rcu **slot, void *item,
		struct radix_tree_node *node, int count, int values)
{
	if (node && (count || values)) {
		node->count += count;
		node->nr_values += values;
	}

	rcu_assign_pointer(*slot, item);
}

static bool node_tag_get(const struct radix_tree_root *root,
				const struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	if (node)
		return tag_get(node, tag, offset);
	return root_tag_get(root, tag);
}

/*
 * IDR users want to be able to store NULL in the tree, so if the slot isn't
 * free, don't adjust the count, even if it's transitioning between NULL and
 * non-NULL. For the IDA, we mark slots as being IDR_FREE while they still
 * have empty bits, but it only stores NULL in slots when they're being
 * deleted.
 */
static int calculate_count(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot,
				void *item, void *old)
{
	if (is_idr(root)) {
		unsigned offset = get_slot_offset(node, slot);
		bool free = node_tag_get(root, node, IDR_FREE, offset);

		if (!free)
			return 0;
		if (!old)
			return 1;
	}
	return !!item - !!old;
}

/**
 * __radix_tree_replace - replace item in a slot
 * @root: radix tree root
 * @node: pointer to tree node
 * @slot: pointer to slot in @node
 * @item: new item to store in the slot.
 *
 * For use with __radix_tree_lookup(). Caller must hold tree write locked
 * across slot lookup and replacement.
 */
void __radix_tree_replace(struct radix_tree_root *root,
			  struct radix_tree_node *node,
			  void __rcu **slot, void *item)
{
	void *old = rcu_dereference_raw(*slot);
	int values = !!xa_is_value(item) - !!xa_is_value(old);
	int count = calculate_count(root, node, slot, item, old);

	/*
	 * This function supports replacing value entries and
	 * deleting entries, but that needs accounting against the
	 * node unless the slot is root->xa_head.
	 */
	WARN_ON_ONCE(!node && (slot != (void __rcu **)&root->xa_head) &&
			(count || values));
	replace_slot(slot, item, node, count, values);

	if (!node)
		return;

	delete_node(root, node);
}

/**
 * radix_tree_replace_slot - replace item in a slot
 * @root: radix tree root
 * @slot: pointer to slot
 * @item: new item to store in the slot.
 *
 * For use with radix_tree_lookup_slot() and
 * radix_tree_gang_lookup_tag_slot(). Caller must hold tree write locked
 * across slot lookup and replacement.
 *
 * NOTE: This cannot be used to switch between non-entries (empty slots),
 * regular entries, and value entries, as that requires accounting
 * inside the radix tree node. When switching from one type of entry or
 * deleting, use __radix_tree_lookup() and __radix_tree_replace() or
 * radix_tree_iter_replace().
 */
void radix_tree_replace_slot(struct radix_tree_root *root,
			     void __rcu **slot, void *item)
{
	__radix_tree_replace(root, NULL, slot, item);
}
EXPORT_SYMBOL(radix_tree_replace_slot);
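
/*
 * Update-if-present sketch (illustrative; the caller must hold the tree
 * write locked, and "mytree"/"new_item" are assumptions):
 *
 *	void __rcu **slot = radix_tree_lookup_slot(&mytree, index);
 *
 *	if (slot)
 *		radix_tree_replace_slot(&mytree, slot, new_item);
 */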
/**
 * radix_tree_iter_replace - replace item in a slot
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 * @item: new item to store in the slot.
 *
 * For use with radix_tree_for_each_slot().
 * Caller must hold tree write locked.
 */
void radix_tree_iter_replace(struct radix_tree_root *root,
				const struct radix_tree_iter *iter,
				void __rcu **slot, void *item)
{
	__radix_tree_replace(root, iter->node, slot, item);
}

static void node_tag_set(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (tag_get(node, tag, offset))
			return;
		tag_set(node, tag, offset);
		offset = node->offset;
		node = node->parent;
	}

	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);
}

/**
 * radix_tree_tag_set - set a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Set the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree, from the root
 * all the way down to the leaf node.
 *
 * Returns the address of the tagged item. Setting a tag on a not-present
 * item is a bug.
 */
void *radix_tree_tag_set(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	radix_tree_load_root(root, &node, &maxindex);
	BUG_ON(index > maxindex);

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
		BUG_ON(!node);

		if (!tag_get(parent, tag, offset))
			tag_set(parent, tag, offset);
	}

	/* set the root's tag bit */
	if (!root_tag_get(root, tag))
		root_tag_set(root, tag);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_set);

static void node_tag_clear(struct radix_tree_root *root,
				struct radix_tree_node *node,
				unsigned int tag, unsigned int offset)
{
	while (node) {
		if (!tag_get(node, tag, offset))
			return;
		tag_clear(node, tag, offset);
		if (any_tag_set(node, tag))
			return;

		offset = node->offset;
		node = node->parent;
	}

	/* clear the root's tag bit */
	if (root_tag_get(root, tag))
		root_tag_clear(root, tag);
}

/**
 * radix_tree_tag_clear - clear a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index
 *
 * Clear the search tag (which must be < RADIX_TREE_MAX_TAGS)
 * corresponding to @index in the radix tree. If this causes
 * the leaf node to have no tags set then clear the tag in the
 * next-to-leaf node, etc.
 *
 * Returns the address of the tagged item on success, else NULL. ie:
 * has the same return value and semantics as radix_tree_lookup().
 */
void *radix_tree_tag_clear(struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;
	int offset = 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return NULL;

	parent = NULL;

	while (radix_tree_is_internal_node(node)) {
		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);
	}

	if (node)
		node_tag_clear(root, parent, tag, offset);

	return node;
}
EXPORT_SYMBOL(radix_tree_tag_clear);

/**
 * radix_tree_iter_tag_clear - clear a tag on the current iterator entry
 * @root: radix tree root
 * @iter: iterator state
 * @tag: tag to clear
 */
void radix_tree_iter_tag_clear(struct radix_tree_root *root,
			const struct radix_tree_iter *iter, unsigned int tag)
{
	node_tag_clear(root, iter->node, tag, iter_offset(iter));
}
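
/*
 * Tag usage sketch (illustrative; tag 0 and "mytree" are arbitrary
 * assumptions, and the caller is expected to hold the tree lock for the
 * modifications):
 *
 *	void *results[16];
 *	unsigned int nr;
 *
 *	radix_tree_tag_set(&mytree, index, 0);
 *	nr = radix_tree_gang_lookup_tag(&mytree, results, 0, 16, 0);
 *	radix_tree_tag_clear(&mytree, index, 0);
 */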
/**
 * radix_tree_tag_get - get a tag on a radix tree node
 * @root: radix tree root
 * @index: index key
 * @tag: tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Return values:
 *
 *  0: tag not present or not set
 *  1: tag set
 *
 * Note that the return value of this function may not be relied on, even if
 * the RCU lock is held, unless tag modification and node deletion are excluded
 * from concurrency.
 */
int radix_tree_tag_get(const struct radix_tree_root *root,
			unsigned long index, unsigned int tag)
{
	struct radix_tree_node *node, *parent;
	unsigned long maxindex;

	if (!root_tag_get(root, tag))
		return 0;

	radix_tree_load_root(root, &node, &maxindex);
	if (index > maxindex)
		return 0;

	while (radix_tree_is_internal_node(node)) {
		unsigned offset;

		parent = entry_to_node(node);
		offset = radix_tree_descend(parent, &node, index);

		if (!tag_get(parent, tag, offset))
			return 0;
		if (node == RADIX_TREE_RETRY)
			break;
	}

	return 1;
}
EXPORT_SYMBOL(radix_tree_tag_get);

/* Construct iter->tags bit-mask from node->tags[tag] array */
static void set_iter_tags(struct radix_tree_iter *iter,
				struct radix_tree_node *node, unsigned offset,
				unsigned tag)
{
	unsigned tag_long = offset / BITS_PER_LONG;
	unsigned tag_bit = offset % BITS_PER_LONG;

	if (!node) {
		iter->tags = 1;
		return;
	}

	iter->tags = node->tags[tag][tag_long] >> tag_bit;

	/* This never happens if RADIX_TREE_TAG_LONGS == 1 */
	if (tag_long < RADIX_TREE_TAG_LONGS - 1) {
		/* Pick tags from next element */
		if (tag_bit)
			iter->tags |= node->tags[tag][tag_long + 1] <<
						(BITS_PER_LONG - tag_bit);
		/* Clip chunk size, here only BITS_PER_LONG tags */
		iter->next_index = __radix_tree_iter_add(iter, BITS_PER_LONG);
	}
}

/**
 * radix_tree_iter_resume - resume iterating when the chunk may be invalid
 * @slot: pointer to current slot
 * @iter: iterator state
 *
 * If the iterator needs to release then reacquire a lock, the chunk may
 * have been invalidated by an insertion or deletion. Call this function
 * before releasing the lock to continue the iteration from the next index.
 */
void __rcu **radix_tree_iter_resume(void __rcu **slot,
					struct radix_tree_iter *iter)
{
	slot++;
	iter->index = __radix_tree_iter_add(iter, 1);
	iter->next_index = iter->index;
	iter->tags = 0;
	return NULL;
}
EXPORT_SYMBOL(radix_tree_iter_resume);
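
/*
 * The chunk API below is normally consumed through the iterator macros
 * from <linux/radix-tree.h>, e.g. (a sketch; "mytree" is illustrative):
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	rcu_read_lock();
 *	radix_tree_for_each_slot(slot, &mytree, &iter, 0)
 *		pr_info("entry at index %lu\n", iter.index);
 *	rcu_read_unlock();
 */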
/**
 * radix_tree_next_chunk - find next chunk of slots for iteration
 *
 * @root: radix tree root
 * @iter: iterator state
 * @flags: RADIX_TREE_ITER_* flags and tag index
 * Returns: pointer to chunk first slot, or NULL if iteration is over
 */
void __rcu **radix_tree_next_chunk(const struct radix_tree_root *root,
			     struct radix_tree_iter *iter, unsigned flags)
{
	unsigned tag = flags & RADIX_TREE_ITER_TAG_MASK;
	struct radix_tree_node *node, *child;
	unsigned long index, offset, maxindex;

	if ((flags & RADIX_TREE_ITER_TAGGED) && !root_tag_get(root, tag))
		return NULL;

	/*
	 * Catch next_index overflow after ~0UL. iter->index never overflows
	 * during iterating; it can be zero only at the beginning.
	 * And we cannot overflow iter->next_index in a single step,
	 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
	 *
	 * This condition is also used by radix_tree_next_slot() to stop
	 * contiguous iterating, and to forbid switching to the next chunk.
	 */
	index = iter->next_index;
	if (!index && iter->index)
		return NULL;

restart:
	radix_tree_load_root(root, &child, &maxindex);
	if (index > maxindex)
		return NULL;
	if (!child)
		return NULL;

	if (!radix_tree_is_internal_node(child)) {
		/* Single-slot tree */
		iter->index = index;
		iter->next_index = maxindex + 1;
		iter->tags = 1;
		iter->node = NULL;
		return (void __rcu **)&root->xa_head;
	}

	do {
		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, index);

		if ((flags & RADIX_TREE_ITER_TAGGED) ?
				!tag_get(node, tag, offset) : !child) {
			/* Hole detected */
			if (flags & RADIX_TREE_ITER_CONTIG)
				return NULL;

			if (flags & RADIX_TREE_ITER_TAGGED)
				offset = radix_tree_find_next_bit(node, tag,
						offset + 1);
			else
				while (++offset < RADIX_TREE_MAP_SIZE) {
					void *slot = rcu_dereference_raw(
							node->slots[offset]);
					if (slot)
						break;
				}
			index &= ~node_maxindex(node);
			index += offset << node->shift;
			/* Overflow after ~0UL */
			if (!index)
				return NULL;
			if (offset == RADIX_TREE_MAP_SIZE)
				goto restart;
			child = rcu_dereference_raw(node->slots[offset]);
		}

		if (!child)
			goto restart;
		if (child == RADIX_TREE_RETRY)
			break;
	} while (node->shift && radix_tree_is_internal_node(child));

	/* Update the iterator state */
	iter->index = (index & ~node_maxindex(node)) | offset;
	iter->next_index = (index | node_maxindex(node)) + 1;
	iter->node = node;

	if (flags & RADIX_TREE_ITER_TAGGED)
		set_iter_tags(iter, node, offset, tag);

	return node->slots + offset;
}
EXPORT_SYMBOL(radix_tree_next_chunk);

/**
 * radix_tree_gang_lookup - perform multiple lookup on a radix tree
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 *
 * Performs an index-ascending scan of the tree for present items. Places
 * them at *@results and returns the number of items which were placed at
 * *@results.
 *
 * The implementation is naive.
 *
 * Like radix_tree_lookup, radix_tree_gang_lookup may be called under
 * rcu_read_lock. In this case, rather than the returned results being
 * an atomic snapshot of the tree at a single point in time, the
 * semantics of an RCU protected gang lookup are as though multiple
 * radix_tree_lookups have been issued in individual locks, and results
 * stored in 'results'.
 */
unsigned int
radix_tree_gang_lookup(const struct radix_tree_root *root, void **results,
			unsigned long first_index, unsigned int max_items)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_slot(slot, root, &iter, first_index) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup);
/**
 * radix_tree_gang_lookup_tag - perform multiple lookup on a radix tree
 *                              based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set. Places the items at *@results and
 * returns the number of items which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag(const struct radix_tree_root *root, void **results,
		unsigned long first_index, unsigned int max_items,
		unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = rcu_dereference_raw(*slot);
		if (!results[ret])
			continue;
		if (radix_tree_is_internal_node(results[ret])) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag);

/**
 * radix_tree_gang_lookup_tag_slot - perform multiple slot lookup on a
 *                                   radix tree based on a tag
 * @root: radix tree root
 * @results: where the results of the lookup are placed
 * @first_index: start the lookup from this key
 * @max_items: place up to this many items at *results
 * @tag: the tag index (< RADIX_TREE_MAX_TAGS)
 *
 * Performs an index-ascending scan of the tree for present items which
 * have the tag indexed by @tag set. Places the slots at *@results and
 * returns the number of slots which were placed at *@results.
 */
unsigned int
radix_tree_gang_lookup_tag_slot(const struct radix_tree_root *root,
		void __rcu ***results, unsigned long first_index,
		unsigned int max_items, unsigned int tag)
{
	struct radix_tree_iter iter;
	void __rcu **slot;
	unsigned int ret = 0;

	if (unlikely(!max_items))
		return 0;

	radix_tree_for_each_tagged(slot, root, &iter, first_index, tag) {
		results[ret] = slot;
		if (++ret == max_items)
			break;
	}

	return ret;
}
EXPORT_SYMBOL(radix_tree_gang_lookup_tag_slot);

static bool __radix_tree_delete(struct radix_tree_root *root,
				struct radix_tree_node *node, void __rcu **slot)
{
	void *old = rcu_dereference_raw(*slot);
	int values = xa_is_value(old) ? -1 : 0;
	unsigned offset = get_slot_offset(node, slot);
	int tag;

	if (is_idr(root))
		node_tag_set(root, node, IDR_FREE, offset);
	else
		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++)
			node_tag_clear(root, node, tag, offset);

	replace_slot(slot, NULL, node, -1, values);
	return node && delete_node(root, node);
}

/**
 * radix_tree_iter_delete - delete the entry at this iterator position
 * @root: radix tree root
 * @iter: iterator state
 * @slot: pointer to slot
 *
 * Delete the entry at the position currently pointed to by the iterator.
 * This may result in the current node being freed; if it is, the iterator
 * is advanced so that it will not reference the freed memory. This
 * function may be called without any locking if there are no other threads
 * which can access this tree.
 */
void radix_tree_iter_delete(struct radix_tree_root *root,
				struct radix_tree_iter *iter, void __rcu **slot)
{
	if (__radix_tree_delete(root, iter->node, slot))
		iter->index = iter->next_index;
}
EXPORT_SYMBOL(radix_tree_iter_delete);
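
/*
 * Deleting while iterating, as a sketch ("mytree" is illustrative; the
 * caller is expected to exclude other writers):
 *
 *	struct radix_tree_iter iter;
 *	void __rcu **slot;
 *
 *	radix_tree_for_each_slot(slot, &mytree, &iter, 0)
 *		radix_tree_iter_delete(&mytree, &iter, slot);
 */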
/**
 * radix_tree_delete_item - delete an item from a radix tree
 * @root: radix tree root
 * @index: index key
 * @item: expected item
 *
 * Remove @item at @index from the radix tree rooted at @root.
 *
 * Return: the deleted entry, or %NULL if it was not present
 * or the entry at the given @index was not @item.
 */
void *radix_tree_delete_item(struct radix_tree_root *root,
			     unsigned long index, void *item)
{
	struct radix_tree_node *node = NULL;
	void __rcu **slot = NULL;
	void *entry;

	entry = __radix_tree_lookup(root, index, &node, &slot);
	if (!slot)
		return NULL;
	if (!entry && (!is_idr(root) || node_tag_get(root, node, IDR_FREE,
						get_slot_offset(node, slot))))
		return NULL;

	if (item && entry != item)
		return NULL;

	__radix_tree_delete(root, node, slot);

	return entry;
}
EXPORT_SYMBOL(radix_tree_delete_item);

/**
 * radix_tree_delete - delete an entry from a radix tree
 * @root: radix tree root
 * @index: index key
 *
 * Remove the entry at @index from the radix tree rooted at @root.
 *
 * Return: The deleted entry, or %NULL if it was not present.
 */
void *radix_tree_delete(struct radix_tree_root *root, unsigned long index)
{
	return radix_tree_delete_item(root, index, NULL);
}
EXPORT_SYMBOL(radix_tree_delete);

/**
 * radix_tree_tagged - test whether any items in the tree are tagged
 * @root: radix tree root
 * @tag: tag to test
 */
int radix_tree_tagged(const struct radix_tree_root *root, unsigned int tag)
{
	return root_tag_get(root, tag);
}
EXPORT_SYMBOL(radix_tree_tagged);

/**
 * idr_preload - preload for idr_alloc()
 * @gfp_mask: allocation mask to use for preloading
 *
 * Preallocate memory to use for the next call to idr_alloc(). This function
 * returns with preemption disabled. It will be enabled by idr_preload_end().
 */
void idr_preload(gfp_t gfp_mask)
{
	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
		local_lock(&radix_tree_preloads.lock);
}
EXPORT_SYMBOL(idr_preload);
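
/*
 * Typical idr_preload() usage, as a sketch ("mylock", "myidr" and "ptr"
 * are illustrative, not part of this file):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&mylock);
 *	id = idr_alloc(&myidr, ptr, 0, 0, GFP_NOWAIT);
 *	spin_unlock(&mylock);
 *	idr_preload_end();
 */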
void __rcu **idr_get_free(struct radix_tree_root *root,
			      struct radix_tree_iter *iter, gfp_t gfp,
			      unsigned long max)
{
	struct radix_tree_node *node = NULL, *child;
	void __rcu **slot = (void __rcu **)&root->xa_head;
	unsigned long maxindex, start = iter->next_index;
	unsigned int shift, offset = 0;

grow:
	shift = radix_tree_load_root(root, &child, &maxindex);
	if (!radix_tree_tagged(root, IDR_FREE))
		start = max(start, maxindex + 1);
	if (start > max)
		return ERR_PTR(-ENOSPC);

	if (start > maxindex) {
		int error = radix_tree_extend(root, gfp, start, shift);

		if (error < 0)
			return ERR_PTR(error);
		shift = error;
		child = rcu_dereference_raw(root->xa_head);
	}
	if (start == 0 && shift == 0)
		shift = RADIX_TREE_MAP_SHIFT;

	while (shift) {
		shift -= RADIX_TREE_MAP_SHIFT;
		if (child == NULL) {
			/* Have to add a child node. */
			child = radix_tree_node_alloc(gfp, node, root, shift,
							offset, 0, 0);
			if (!child)
				return ERR_PTR(-ENOMEM);
			all_tag_set(child, IDR_FREE);
			rcu_assign_pointer(*slot, node_to_entry(child));
			if (node)
				node->count++;
		} else if (!radix_tree_is_internal_node(child))
			break;

		node = entry_to_node(child);
		offset = radix_tree_descend(node, &child, start);
		if (!tag_get(node, IDR_FREE, offset)) {
			offset = radix_tree_find_next_bit(node, IDR_FREE,
							offset + 1);
			start = next_index(start, node, offset);
			if (start > max || start == 0)
				return ERR_PTR(-ENOSPC);
			while (offset == RADIX_TREE_MAP_SIZE) {
				offset = node->offset + 1;
				node = node->parent;
				if (!node)
					goto grow;
				shift = node->shift;
			}
			child = rcu_dereference_raw(node->slots[offset]);
		}
		slot = &node->slots[offset];
	}

	iter->index = start;
	if (node)
		iter->next_index = 1 + min(max, (start | node_maxindex(node)));
	else
		iter->next_index = 1;
	iter->node = node;
	set_iter_tags(iter, node, offset, IDR_FREE);

	return slot;
}

/**
 * idr_destroy - release all internal memory from an IDR
 * @idr: idr handle
 *
 * After this function is called, the IDR is empty, and may be reused or
 * the data structure containing it may be freed.
 *
 * A typical clean-up sequence for objects stored in an idr tree will use
 * idr_for_each() to free all objects, if necessary, then idr_destroy() to
 * free the memory used to keep track of those objects.
 */
void idr_destroy(struct idr *idr)
{
	struct radix_tree_node *node = rcu_dereference_raw(idr->idr_rt.xa_head);

	if (radix_tree_is_internal_node(node))
		radix_tree_free_nodes(node);
	idr->idr_rt.xa_head = NULL;
	root_tag_set(&idr->idr_rt, IDR_FREE);
}
EXPORT_SYMBOL(idr_destroy);

static void
radix_tree_node_ctor(void *arg)
{
	struct radix_tree_node *node = arg;

	memset(node, 0, sizeof(*node));
	INIT_LIST_HEAD(&node->private_list);
}

static int radix_tree_cpu_dead(unsigned int cpu)
{
	struct radix_tree_preload *rtp;
	struct radix_tree_node *node;

	/* Free per-cpu pool of preloaded nodes */
	rtp = &per_cpu(radix_tree_preloads, cpu);
	while (rtp->nr) {
		node = rtp->nodes;
		rtp->nodes = node->parent;
		kmem_cache_free(radix_tree_node_cachep, node);
		rtp->nr--;
	}
	return 0;
}

void __init radix_tree_init(void)
{
	int ret;

	BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32);
	BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK);
	BUILD_BUG_ON(XA_CHUNK_SIZE > 255);
	radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
			sizeof(struct radix_tree_node), 0,
			SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
			radix_tree_node_ctor);
	ret = cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead",
					NULL, radix_tree_cpu_dead);
	WARN_ON(ret < 0);
}