// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/devres.c - device resource management
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include <asm/sections.h>

#include "base.h"

struct devres_node {
	struct list_head		entry;
	dr_release_t			release;
#ifdef CONFIG_DEBUG_DEVRES
	const char			*name;
	size_t				size;
#endif
};

struct devres {
	struct devres_node		node;
	/*
	 * Some archs want to perform DMA into kmalloc caches
	 * and need a guaranteed alignment larger than
	 * the alignment of a 64-bit integer.
	 * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
	 * buffer alignment as if it was allocated by plain kmalloc().
	 */
	u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};

struct devres_group {
	struct devres_node		node[2];
	void				*id;
	int				color;
	/* -- 8 pointers */
};

#ifdef CONFIG_DEBUG_DEVRES
static int log_devres = 0;
module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);

static void set_node_dbginfo(struct devres_node *node, const char *name,
			     size_t size)
{
	node->name = name;
	node->size = size;
}

static void devres_log(struct device *dev, struct devres_node *node,
		       const char *op)
{
	if (unlikely(log_devres))
		dev_err(dev, "DEVRES %3s %p %s (%zu bytes)\n",
			op, node, node->name, node->size);
}
#else /* CONFIG_DEBUG_DEVRES */
#define set_node_dbginfo(node, n, s)	do {} while (0)
#define devres_log(dev, node, op)	do {} while (0)
#endif /* CONFIG_DEBUG_DEVRES */

/*
 * Release functions for devres group.  These callbacks are used only
 * for identification.
 */
static void group_open_release(struct device *dev, void *res)
{
	/* noop */
}

static void group_close_release(struct device *dev, void *res)
{
	/* noop */
}

static struct devres_group * node_to_group(struct devres_node *node)
{
	if (node->release == &group_open_release)
		return container_of(node, struct devres_group, node[0]);
	if (node->release == &group_close_release)
		return container_of(node, struct devres_group, node[1]);
	return NULL;
}
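
/*
 * Illustrative sketch (not part of the upstream file): a release
 * callback has the dr_release_t signature used throughout this file.
 * It receives the owning device and a pointer to the data[] area of
 * the devres entry.  "my_state" and "my_state_release" are
 * hypothetical driver-side names.
 *
 *	struct my_state {
 *		void __iomem *base;
 *	};
 *
 *	static void my_state_release(struct device *dev, void *res)
 *	{
 *		struct my_state *state = res;
 *
 *		iounmap(state->base);
 *	}
 */
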
static bool check_dr_size(size_t size, size_t *tot_size)
{
	/* We must catch any near-SIZE_MAX cases that could overflow. */
	if (unlikely(check_add_overflow(sizeof(struct devres),
					size, tot_size)))
		return false;

	return true;
}

static __always_inline struct devres * alloc_dr(dr_release_t release,
						size_t size, gfp_t gfp, int nid)
{
	size_t tot_size;
	struct devres *dr;

	if (!check_dr_size(size, &tot_size))
		return NULL;

	dr = kmalloc_node_track_caller(tot_size, gfp, nid);
	if (unlikely(!dr))
		return NULL;

	memset(dr, 0, offsetof(struct devres, data));

	INIT_LIST_HEAD(&dr->node.entry);
	dr->node.release = release;
	return dr;
}

static void add_dr(struct device *dev, struct devres_node *node)
{
	devres_log(dev, node, "ADD");
	BUG_ON(!list_empty(&node->entry));
	list_add_tail(&node->entry, &dev->devres_head);
}

static void replace_dr(struct device *dev,
		       struct devres_node *old, struct devres_node *new)
{
	devres_log(dev, old, "REPLACE");
	BUG_ON(!list_empty(&new->entry));
	list_replace(&old->entry, &new->entry);
}

#ifdef CONFIG_DEBUG_DEVRES
void * __devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid,
			   const char *name)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	set_node_dbginfo(&dr->node, name, size);
	return dr->data;
}
EXPORT_SYMBOL_GPL(__devres_alloc_node);
#else
/**
 * devres_alloc_node - Allocate device resource data
 * @release: Release function devres will be associated with
 * @size: Allocation size
 * @gfp: Allocation flags
 * @nid: NUMA node
 *
 * Allocate devres of @size bytes.  The allocated area is zeroed, then
 * associated with @release.  The returned pointer can be passed to
 * other devres_*() functions.
 *
 * RETURNS:
 * Pointer to allocated devres on success, NULL on failure.
 */
void * devres_alloc_node(dr_release_t release, size_t size, gfp_t gfp, int nid)
{
	struct devres *dr;

	dr = alloc_dr(release, size, gfp | __GFP_ZERO, nid);
	if (unlikely(!dr))
		return NULL;
	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_alloc_node);
#endif
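
/*
 * Illustrative sketch (assumed driver code): the usual
 * allocate/initialize/register sequence.  devres_alloc() is the
 * NUMA-unaware wrapper around this allocator declared in
 * <linux/device.h>; my_state/my_state_release are hypothetical.
 *
 *	struct my_state *state;
 *
 *	state = devres_alloc(my_state_release, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *	state->base = ioremap(start, len);
 *	if (!state->base) {
 *		devres_free(state);
 *		return -ENOMEM;
 *	}
 *	devres_add(dev, state);
 *
 * Until devres_add() is called the entry is not on any list, so error
 * paths must free it with devres_free() themselves.
 */
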
/**
 * devres_for_each_res - Resource iterator
 * @dev: Device to iterate resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 * @fn: Function to be called for each matched resource.
 * @data: Data for @fn, the 3rd parameter of @fn
 *
 * Call @fn for each devres of @dev which is associated with @release
 * and for which @match returns 1.
 *
 * RETURNS:
 *	void
 */
void devres_for_each_res(struct device *dev, dr_release_t release,
			 dr_match_t match, void *match_data,
			 void (*fn)(struct device *, void *, void *),
			 void *data)
{
	struct devres_node *node;
	struct devres_node *tmp;
	unsigned long flags;

	if (!fn)
		return;

	spin_lock_irqsave(&dev->devres_lock, flags);
	list_for_each_entry_safe_reverse(node, tmp,
			&dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		fn(dev, dr->data, data);
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_for_each_res);

/**
 * devres_free - Free device resource data
 * @res: Pointer to devres data to free
 *
 * Free devres created with devres_alloc().
 */
void devres_free(void *res)
{
	if (res) {
		struct devres *dr = container_of(res, struct devres, data);

		BUG_ON(!list_empty(&dr->node.entry));
		kfree(dr);
	}
}
EXPORT_SYMBOL_GPL(devres_free);

/**
 * devres_add - Register device resource
 * @dev: Device to add resource to
 * @res: Resource to register
 *
 * Register devres @res to @dev.  @res should have been allocated
 * using devres_alloc().  On driver detach, the associated release
 * function will be invoked and devres will be freed automatically.
 */
void devres_add(struct device *dev, void *res)
{
	struct devres *dr = container_of(res, struct devres, data);
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &dr->node);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_add);

/* Caller must hold dev->devres_lock. */
static struct devres *find_dr(struct device *dev, dr_release_t release,
			      dr_match_t match, void *match_data)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres *dr = container_of(node, struct devres, node);

		if (node->release != release)
			continue;
		if (match && !match(dev, dr->data, match_data))
			continue;
		return dr;
	}

	return NULL;
}
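
/*
 * Illustrative sketch (assumed driver code): a dr_match_t callback as
 * consumed by find_dr() and the lookup helpers below.  It is handed
 * the data[] area of each candidate entry and returns 1 on a match;
 * my_match is a hypothetical name.
 *
 *	static int my_match(struct device *dev, void *res, void *match_data)
 *	{
 *		struct my_state *state = res;
 *
 *		return state->base == match_data;
 *	}
 */
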
/**
 * devres_find - Find device resource
 * @dev: Device to lookup resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which is associated with @release
 * and for which @match returns 1.  If @match is NULL, it's considered
 * to match all.
 *
 * RETURNS:
 * Pointer to found devres, NULL if not found.
 */
void * devres_find(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_find);

/**
 * devres_get - Find devres, if non-existent, add one atomically
 * @dev: Device to lookup or add devres for
 * @new_res: Pointer to new initialized devres to add if not found
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev which has the same release function
 * as @new_res and for which @match returns 1.  If found, @new_res is
 * freed; otherwise, @new_res is added atomically.
 *
 * RETURNS:
 * Pointer to found or added devres.
 */
void * devres_get(struct device *dev, void *new_res,
		  dr_match_t match, void *match_data)
{
	struct devres *new_dr = container_of(new_res, struct devres, data);
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, new_dr->node.release, match, match_data);
	if (!dr) {
		add_dr(dev, &new_dr->node);
		dr = new_dr;
		new_res = NULL;
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	devres_free(new_res);

	return dr->data;
}
EXPORT_SYMBOL_GPL(devres_get);

/**
 * devres_remove - Find a device resource and remove it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and
 * returned.
 *
 * RETURNS:
 * Pointer to removed devres on success, NULL if not found.
 */
void * devres_remove(struct device *dev, dr_release_t release,
		     dr_match_t match, void *match_data)
{
	struct devres *dr;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);
	dr = find_dr(dev, release, match, match_data);
	if (dr) {
		list_del_init(&dr->node.entry);
		devres_log(dev, &dr->node, "REM");
	}
	spin_unlock_irqrestore(&dev->devres_lock, flags);

	if (dr)
		return dr->data;
	return NULL;
}
EXPORT_SYMBOL_GPL(devres_remove);
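
/*
 * Illustrative sketch (assumed driver code): taking an entry back out
 * of devres control with devres_remove().  On success the caller owns
 * the entry again and must eventually devres_free() it (or re-add it
 * with devres_add()); the release function is not invoked.
 *
 *	struct my_state *state;
 *
 *	state = devres_remove(dev, my_state_release, my_match, base);
 *	if (state)
 *		devres_free(state);
 */
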
/**
 * devres_destroy - Find a device resource and destroy it
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically and freed.
 *
 * Note that the release function for the resource will not be called,
 * only the devres-allocated data will be freed.  The caller becomes
 * responsible for freeing any other data.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_destroy(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_destroy);

/**
 * devres_release - Find a device resource and destroy it, calling release
 * @dev: Device to find resource from
 * @release: Look for resources associated with this release function
 * @match: Match function (optional)
 * @match_data: Data for the match function
 *
 * Find the latest devres of @dev associated with @release and for
 * which @match returns 1.  If @match is NULL, it's considered to
 * match all.  If found, the resource is removed atomically, the
 * release function called and the resource freed.
 *
 * RETURNS:
 * 0 if devres is found and freed, -ENOENT if not found.
 */
int devres_release(struct device *dev, dr_release_t release,
		   dr_match_t match, void *match_data)
{
	void *res;

	res = devres_remove(dev, release, match, match_data);
	if (unlikely(!res))
		return -ENOENT;

	(*release)(dev, res);
	devres_free(res);
	return 0;
}
EXPORT_SYMBOL_GPL(devres_release);

static int remove_nodes(struct device *dev,
			struct list_head *first, struct list_head *end,
			struct list_head *todo)
{
	int cnt = 0, nr_groups = 0;
	struct list_head *cur;

	/* First pass - move normal devres entries to @todo and clear
	 * devres_group colors.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		if (grp) {
			/* clear color of group markers in the first pass */
			grp->color = 0;
			nr_groups++;
		} else {
			/* regular devres entry */
			if (&node->entry == first)
				first = first->next;
			list_move_tail(&node->entry, todo);
			cnt++;
		}
	}

	if (!nr_groups)
		return cnt;

	/* Second pass - Scan groups and color them.  A group gets
	 * color value of two iff the group is wholly contained in
	 * [first, end).  That is, for a closed group, both opening and
	 * closing markers should be in the range, while just the
	 * opening marker is enough for an open group.
	 */
	cur = first;
	while (cur != end) {
		struct devres_node *node;
		struct devres_group *grp;

		node = list_entry(cur, struct devres_node, entry);
		cur = cur->next;

		grp = node_to_group(node);
		BUG_ON(!grp || list_empty(&grp->node[0].entry));

		grp->color++;
		if (list_empty(&grp->node[1].entry))
			grp->color++;

		BUG_ON(grp->color <= 0 || grp->color > 2);
		if (grp->color == 2) {
			/* No need to update cur or end.  The removed
			 * nodes are always before both.
			 */
			list_move_tail(&grp->node[0].entry, todo);
			list_del_init(&grp->node[1].entry);
		}
	}

	return cnt;
}
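
/*
 * Worked example for the coloring pass above (illustration only).
 * Suppose [first, end) contains the markers "A< A> B<", where X< and
 * X> denote the open and close markers of group X:
 *
 *   - both of A's markers are in range, so A's color goes 1 -> 2 and
 *     A is moved to @todo;
 *   - B is still open (its node[1] is on no list), so its lone open
 *     marker alone takes B's color to 2 and B is torn down as well;
 *   - a group whose open marker lies before @first would reach only
 *     color 1 here and is left untouched.
 */
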
static int release_nodes(struct device *dev, struct list_head *first,
			 struct list_head *end, unsigned long flags)
	__releases(&dev->devres_lock)
{
	LIST_HEAD(todo);
	int cnt;
	struct devres *dr, *tmp;

	cnt = remove_nodes(dev, first, end, &todo);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/* Release.  Note that both devres and devres_group are
	 * handled as devres in the following loop.  This is safe.
	 */
	list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
		devres_log(dev, &dr->node, "REL");
		dr->node.release(dev, dr->data);
		kfree(dr);
	}

	return cnt;
}

/**
 * devres_release_all - Release all managed resources
 * @dev: Device to release resources for
 *
 * Release all resources associated with @dev.  This function is
 * called on driver detach.
 *
 * RETURNS:
 * The number of released non-group resources, or -ENODEV if the
 * devres list of @dev was never initialized.
 */
int devres_release_all(struct device *dev)
{
	unsigned long flags;

	/* Looks like an uninitialized device structure */
	if (WARN_ON(dev->devres_head.next == NULL))
		return -ENODEV;
	spin_lock_irqsave(&dev->devres_lock, flags);
	return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
			     flags);
}

/**
 * devres_open_group - Open a new devres group
 * @dev: Device to open devres group for
 * @id: Separator ID
 * @gfp: Allocation flags
 *
 * Open a new devres group for @dev with @id.  For @id, using a
 * pointer to an object which won't be used for another group is
 * recommended.  If @id is NULL, an address-wise unique ID is created.
 *
 * RETURNS:
 * ID of the new group, NULL on failure.
 */
void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
{
	struct devres_group *grp;
	unsigned long flags;

	grp = kmalloc(sizeof(*grp), gfp);
	if (unlikely(!grp))
		return NULL;

	grp->node[0].release = &group_open_release;
	grp->node[1].release = &group_close_release;
	INIT_LIST_HEAD(&grp->node[0].entry);
	INIT_LIST_HEAD(&grp->node[1].entry);
	set_node_dbginfo(&grp->node[0], "grp<", 0);
	set_node_dbginfo(&grp->node[1], "grp>", 0);
	grp->id = grp;
	if (id)
		grp->id = id;

	spin_lock_irqsave(&dev->devres_lock, flags);
	add_dr(dev, &grp->node[0]);
	spin_unlock_irqrestore(&dev->devres_lock, flags);
	return grp->id;
}
EXPORT_SYMBOL_GPL(devres_open_group);

/* Find devres group with ID @id.  If @id is NULL, look for the latest. */
static struct devres_group * find_group(struct device *dev, void *id)
{
	struct devres_node *node;

	list_for_each_entry_reverse(node, &dev->devres_head, entry) {
		struct devres_group *grp;

		if (node->release != &group_open_release)
			continue;

		grp = container_of(node, struct devres_group, node[0]);

		if (id) {
			if (grp->id == id)
				return grp;
		} else if (list_empty(&grp->node[1].entry))
			return grp;
	}

	return NULL;
}

/**
 * devres_close_group - Close a devres group
 * @dev: Device to close devres group for
 * @id: ID of target group, can be NULL
 *
 * Close the group identified by @id.  If @id is NULL, the latest open
 * group is selected.
 */
void devres_close_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp)
		add_dr(dev, &grp->node[1]);
	else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);
}
EXPORT_SYMBOL_GPL(devres_close_group);
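
/*
 * Illustrative sketch (assumed driver code): grouping managed
 * resources so a half-finished init can be rolled back in one call,
 * as described in Documentation/driver-api/driver-model/devres.rst.
 *
 *	if (!devres_open_group(dev, NULL, GFP_KERNEL))
 *		return -ENOMEM;
 *
 *	... acquire managed resources ...
 *
 *	if (err) {
 *		devres_release_group(dev, NULL);
 *		return err;
 *	}
 *	devres_close_group(dev, NULL);
 *	return 0;
 */
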
/**
 * devres_remove_group - Remove a devres group
 * @dev: Device to remove group for
 * @id: ID of target group, can be NULL
 *
 * Remove the group identified by @id.  If @id is NULL, the latest
 * open group is selected.  Note that removing a group doesn't affect
 * any other resources.
 */
void devres_remove_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		list_del_init(&grp->node[0].entry);
		list_del_init(&grp->node[1].entry);
		devres_log(dev, &grp->node[0], "REM");
	} else
		WARN_ON(1);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	kfree(grp);
}
EXPORT_SYMBOL_GPL(devres_remove_group);

/**
 * devres_release_group - Release resources in a devres group
 * @dev: Device to release group for
 * @id: ID of target group, can be NULL
 *
 * Release all resources in the group identified by @id.  If @id is
 * NULL, the latest open group is selected.  The selected group and
 * groups properly nested inside the selected group are removed.
 *
 * RETURNS:
 * The number of released non-group resources.
 */
int devres_release_group(struct device *dev, void *id)
{
	struct devres_group *grp;
	unsigned long flags;
	int cnt = 0;

	spin_lock_irqsave(&dev->devres_lock, flags);

	grp = find_group(dev, id);
	if (grp) {
		struct list_head *first = &grp->node[0].entry;
		struct list_head *end = &dev->devres_head;

		if (!list_empty(&grp->node[1].entry))
			end = grp->node[1].entry.next;

		cnt = release_nodes(dev, first, end, flags);
	} else {
		WARN_ON(1);
		spin_unlock_irqrestore(&dev->devres_lock, flags);
	}

	return cnt;
}
EXPORT_SYMBOL_GPL(devres_release_group);

/*
 * Custom devres actions allow inserting a simple function call
 * into the teardown sequence.
 */

struct action_devres {
	void *data;
	void (*action)(void *);
};

static int devm_action_match(struct device *dev, void *res, void *p)
{
	struct action_devres *devres = res;
	struct action_devres *target = p;

	return devres->action == target->action &&
	       devres->data == target->data;
}

static void devm_action_release(struct device *dev, void *res)
{
	struct action_devres *devres = res;

	devres->action(devres->data);
}

/**
 * devm_add_action() - add a custom action to list of managed resources
 * @dev: Device that owns the action
 * @action: Function that should be called
 * @data: Pointer to data passed to @action implementation
 *
 * This adds a custom action to the list of managed resources so that
 * it gets executed as part of standard resource unwinding.
 */
int devm_add_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres *devres;

	devres = devres_alloc(devm_action_release,
			      sizeof(struct action_devres), GFP_KERNEL);
	if (!devres)
		return -ENOMEM;

	devres->data = data;
	devres->action = action;

	devres_add(dev, devres);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_add_action);
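
/*
 * Illustrative sketch (assumed driver code): registering a one-shot
 * cleanup call.  my_disable_hw is a hypothetical helper; note that
 * <linux/device.h> also offers devm_add_action_or_reset(), which runs
 * the action immediately if registration fails.
 *
 *	static void my_disable_hw(void *data)
 *	{
 *		struct my_state *state = data;
 *
 *		writel(0, state->base);
 *	}
 *
 *	err = devm_add_action(dev, my_disable_hw, state);
 *	if (err)
 *		return err;
 */
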
/**
 * devm_remove_action() - removes previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Removes instance of @action previously added by devm_add_action().
 * Both action and data should match one of the existing entries.
 */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);

/**
 * devm_release_action() - release previously added custom action
 * @dev: Device that owns the action
 * @action: Function implementing the action
 * @data: Pointer to data passed to @action implementation
 *
 * Releases and removes instance of @action previously added by
 * devm_add_action().  Both action and data should match one of the
 * existing entries.
 */
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
	struct action_devres devres = {
		.data = data,
		.action = action,
	};

	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);

/*
 * Managed kmalloc/kfree
 */
static void devm_kmalloc_release(struct device *dev, void *res)
{
	/* noop */
}

static int devm_kmalloc_match(struct device *dev, void *res, void *data)
{
	return res == data;
}

/**
 * devm_kmalloc - Resource-managed kmalloc
 * @dev: Device to allocate memory for
 * @size: Allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed kmalloc.  Memory allocated with this function is
 * automatically freed on driver detach.  Like all other devres
 * resources, the guaranteed alignment is that of a plain kmalloc()
 * buffer (ARCH_KMALLOC_MINALIGN).
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
{
	struct devres *dr;

	if (unlikely(!size))
		return ZERO_SIZE_PTR;

	/* use raw alloc_dr for kmalloc caller tracing */
	dr = alloc_dr(devm_kmalloc_release, size, gfp, dev_to_node(dev));
	if (unlikely(!dr))
		return NULL;

	/*
	 * This is named devm_kzalloc_release for historical reasons.
	 * The initial implementation did not support kmalloc, only kzalloc.
	 */
	set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
	devres_add(dev, dr->data);
	return dr->data;
}
EXPORT_SYMBOL_GPL(devm_kmalloc);
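
/*
 * Illustrative sketch (assumed driver code): managed allocations need
 * no matching kfree() in error paths or in the remove callback.
 * devm_kzalloc() is the zeroing wrapper declared in <linux/device.h>.
 *
 *	struct my_state *state;
 *
 *	state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *	dev_set_drvdata(dev, state);
 */
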
/**
 * devm_krealloc - Resource-managed krealloc()
 * @dev: Device to re-allocate memory for
 * @ptr: Pointer to the memory chunk to re-allocate
 * @new_size: New allocation size
 * @gfp: Allocation gfp flags
 *
 * Managed krealloc().  Resizes the memory chunk allocated with devm_kmalloc().
 * Behaves similarly to regular krealloc(): if @ptr is NULL or ZERO_SIZE_PTR,
 * it's the equivalent of devm_kmalloc().  If new_size is zero, it frees the
 * previously allocated memory and returns ZERO_SIZE_PTR.  This function doesn't
 * change the order in which the release callback for the re-alloc'ed devres
 * will be called (except when falling back to devm_kmalloc() or when freeing
 * resources when new_size is zero).  The contents of the memory are preserved
 * up to the lesser of new and old sizes.
 *
 * RETURNS:
 * Pointer to the resized memory chunk on success, NULL on failure (in
 * which case the original chunk is left untouched).
 */
void *devm_krealloc(struct device *dev, void *ptr, size_t new_size, gfp_t gfp)
{
	size_t total_new_size, total_old_size;
	struct devres *old_dr, *new_dr;
	unsigned long flags;

	if (unlikely(!new_size)) {
		devm_kfree(dev, ptr);
		return ZERO_SIZE_PTR;
	}

	if (unlikely(ZERO_OR_NULL_PTR(ptr)))
		return devm_kmalloc(dev, new_size, gfp);

	if (WARN_ON(is_kernel_rodata((unsigned long)ptr)))
		/*
		 * We cannot reliably realloc a const string returned by
		 * devm_kstrdup_const().
		 */
		return NULL;

	if (!check_dr_size(new_size, &total_new_size))
		return NULL;

	total_old_size = ksize(container_of(ptr, struct devres, data));
	if (total_old_size == 0) {
		WARN(1, "Pointer doesn't point to dynamically allocated memory.");
		return NULL;
	}

	/*
	 * If new size is smaller or equal to the actual number of bytes
	 * allocated previously - just return the same pointer.
	 */
	if (total_new_size <= total_old_size)
		return ptr;

	/*
	 * Otherwise: allocate new, larger chunk.  We need to allocate before
	 * taking the lock as most probably the caller uses GFP_KERNEL.
	 */
	new_dr = alloc_dr(devm_kmalloc_release,
			  total_new_size, gfp, dev_to_node(dev));
	if (!new_dr)
		return NULL;

	/*
	 * The spinlock protects the linked list against concurrent
	 * modifications but not the resource itself.
	 */
	spin_lock_irqsave(&dev->devres_lock, flags);

	old_dr = find_dr(dev, devm_kmalloc_release, devm_kmalloc_match, ptr);
	if (!old_dr) {
		spin_unlock_irqrestore(&dev->devres_lock, flags);
		kfree(new_dr);
		WARN(1, "Memory chunk not managed or managed by a different device.");
		return NULL;
	}

	replace_dr(dev, &old_dr->node, &new_dr->node);

	spin_unlock_irqrestore(&dev->devres_lock, flags);

	/*
	 * We can copy the memory contents after releasing the lock as we're
	 * no longer modifying the list links.
	 */
	memcpy(new_dr->data, old_dr->data,
	       total_old_size - offsetof(struct devres, data));
	/*
	 * Same for releasing the old devres - it's now been removed from the
	 * list.  This is also the reason why we must not use devm_kfree() - the
	 * links are no longer valid.
	 */
	kfree(old_dr);

	return new_dr->data;
}
EXPORT_SYMBOL_GPL(devm_krealloc);

/**
 * devm_kstrdup - Allocate resource managed space and
 *		  copy an existing string into that.
 * @dev: Device to allocate memory for
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
{
	size_t size;
	char *buf;

	if (!s)
		return NULL;

	size = strlen(s) + 1;
	buf = devm_kmalloc(dev, size, gfp);
	if (buf)
		memcpy(buf, s, size);
	return buf;
}
EXPORT_SYMBOL_GPL(devm_kstrdup);
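
/*
 * Illustrative sketch (assumed driver code): growing a managed buffer.
 * On failure devm_krealloc() returns NULL and the original chunk stays
 * allocated and registered, so the old pointer remains valid; on
 * success the old pointer must no longer be used.
 *
 *	u32 *tbl, *new_tbl;
 *
 *	tbl = devm_kmalloc(dev, 16 * sizeof(*tbl), GFP_KERNEL);
 *	...
 *	new_tbl = devm_krealloc(dev, tbl, 32 * sizeof(*tbl), GFP_KERNEL);
 *	if (!new_tbl)
 *		return -ENOMEM;
 *	tbl = new_tbl;
 */
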
/**
 * devm_kstrdup_const - resource managed conditional string duplication
 * @dev: device for which to duplicate the string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Strings allocated by devm_kstrdup_const will be automatically freed when
 * the associated device is detached.
 *
 * RETURNS:
 * Source string if it is located in .rodata, otherwise a pointer to a
 * managed duplicate created with devm_kstrdup().
 */
const char *devm_kstrdup_const(struct device *dev, const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return devm_kstrdup(dev, s, gfp);
}
EXPORT_SYMBOL_GPL(devm_kstrdup_const);

/**
 * devm_kvasprintf - Allocate resource managed space and format a string
 *		     into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @ap: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
		      va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = devm_kmalloc(dev, len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(devm_kvasprintf);

/**
 * devm_kasprintf - Allocate resource managed space and format a string
 *		    into that.
 * @dev: Device to allocate memory for
 * @gfp: the GFP mask used in the devm_kmalloc() call when
 *       allocating memory
 * @fmt: The printf()-style format string
 * @...: Arguments for the format string
 *
 * RETURNS:
 * Pointer to allocated string on success, NULL on failure.
 */
char *devm_kasprintf(struct device *dev, gfp_t gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = devm_kvasprintf(dev, gfp, fmt, ap);
	va_end(ap);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kasprintf);
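
/*
 * Illustrative sketch (assumed driver code): building a managed,
 * formatted string, e.g. a name that must stay valid past probe().
 *
 *	const char *name;
 *
 *	name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", dev_name(dev));
 *	if (!name)
 *		return -ENOMEM;
 */
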
/**
 * devm_kfree - Resource-managed kfree
 * @dev: Device this memory belongs to
 * @p: Memory to free
 *
 * Free memory allocated with devm_kmalloc().
 */
void devm_kfree(struct device *dev, const void *p)
{
	int rc;

	/*
	 * Special cases: pointer to a string in .rodata returned by
	 * devm_kstrdup_const() or a NULL/ZERO ptr.
	 */
	if (unlikely(is_kernel_rodata((unsigned long)p) || ZERO_OR_NULL_PTR(p)))
		return;

	rc = devres_destroy(dev, devm_kmalloc_release,
			    devm_kmalloc_match, (void *)p);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_kfree);

/**
 * devm_kmemdup - Resource-managed kmemdup
 * @dev: Device this memory belongs to
 * @src: Memory region to duplicate
 * @len: Memory region length
 * @gfp: GFP mask to use
 *
 * Duplicate a region of memory using resource-managed kmalloc().
 */
void *devm_kmemdup(struct device *dev, const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = devm_kmalloc(dev, len, gfp);
	if (p)
		memcpy(p, src, len);

	return p;
}
EXPORT_SYMBOL_GPL(devm_kmemdup);

struct pages_devres {
	unsigned long addr;
	unsigned int order;
};

static int devm_pages_match(struct device *dev, void *res, void *p)
{
	struct pages_devres *devres = res;
	struct pages_devres *target = p;

	return devres->addr == target->addr;
}

static void devm_pages_release(struct device *dev, void *res)
{
	struct pages_devres *devres = res;

	free_pages(devres->addr, devres->order);
}

/**
 * devm_get_free_pages - Resource-managed __get_free_pages
 * @dev: Device to allocate memory for
 * @gfp_mask: Allocation gfp flags
 * @order: Allocation size is (1 << order) pages
 *
 * Managed get_free_pages.  Memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Address of allocated memory on success, 0 on failure.
 */
unsigned long devm_get_free_pages(struct device *dev,
				  gfp_t gfp_mask, unsigned int order)
{
	struct pages_devres *devres;
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);

	if (unlikely(!addr))
		return 0;

	devres = devres_alloc(devm_pages_release,
			      sizeof(struct pages_devres), GFP_KERNEL);
	if (unlikely(!devres)) {
		free_pages(addr, order);
		return 0;
	}

	devres->addr = addr;
	devres->order = order;

	devres_add(dev, devres);
	return addr;
}
EXPORT_SYMBOL_GPL(devm_get_free_pages);

/**
 * devm_free_pages - Resource-managed free_pages
 * @dev: Device this memory belongs to
 * @addr: Memory to free
 *
 * Free memory allocated with devm_get_free_pages().  Unlike free_pages,
 * there is no need to supply the @order.
 */
void devm_free_pages(struct device *dev, unsigned long addr)
{
	struct pages_devres devres = { .addr = addr };

	WARN_ON(devres_release(dev, devm_pages_release, devm_pages_match,
			       &devres));
}
EXPORT_SYMBOL_GPL(devm_free_pages);
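
/*
 * Illustrative sketch (assumed driver code): a managed multi-page
 * allocation.  The pages are freed automatically on detach, or
 * earlier via devm_free_pages().
 *
 *	unsigned long buf;
 *
 *	buf = devm_get_free_pages(dev, GFP_KERNEL | __GFP_ZERO, 2);
 *	if (!buf)
 *		return -ENOMEM;
 */
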
static void devm_percpu_release(struct device *dev, void *pdata)
{
	void __percpu *p;

	p = *(void __percpu **)pdata;
	free_percpu(p);
}

static int devm_percpu_match(struct device *dev, void *data, void *p)
{
	struct devres *devr = container_of(data, struct devres, data);

	return *(void **)devr->data == p;
}

/**
 * __devm_alloc_percpu - Resource-managed alloc_percpu
 * @dev: Device to allocate per-cpu memory for
 * @size: Size of per-cpu memory to allocate
 * @align: Alignment of per-cpu memory to allocate
 *
 * Managed alloc_percpu.  Per-cpu memory allocated with this function is
 * automatically freed on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void __percpu *__devm_alloc_percpu(struct device *dev, size_t size,
				   size_t align)
{
	void *p;
	void __percpu *pcpu;

	pcpu = __alloc_percpu(size, align);
	if (!pcpu)
		return NULL;

	p = devres_alloc(devm_percpu_release, sizeof(void *), GFP_KERNEL);
	if (!p) {
		free_percpu(pcpu);
		return NULL;
	}

	*(void __percpu **)p = pcpu;

	devres_add(dev, p);

	return pcpu;
}
EXPORT_SYMBOL_GPL(__devm_alloc_percpu);

/**
 * devm_free_percpu - Resource-managed free_percpu
 * @dev: Device this memory belongs to
 * @pdata: Per-cpu memory to free
 *
 * Free memory allocated with devm_alloc_percpu().
 */
void devm_free_percpu(struct device *dev, void __percpu *pdata)
{
	WARN_ON(devres_destroy(dev, devm_percpu_release, devm_percpu_match,
			       (__force void *)pdata));
}
EXPORT_SYMBOL_GPL(devm_free_percpu);
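
/*
 * Illustrative sketch (assumed driver code): per-cpu state tied to a
 * device.  devm_alloc_percpu() is the type-based wrapper around
 * __devm_alloc_percpu() declared in <linux/device.h>; my_stats is a
 * hypothetical type.
 *
 *	struct my_stats __percpu *stats;
 *	struct my_stats *s;
 *
 *	stats = devm_alloc_percpu(dev, struct my_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	s = get_cpu_ptr(stats);
 *	s->rx_packets++;
 *	put_cpu_ptr(stats);
 */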